# Shared imports for the test snippets below (a minimal sketch). The pyspark and
# stdlib imports are standard; the spark_auto_mapper import paths are assumed from
# that project's layout. Project-specific test fixtures used further down (MyClass,
# MyObject, MyProcessingStatusExtension, Patient, Reference, FhirId, FhirReference,
# AutoMapperIfColumnExistsType, clean_spark_session, assert_compare_expressions)
# are defined in the surrounding test package.
from datetime import date, datetime
from decimal import Decimal
from os import environ
from pathlib import Path
from typing import Dict

from pytest import approx
from pyspark.sql import Column, DataFrame, SparkSession
from pyspark.sql.functions import (
    array,
    coalesce,
    col,
    concat,
    date_format,
    exists,
    expr,
    filter,
    from_unixtime,
    hash,
    lit,
    lpad,
    regexp_replace,
    struct,
    to_date,
    to_timestamp,
    when,
)
from pyspark.sql.types import IntegerType, LongType, StringType

from spark_auto_mapper.automappers.automapper import AutoMapper
from spark_auto_mapper.data_types.list import AutoMapperList
from spark_auto_mapper.helpers.automapper_helpers import AutoMapperHelpers as A


def test_auto_mapper_decimal_typed(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", 54.45),
            (2, "Vidal", "Michael", 123467.678),
            (3, "Paul", "Kyle", 13.0),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    # source_df = source_df.withColumn("my_age", col("my_age").cast("float"))

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(age=A.decimal(A.column("my_age"), 10, 2))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("decimal(10,2)").alias("age")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age").collect()[0][0] == Decimal(
        "54.45"
    )
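    # decimal(10,2) keeps two fractional digits, so 123467.678 is rounded
    # half-up to 123467.68 below.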
    assert result_df.where("member_id == 2").select("age").collect()[0][0] == Decimal(
        "123467.68"
    )
    assert result_df.where("member_id == 3").select("age").collect()[0][0] == Decimal(
        "13.00"
    )

    assert dict(result_df.dtypes)["age"] == "decimal(10,2)"
def test_automapper_optional_ifexists(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Vidal", "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(
        optional_age=AutoMapperIfColumnExistsType(
            column=A.column("my_age"),
            if_exists=A.number(A.column("my_age")),
            if_not_exists=A.text("no age"),
        ),
        optional_foo=AutoMapperIfColumnExistsType(
            column=A.column("foo"),
            if_exists=A.text("foo col is there"),
            if_not_exists=A.text("no foo"),
        ),
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

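    # my_age exists, so the if_exists branch (A.number) applies and a NULL age
    # stays NULL; foo does not exist, so every row gets the literal "no foo".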
    assert result_df.where("member_id == 1").select(
        "optional_age", "optional_foo"
    ).collect()[0][:] == (54, "no foo")
    assert result_df.where("member_id == 2").select(
        "optional_age", "optional_foo"
    ).collect()[0][:] == (None, "no foo")
def test_auto_mapper_boolean(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "0"),
            (2, "Vidal", "Michael", "1"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(view="members",
                        source_view="patients",
                        keys=["member_id"]).columns(
                            age=A.boolean(A.column("my_age")),
                            is_active=A.boolean("False"),
                        )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(sql_expressions["age"],
                               col("b.my_age").cast("boolean").alias("age"))
    assert_compare_expressions(sql_expressions["is_active"],
                               lit("False").cast("boolean").alias("is_active"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

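    # Spark's boolean cast accepts "0"/"1" as well as case-insensitive
    # "true"/"false", so both the string column and the literal "False" cast cleanly.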
    assert result_df.where("member_id == 1").select(
        "age",
        "is_active",
    ).collect()[0][:] == (False, False)
    assert result_df.where("member_id == 2").select(
        "age",
        "is_active",
    ).collect()[0][:] == (True, False)

    assert dict(result_df.dtypes)["age"] == "boolean"
    assert dict(result_df.dtypes)["is_active"] == "boolean"
def test_auto_mapper_hash(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "54"),
            (2, "Vidal", "67"),
            (3, "Vidal", None),
            (4, None, None),
        ],
        ["member_id", "last_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    source_df = source_df.withColumn("my_age", col("my_age").cast("int"))

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(age=A.hash(A.column("my_age"), A.column("last_name")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["age"]) == str(
        hash(col("b.my_age"), col("b.last_name")).cast("string").alias("age")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (
        result_df.where("member_id == 1").select("age").collect()[0][0] == "-543157534"
    )
    assert (
        result_df.where("member_id == 2").select("age").collect()[0][0] == "2048196121"
    )
    assert (
        result_df.where("member_id == 3").select("age").collect()[0][0] == "-80001407"
    )
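    # When every input to hash() is NULL, Spark returns the seed value, 42.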
    assert result_df.where("member_id == 4").select("age").collect()[0][0] == "42"

    assert dict(result_df.dtypes)["age"] == "string"
def test_automapper_map_no_default(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "Y"),
            (2, "Vidal", "Michael", "N"),
            (3, "Vidal", "Michael", "f"),
        ],
        ["member_id", "last_name", "first_name", "has_kids"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        keep_null_rows=True).columns(has_kids=A.map(A.column("has_kids"), {
            "Y": "Yes",
            "N": "No"
        }))
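    # No default is given, so the unmapped value "f" maps to NULL;
    # keep_null_rows=True keeps that row in the output.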

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["has_kids"],
        when(col("b.has_kids").eqNullSafe(lit("Y")), lit("Yes")).when(
            col("b.has_kids").eqNullSafe(lit("N")),
            lit("No")).otherwise(lit(None)).alias("___has_kids"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select(
        "has_kids").collect()[0][0] == "Yes"
    assert result_df.where("member_id == 2").select(
        "has_kids").collect()[0][0] == "No"
    assert result_df.where("member_id == 3").select(
        "has_kids").collect()[0][0] is None
def test_auto_mapper_date_format(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01 12:30"),
            (2, "Vidal", "Michael", "1970-02-02 06:30"),
        ],
        ["member_id", "last_name", "first_name", "opening_time"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    source_df = source_df.withColumn(
        "opening_time", to_timestamp("opening_time",
                                     format="yyyy-MM-dd hh:mm"))

    assert dict(source_df.dtypes)["opening_time"] == "timestamp"

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients",
        keys=["member_id"]).columns(openingTime=A.datetime(
            A.column("opening_time")).to_date_format("hh:mm:ss"))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["openingTime"]) == str(
        date_format(coalesce(to_timestamp(col("b.opening_time"))),
                    "hh:mm:ss").alias("openingTime"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (result_df.where("member_id == 1").select("openingTime").collect()
            [0][0] == "12:30:00")
    assert (result_df.where("member_id == 2").select("openingTime").collect()
            [0][0] == "06:30:00")

    # check type
    assert dict(result_df.dtypes)["openingTime"] == "string"
def test_automapper_nested_array_filter_simple_with_array(
    spark_session: SparkSession,
) -> None:
    clean_spark_session(spark_session)
    data_dir: Path = Path(__file__).parent.joinpath("./")

    environ["LOGLEVEL"] = "DEBUG"

    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file),
                                                   multiLine=True)

    source_df.createOrReplaceTempView("patients")

    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients").columns(age=A.nested_array_filter(
            array_field=A.column("array1"),
            inner_array_field=A.field("array2"),
            match_property="reference",
            match_value=A.text("bar"),
        ))
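    # nested_array_filter keeps only the array1 elements whose inner array2
    # contains an entry with reference == "bar", using Spark's filter/exists
    # higher-order functions (see the expected expression below).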

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        filter(
            col("b.array1"),
            lambda y: exists(
                y["array2"], lambda x: x["reference"] == lit("bar").cast(
                    "string")),
        ).alias("age"),
    )
    result_df: DataFrame = mapper.transform(df=source_df)

    result_df.printSchema()
    result_df.show(truncate=False)

    assert result_df.count() == 2
    assert result_df.select("age").collect()[0][0] == []
    assert result_df.select(
        "age").collect()[1][0][0]["array2"][0]["reference"] == "bar"
def test_auto_mapper_sanitize(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (
                1,
                "MedStar NRN PMR at Good Samaritan Hosp Good Health Center",
                "Imran",
                "1970-01-01",
            ),
            (2, "Vidal", "Michael", "1970-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=[
            "member_id"
        ]).columns(my_column=A.column("last_name").sanitize(replacement="."))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    not_normal_characters: str = r"[^\w\r\n\t _.,!\"'/$-]"
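    # sanitize() replaces any character outside this whitelist with the
    # replacement string ("." here).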

    assert str(sql_expressions["my_column"]) == str(
        regexp_replace(col("b.last_name"), not_normal_characters,
                       ".").alias("my_column"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show(truncate=False)

    # noinspection SpellCheckingInspection
    assert (result_df.where("member_id == 1").select("my_column").collect()[0]
            [0] == "MedStar NRN PMR at Good Samaritan Hosp.Good Health Center")
    # noinspection SpellCheckingInspection
    assert (result_df.where("member_id == 2").select("my_column").collect()[0]
            [0] == "Vidal")
def test_automapper_null_if_empty(spark_session: SparkSession) -> None:
    # Arrange
    clean_spark_session(session=spark_session)
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Vidal", "Michael", ""),
            (3, "Vidal3", "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    source_df.show()

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(age=A.column("my_age").to_null_if_empty())

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["age"]) == str(
        when(col("b.my_age").eqNullSafe(""),
             lit(None)).otherwise(col("b.my_age")).alias("age"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select(
        "age").collect()[0][0] == "54"
    assert result_df.where("member_id == 2").select(
        "age").collect()[0][0] is None
    assert result_df.where("member_id == 3").select(
        "age").collect()[0][0] is None

    assert dict(result_df.dtypes)["age"] == "string"
def test_automapper_if_list(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Qureshi", "Imran", "59"),
            (3, "Vidal", "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(view="members",
                        source_view="patients",
                        keys=["member_id"]).columns(age=A.if_(
                            column=A.column("my_age"),
                            check=["54", "59"],
                            value=A.number(A.column("my_age")),
                            else_=A.number(A.text("100")),
                        ))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["age"]) == str(
        when(col("b.my_age").isin(["54", "59"]),
             col("b.my_age").cast("long")).otherwise(
                 lit("100").cast(StringType()).cast(LongType())).alias("age"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select(
        "age").collect()[0][0] == 54
    assert result_df.where("member_id == 2").select(
        "age").collect()[0][0] == 59
    assert result_df.where("member_id == 3").select(
        "age").collect()[0][0] == 100
    assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
def test_auto_mapper_complex_with_extension(
        spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", 45),
            (2, "Vidal", "Michael", 35),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(view="members",
                        source_view="patients",
                        keys=["member_id"],
                        drop_key_columns=False).complex(
                            MyClass(
                                name=A.column("last_name"),
                                age=A.number(A.column("my_age")),
                                extension=AutoMapperList([
                                    MyProcessingStatusExtension(
                                        processing_status=A.text("foo"),
                                        request_id=A.text("bar"),
                                        date_processed=A.date("2021-01-01"))
                                ])))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    assert str(sql_expressions["name"]) == str(
        col("b.last_name").cast("string").alias("name"))
    assert str(sql_expressions["age"]) == str(
        col("b.my_age").cast("int").alias("age"))

    result_df.printSchema()
    result_df.show(truncate=False)

    assert result_df.where("member_id == 1").select(
        "name").collect()[0][0] == "Qureshi"

    assert dict(result_df.dtypes)["age"] == "int"
def test_auto_mapper_fhir_reference(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi"),
            (2, "Vidal"),
        ],
        ["member_id", "last_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        patient=Patient(
            id_=FhirId(A.column("last_name")),
            managingOrganization=Reference(
                reference=FhirReference("Organization", A.column("last_name"))
            ),
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show(truncate=False)

    assert (
        result_df.where("member_id == 1")
        .selectExpr("patient.managingOrganization.reference")
        .collect()[0][0]
        == "Organization/Qureshi"
    )
    assert (
        result_df.where("member_id == 2")
        .selectExpr("patient.managingOrganization.reference")
        .collect()[0][0]
        == "Organization/Vidal"
    )
def test_auto_mapper_struct(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(dst2=A.struct({
        "use": "usual",
        "family": "imran"
    }))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    assert_compare_expressions(
        sql_expressions["dst2"],
        struct(lit("usual").alias("use"),
               lit("imran").alias("family")).alias("dst2"),
    )

    result_df.printSchema()
    result_df.show()

    result_df.where("member_id == 1").select("dst2").show()
    result_df.where("member_id == 1").select("dst2").printSchema()

    result = result_df.where("member_id == 1").select("dst2").collect()[0][0]
    assert result[0] == "usual"
    assert result[1] == "imran"
def test_auto_mapper_multiple_columns_simpler_syntax(
        spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False).columns(dst1="src1").columns(
            dst2=["address1"]).columns(dst3=["address1", "address2"]).columns(
                dst4=[dict(use="usual", family="[last_name]")])
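    # The "[last_name]" bracket syntax pulls the value from the source column,
    # as the dst4 assertions below show.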

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert len(result_df.columns) == 5
    assert result_df.where("member_id == 1").select(
        "dst1").collect()[0][0] == "src1"
    assert result_df.where("member_id == 1").select(
        "dst2").collect()[0][0][0] == "address1"

    assert result_df.where("member_id == 1").select(
        "dst3").collect()[0][0][0] == "address1"
    assert result_df.where("member_id == 1").select(
        "dst3").collect()[0][0][1] == "address2"

    assert result_df.where("member_id == 1").select(
        "dst4").collect()[0][0][0][0] == "usual"
    assert result_df.where("member_id == 1").select(
        "dst4").collect()[0][0][0][1] == "Qureshi"
def test_automapper_copy_unmapped_properties(
        spark_session: SparkSession) -> None:
    # Arrange
    clean_spark_session(session=spark_session)
    spark_session.createDataFrame(
        [
            ("Qureshi", "Imran", "Iqbal"),
            ("Vidal", "Michael", "Lweis"),
        ],
        ["last_name", "first_name", "middle_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        copy_all_unmapped_properties=True,
        copy_all_unmapped_properties_exclude=["first_name"],
    ).columns(last_name="last_name")
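    # copy_all_unmapped_properties copies middle_name through; first_name is
    # excluded and last_name is already mapped.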

    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    mapper.transform(df=source_df)
    result_df: DataFrame = spark_session.table("members")

    # Assert
    result_df.printSchema()
    result_df.show()

    assert len(result_df.columns) == 2, list(result_df.columns)

    assert result_df.columns == ["last_name", "middle_name"]
def test_auto_mapper_date_column_typed(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01"),
            (2, "Vidal", "Michael", "1970-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    source_df = source_df.withColumn(
        "date_of_birth", to_date("date_of_birth", format="yyyy-MM-dd")
    )

    assert dict(source_df.dtypes)["date_of_birth"] == "date"

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(birthDate=A.date(A.column("date_of_birth")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["birthDate"]) == str(
        col("b.date_of_birth").alias("birthDate")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("birthDate").collect()[0][
        0
    ] == date(1970, 1, 1)
    assert result_df.where("member_id == 2").select("birthDate").collect()[0][
        0
    ] == date(1970, 2, 2)

    assert dict(result_df.dtypes)["birthDate"] == "date"
def test_auto_mapper_handles_duplicates(spark_session: SparkSession) -> None:
    # Arrange
    clean_spark_session(session=spark_session)
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Qureshi", "Imran"),
            (3, "Qureshi", "Imran2"),
            (4, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    # Act
    mapper = AutoMapper(view="members",
                        source_view="patients",
                        keys=["member_id"
                              ]).columns(dst1="src1",
                                         dst2=A.column("last_name"),
                                         dst3=A.column("first_name"))

    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    mapper.transform(df=source_df)
    result_df: DataFrame = spark_session.table("members")

    # Assert
    result_df.printSchema()
    result_df.show()

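    # member_id is a key column and is dropped by default, so rows 1 and 2
    # produce identical output rows and are de-duplicated, leaving 3 rows.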
    assert result_df.count() == 3
def test_auto_mapper_full_no_views(spark_session: SparkSession) -> None:
    # Arrange
    source_df = spark_session.createDataFrame([
        (1, 'Qureshi', 'Imran'),
        (2, 'Vidal', 'Michael'),
    ], ['member_id', 'last_name', 'first_name'])

    # example of a variable
    client_address_variable: str = "address1"

    # Act
    mapper = AutoMapper(keys=["member_id"], drop_key_columns=False).columns(
        dst1="src1",
        dst2=AutoMapperList([client_address_variable]),
        dst3=AutoMapperList([client_address_variable, "address2"]))

    company_name: str = "Microsoft"

    if company_name == "Microsoft":
        mapper = mapper.columns(dst4=AutoMapperList(
            [A.complex(use="usual", family=A.column("last_name"))]))

    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=source_df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert len(result_df.columns) == 5
    assert result_df.where("member_id == 1").select(
        "dst1").collect()[0][0] == "src1"
    assert result_df.where("member_id == 1").select(
        "dst2").collect()[0][0][0] == "address1"

    assert result_df.where("member_id == 1").select(
        "dst3").collect()[0][0][0] == "address1"
    assert result_df.where("member_id == 1").select(
        "dst3").collect()[0][0][1] == "address2"

    assert result_df.where("member_id == 1").select(
        "dst4").collect()[0][0][0][0] == "usual"
    assert result_df.where("member_id == 1").select(
        "dst4").collect()[0][0][0][1] == "Qureshi"
def test_auto_mapper_date_literal(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(birthDate=A.date("1970-01-01"))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["birthDate"]) == str(
        coalesce(
            to_date(lit("1970-01-01"), format="y-M-d"),
            to_date(lit("1970-01-01"), format="yyyyMMdd"),
            to_date(lit("1970-01-01"), format="M/d/y"),
        ).alias("birthDate")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("birthDate").collect()[0][
        0
    ] == date(1970, 1, 1)
    assert result_df.where("member_id == 2").select("birthDate").collect()[0][
        0
    ] == date(1970, 1, 1)
def test_auto_mapper_array_multiple_items_with_null(
    spark_session: SparkSession,
) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df: DataFrame = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(dst2=AutoMapperList(["address1", "address2", None]))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["dst2"]) == str(
        filter(array(lit("address1"), lit("address2"), lit(None)),
               lambda x: x.isNotNull()).alias("dst2"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (result_df.where("member_id == 1").select("dst2").collect()[0][0][0]
            == "address1")
    assert (result_df.where("member_id == 1").select("dst2").collect()[0][0][1]
            == "address2")
    assert (result_df.where("member_id == 2").select("dst2").collect()[0][0][0]
            == "address1")
    assert (result_df.where("member_id == 2").select("dst2").collect()[0][0][1]
            == "address2")
def test_auto_mapper_coalesce(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", None),
            (2, None, "Michael", "1970-02-02"),
            (3, None, "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=[
            "member_id"
        ]).columns(my_column=A.coalesce(A.column(
            "last_name"), A.column("date_of_birth"), A.text("last_resort")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["my_column"]) == str(
        coalesce(
            col("b.last_name"),
            col("b.date_of_birth"),
            lit("last_resort").cast(StringType()),
        ).alias("my_column"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (result_df.where("member_id == 1").select("my_column").collect()[0]
            [0] == "Qureshi")
    assert (result_df.where("member_id == 2").select("my_column").collect()[0]
            [0] == "1970-02-02")
    assert (result_df.where("member_id == 3").select("my_column").collect()[0]
            [0] == "last_resort")
def test_auto_mapper_datetime_column_default(
        spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "18922"),
            (2, "Vidal", "Michael", "1609390500"),
        ],
        ["member_id", "last_name", "first_name", "ts"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients",
        keys=["member_id"]).columns(timestamp=A.unix_timestamp(A.column("ts")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["timestamp"],
        to_timestamp(from_unixtime(col("b.ts"), "yyyy-MM-dd HH:mm:ss"),
                     "yyyy-MM-dd HH:mm:ss").alias("timestamp"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.count() == 2

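    # The epoch seconds are rendered in the session time zone (assumed UTC here):
    # 18922s -> 1970-01-01 05:15:22, 1609390500s -> 2020-12-31 04:55:00.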
    assert result_df.where("member_id == 1").select(
        "timestamp").collect()[0][0] == datetime(1970, 1, 1, 5, 15, 22)
    assert result_df.where("member_id == 2").select(
        "timestamp").collect()[0][0] == datetime(2020, 12, 31, 4, 55, 0)

    assert dict(result_df.dtypes)["timestamp"] == "timestamp"
def test_auto_mapper_split_by_delimiter_and_transform(
    spark_session: SparkSession,
) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01"),
            (2, "Vidal|Bates", "Michael", "1970-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]).complex(
            MyObject(my_column=A.transform(
                A.split_by_delimiter(A.column("last_name"), "|"),
                A.complex(bar=A.field("_"), bar2=A.field("_")),
            )))
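    # Inside A.transform, A.field("_") refers to the current element of the
    # split array, so each name part becomes a struct with bar/bar2 fields.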

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    # assert str(sql_expressions["my_column"]) == str(
    #     split(col("b.last_name"), "[|]", -1).alias("my_column")
    # )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (result_df.where("member_id == 1").select("my_column").collect()[0]
            [0][0]["bar"] == "Qureshi")

    assert (result_df.where("member_id == 2").select("my_column").collect()[0]
            [0][0]["bar"] == "Vidal")
    assert (result_df.where("member_id == 2").select("my_column").collect()[0]
            [0][1]["bar"] == "Bates")
def test_auto_mapper_lpad(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "1234"),
            (2, "1234567"),
            (3, "123456789"),
        ],
        ["member_id", "empi"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(my_column=A.lpad(column=A.column("empi"), length=9, pad="0"))

    # Assert
    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)

    assert str(sql_expressions["my_column"]) == str(
        lpad(col=col("b.empi"), len=9, pad="0").alias("my_column")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 1").select("my_column").collect()[0][0]
        == "000001234"
    )
    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 2").select("my_column").collect()[0][0]
        == "001234567"
    )

    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 3").select("my_column").collect()[0][0]
        == "123456789"
    )
def test_automapper_if_regex(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Vidal", "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(view="members",
                        source_view="patients",
                        keys=["member_id"]).columns(
                            age=A.if_regex(column=A.column("my_age"),
                                           check="5*",
                                           value=A.number(A.column("my_age")),
                                           else_=A.number(A.text("100"))))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["age"]) == str(
        when(col("b.my_age").rlike("5*"),
             col("b.my_age").cast(IntegerType())).otherwise(
                 lit("100").cast(StringType()).cast(
                     IntegerType())).alias("age"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select(
        "age").collect()[0][0] == 54
    assert result_df.where("member_id == 2").select(
        "age").collect()[0][0] == 100

    assert dict(result_df.dtypes)["age"] == "int"
def test_auto_mapper_complex_with_defined_class(
        spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", 45),
            (2, "Vidal", "Michael", 35),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).complex(
        MyClass(name=A.column("last_name"), age=A.number(A.column("my_age"))))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    assert str(sql_expressions["name"]) == str(
        col("b.last_name").cast("string").alias("name"))
    assert str(sql_expressions["age"]) == str(
        col("b.my_age").cast("long").alias("age"))

    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select(
        "name").collect()[0][0] == "Qureshi"

    assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
def test_auto_mapper_amount(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54.45"),
            (2, "Vidal", "Michael", "67.67"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(view="members",
                        source_view="patients",
                        keys=["member_id"
                              ]).columns(age=A.amount(A.column("my_age")))

    debug_text: str = mapper.to_debug_string()
    print(debug_text)

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["age"]) == str(
        col("b.my_age").cast("float").alias("age"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age").collect()[0][0] == approx(
        54.45
    )
    assert result_df.where("member_id == 2").select("age").collect()[0][0] == approx(
        67.67
    )

    assert dict(result_df.dtypes)["age"] == "float"
def test_automapper_concat_array(spark_session: SparkSession) -> None:
    clean_spark_session(spark_session)
    data_dir: Path = Path(__file__).parent.joinpath("./")

    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file),
                                                   multiLine=True)

    source_df.createOrReplaceTempView("patients")

    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients",
        drop_key_columns=False).columns(
            age=A.column("identifier").concat(A.text("foo").to_array()))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        concat(col("b.identifier"),
               array(lit("foo").cast("string"))).alias("age"),
    )
    result_df: DataFrame = mapper.transform(df=source_df)

    result_df.show(truncate=False)

    assert result_df.where("id == 1730325416").select(
        "age").collect()[0][0] == [
            "bar",
            "foo",
        ]

    assert result_df.where("id == 1467734301").select(
        "age").collect()[0][0] == [
            "John",
            "foo",
        ]
def test_automapper_map(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "Y"),
            (2, "Vidal", "Michael", "N"),
            (3, "Vidal", "Michael", "f"),
        ],
        ["member_id", "last_name", "first_name", "has_kids"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients",
        keys=["member_id"]).columns(has_kids=A.map(A.column("has_kids"), {
            "Y": "Yes",
            "N": "No"
        }, "unknown"))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert str(sql_expressions["has_kids"]) == str(
        when(col("b.has_kids").eqNullSafe(lit("Y")), lit("Yes")).when(
            col("b.has_kids").eqNullSafe(lit("N")),
            lit("No")).otherwise(lit("unknown")).alias("___has_kids"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select(
        "has_kids").collect()[0][0] == "Yes"
    assert result_df.where("member_id == 2").select(
        "has_kids").collect()[0][0] == "No"
    assert result_df.where("member_id == 3").select(
        "has_kids").collect()[0][0] == "unknown"
def test_auto_mapper_complex_with_mappers(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, 'Qureshi', 'Imran'),
            (2, 'Vidal', 'Michael'),
        ], ['member_id', 'last_name', 'first_name']
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False
    ).columns(dst2=A.complex(use="usual", family=A.complex(given="foo")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(
        source_df=source_df
    )
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    assert str(sql_expressions["dst2"]) == str(
        struct(
            expr("usual").alias("use"),
            struct(expr("foo").alias("given")).alias("family")
        ).alias("dst2")
    )

    result_df.printSchema()
    result_df.show()

    result = result_df.where("member_id == 1").select("dst2").collect()[0][0]
    assert result[0] == "usual"
    assert result[1][0] == "foo"