from datetime import date, datetime
from decimal import Decimal
from os import environ
from pathlib import Path
from typing import Dict

from pyspark.sql import Column, DataFrame, SparkSession
from pyspark.sql.functions import (
    array,
    array_join,
    coalesce,
    col,
    concat,
    date_format,
    exists,
    filter,
    from_unixtime,
    hash,
    lit,
    lpad,
    regexp_extract,
    regexp_replace,
    split,
    struct,
    to_date,
    to_timestamp,
    transform,
    when,
)
from pyspark.sql.types import LongType, StringType
from pytest import approx

# NOTE: the module paths for the project-specific imports below are assumed.
# AutoMapper, AutoMapperHelpers (A), AutoMapperList, and
# AutoMapperDataTypeLiteral come from the spark_auto_mapper package;
# assert_compare_expressions, clean_spark_session, and the fixture classes
# MyClass, MyObject, and MyProcessingStatusExtension are defined in this test
# suite's helper modules.
from spark_auto_mapper.automappers.automapper import AutoMapper
from spark_auto_mapper.data_types.list import AutoMapperList
from spark_auto_mapper.data_types.literal import AutoMapperDataTypeLiteral
from spark_auto_mapper.helpers.automapper_helpers import AutoMapperHelpers as A


def test_auto_mapper_amount(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54.45"),
            (2, "Vidal", "Michael", "67.67"),
            (3, "Alex", "Hearn", "1286782.17"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        age=A.amount(A.column("my_age")),
        null_col=A.amount(AutoMapperDataTypeLiteral(None)),
    )

    debug_text: str = mapper.to_debug_string()
    print(debug_text)

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("double").alias("age")
    )
    assert_compare_expressions(
        sql_expressions["null_col"], lit(None).cast("double").alias("null_col")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert approx(
        result_df.where("member_id == 1").select("age", "null_col").collect()[0][:]
    ) == (approx(54.45), None)
    assert approx(
        result_df.where("member_id == 2").select("age", "null_col").collect()[0][:]
    ) == (approx(67.67), None)
    # Ensure an exact string match in situations where float arithmetic errors
    # might occur
    assert (
        str(result_df.where("member_id == 3").select("age").collect()[0][0])
        == "1286782.17"
    )

    assert dict(result_df.dtypes)["age"] == "double"
    assert dict(result_df.dtypes)["null_col"] == "double"
def test_automapper_filter(spark_session: SparkSession) -> None:
    clean_spark_session(spark_session)
    data_dir: Path = Path(__file__).parent.joinpath("./")
    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file), multiLine=True)
    source_df.createOrReplaceTempView("patients")
    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(view="members", source_view="patients").columns(
        age=A.filter(
            column=A.column("identifier"), func=lambda x: x["use"] == lit("usual")
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        filter("b.identifier", lambda x: x["use"] == lit("usual")).alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=source_df)
    result_df.show(truncate=False)
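# Several tests in this module read a data.json fixture that ships with the
# test suite and is not reproduced here. Inferred from the assertions (a
# hedged sketch, not the actual fixture), each record looks roughly like:
#
#   {
#     "id": "1730325416",
#     "identifier": [
#       {
#         "use": "usual",
#         "system": "http://hl7.org/fhir/sid/us-npi",
#         "value": "1730325416"
#       }
#     ],
#     "array1": [{"array2": [{"reference": "bar"}]}]
#   }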
def test_automapper_map(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "Y"),
            (2, "Vidal", "Michael", "N"),
            (3, "Vidal", "Michael", "f"),
            (4, "Qureshi", "Imran", None),
        ],
        ["member_id", "last_name", "first_name", "has_kids"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        has_kids=A.map(
            A.column("has_kids"),
            {None: "Unspecified", "Y": "Yes", "N": "No"},
            "unknown",
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["has_kids"],
        when(col("b.has_kids").eqNullSafe(lit(None)), lit("Unspecified"))
        .when(col("b.has_kids").eqNullSafe(lit("Y")), lit("Yes"))
        .when(col("b.has_kids").eqNullSafe(lit("N")), lit("No"))
        .otherwise(lit("unknown"))
        .alias("___has_kids"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("has_kids").collect()[0][0] == "Yes"
    assert result_df.where("member_id == 2").select("has_kids").collect()[0][0] == "No"
    assert (
        result_df.where("member_id == 3").select("has_kids").collect()[0][0]
        == "unknown"
    )
    assert (
        result_df.where("member_id == 4").select("has_kids").collect()[0][0]
        == "Unspecified"
    )
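# The mapping above is compiled with Column.eqNullSafe rather than ==: in
# Spark SQL, `NULL = 'Y'` evaluates to NULL, so an ordinary equality check
# would send NULL rows to otherwise() and the None -> "Unspecified" entry
# could never match; `NULL <=> NULL` is true. A minimal sketch of the
# difference (hypothetical helper, not called by any test):
def _null_safe_match_sketch() -> Column:
    # eqNullSafe matches NULL against NULL; == would evaluate to NULL here
    return when(col("has_kids").eqNullSafe(lit(None)), lit("Unspecified")).otherwise(
        lit("unknown")
    )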
def test_auto_mapper_coalesce(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", None),
            (2, None, "Michael", "1970-02-02"),
            (3, None, "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        my_column=A.coalesce(
            A.column("last_name"), A.column("date_of_birth"), A.text("last_resort")
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["my_column"],
        coalesce(
            col("b.last_name"),
            col("b.date_of_birth"),
            lit("last_resort").cast(StringType()),
        ).alias("my_column"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (
        result_df.where("member_id == 1").select("my_column").collect()[0][0]
        == "Qureshi"
    )
    assert (
        result_df.where("member_id == 2").select("my_column").collect()[0][0]
        == "1970-02-02"
    )
    assert (
        result_df.where("member_id == 3").select("my_column").collect()[0][0]
        == "last_resort"
    )
def test_auto_mapper_number(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Vidal", "Michael", "67"),
            (3, "Old", "Methusela", "131026061001"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(
        age=A.number(A.column("my_age")),
        null_field=A.number(AutoMapperDataTypeLiteral(None)),
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("long").alias("age")
    )
    assert_compare_expressions(
        sql_expressions["null_field"], lit(None).cast("long").alias("null_field")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age").collect()[0][0] == 54
    assert result_df.where("member_id == 2").select("age").collect()[0][0] == 67
    assert (
        result_df.where("member_id == 3").select("age").collect()[0][0] == 131026061001
    )
    assert (
        result_df.where("member_id == 1").select("null_field").collect()[0][0] is None
    )

    assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
def test_auto_mapper_datetime_column_default(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "18922"),
            (2, "Vidal", "Michael", "1609390500"),
        ],
        ["member_id", "last_name", "first_name", "ts"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        timestamp=A.unix_timestamp(A.column("ts")),
        literal_val=A.unix_timestamp("1609390500"),
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["literal_val"],
        to_timestamp(
            from_unixtime("1609390500", "yyyy-MM-dd HH:mm:ss"), "yyyy-MM-dd HH:mm:ss"
        ).alias("literal_val"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.count() == 2
    assert result_df.where("member_id == 1").select("timestamp").collect()[0][
        0
    ] == datetime(1970, 1, 1, 5, 15, 22)
    assert result_df.where("member_id == 2").select("timestamp").collect()[0][
        0
    ] == datetime(2020, 12, 31, 4, 55, 0)
    assert result_df.where("member_id == 1").select("literal_val").collect()[0][
        0
    ] == datetime(2020, 12, 31, 4, 55, 0)
    assert result_df.where("member_id == 2").select("literal_val").collect()[0][
        0
    ] == datetime(2020, 12, 31, 4, 55, 0)

    assert dict(result_df.dtypes)["timestamp"] == "timestamp"
    assert dict(result_df.dtypes)["literal_val"] == "timestamp"
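# The expected datetimes above assume the Spark session timezone is UTC
# (18922 seconds -> 1970-01-01 05:15:22, 1609390500 -> 2020-12-31 04:55:00);
# with a different spark.sql.session.timeZone the collected values shift
# accordingly. A hedged setup sketch, not part of the original test:
#
#   spark_session.conf.set("spark.sql.session.timeZone", "UTC")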
def test_auto_mapper_complex_with_extension(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", 45),
            (2, "Vidal", "Michael", 35),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).complex(
        MyClass(
            name=A.column("last_name"),
            age=A.number(A.column("my_age")),
            extension=AutoMapperList(
                [
                    MyProcessingStatusExtension(
                        processing_status=A.text("foo"),
                        request_id=A.text("bar"),
                        date_processed=A.date("2021-01-01"),
                    )
                ]
            ),
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    assert_compare_expressions(
        sql_expressions["name"], col("b.last_name").cast("string").alias("name")
    )
    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("long").alias("age")
    )

    result_df.printSchema()
    result_df.show(truncate=False)

    assert result_df.where("member_id == 1").select("name").collect()[0][0] == "Qureshi"

    assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
def test_auto_mapper_array_multiple_items_with_null(
    spark_session: SparkSession,
) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df: DataFrame = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(dst2=AutoMapperList(["address1", "address2", None]))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["dst2"],
        when(
            array(lit("address1"), lit("address2"), lit(None)).isNotNull(),
            filter(
                coalesce(array(lit("address1"), lit("address2"), lit(None)), array()),
                lambda x: x.isNotNull(),
            ),
        ).alias("dst2"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (
        result_df.where("member_id == 1").select("dst2").collect()[0][0][0]
        == "address1"
    )
    assert (
        result_df.where("member_id == 1").select("dst2").collect()[0][0][1]
        == "address2"
    )
    assert (
        result_df.where("member_id == 2").select("dst2").collect()[0][0][0]
        == "address1"
    )
    assert (
        result_df.where("member_id == 2").select("dst2").collect()[0][0][1]
        == "address2"
    )
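# AutoMapperList compiles to the pattern asserted above:
# when(arr.isNotNull(), filter(coalesce(arr, array()), x -> x.isNotNull())),
# which drops NULL entries from the resulting array -- hence the trailing None
# never appears in dst2. The same expression built directly with pyspark
# functions (illustrative sketch, not called by any test):
def _null_pruning_sketch() -> Column:
    # keep the array only when it is non-null, then drop null elements
    arr = array(lit("address1"), lit("address2"), lit(None))
    return when(
        arr.isNotNull(), filter(coalesce(arr, array()), lambda x: x.isNotNull())
    )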
def test_auto_mapper_regex_replace_unicode(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (
                1,
                # NOTE: the fixture deliberately contains a non-ASCII character
                # between "Hosp" and "Good" (shown here as \u00a0, a
                # non-breaking space) that falls outside the allowed character
                # class and gets replaced with "." in the assertion below.
                "MedStar NRN PMR at Good Samaritan Hosp\u00a0Good Health Center",
                "Imran",
                "1970-01-01",
            ),
            (2, "Vidal", "Michael", "1970-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    not_normal_characters: str = r"[^\w\r\n\t _.,!\"'/$-]"
    # source_df.select(regexp_extract('last_name', not_normal_characters, 1).alias('d')).show()

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        my_column=A.column("last_name").regex_replace(not_normal_characters, ".")
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["my_column"],
        regexp_replace(col("b.last_name"), not_normal_characters, ".").alias(
            "my_column"
        ),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show(truncate=False)

    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 1").select("my_column").collect()[0][0]
        == "MedStar NRN PMR at Good Samaritan Hosp.Good Health Center"
    )
    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 2").select("my_column").collect()[0][0] == "Vidal"
    )
def test_automapper_if_not_null_or_empty(spark_session: SparkSession) -> None:
    # Arrange
    clean_spark_session(session=spark_session)
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Vidal", "Michael", ""),
            (3, "Vidal3", "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    source_df.show()

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(
        age=A.if_not_null_or_empty(
            A.column("my_age"), A.column("my_age"), A.text("100")
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        when(
            col("b.my_age").isNull() | col("b.my_age").eqNullSafe(""),
            lit("100").cast(StringType()),
        )
        .otherwise(col("b.my_age"))
        .alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age").collect()[0][0] == "54"
    assert result_df.where("member_id == 2").select("age").collect()[0][0] == "100"
    assert result_df.where("member_id == 3").select("age").collect()[0][0] == "100"

    assert dict(result_df.dtypes)["age"] == "string"
def test_auto_mapper_decimal(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54.45"),
            (2, "Vidal", "Michael", "123467.678"),
            (3, "Paul", "Kyle", "13"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(age=A.decimal(A.column("my_age"), 10, 2))

    debug_text: str = mapper.to_debug_string()
    print(debug_text)

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("decimal(10,2)").alias("age")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age").collect()[0][0] == Decimal(
        "54.45"
    )
    assert result_df.where("member_id == 2").select("age").collect()[0][0] == Decimal(
        "123467.68"
    )
    assert result_df.where("member_id == 3").select("age").collect()[0][0] == Decimal(
        "13.00"
    )

    assert dict(result_df.dtypes)["age"] == "decimal(10,2)"
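# Spark's cast to decimal rounds half-up, so "123467.678" lands on
# Decimal("123467.68"), and values with fewer fractional digits are padded out
# to the declared scale ("13" -> Decimal("13.00")).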
def test_automapper_if_list(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54"),
            (2, "Qureshi", "Imran", "59"),
            (3, "Vidal", "Michael", None),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        age=A.if_(
            column=A.column("my_age"),
            check=["54", "59"],
            value=A.number(A.column("my_age")),
            else_=A.number(A.text("100")),
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        when(col("b.my_age").isin(["54", "59"]), col("b.my_age").cast("long"))
        .otherwise(lit("100").cast(StringType()).cast(LongType()))
        .alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age").collect()[0][0] == 54
    assert result_df.where("member_id == 2").select("age").collect()[0][0] == 59
    assert result_df.where("member_id == 3").select("age").collect()[0][0] == 100

    assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
def test_auto_mapper_date_format(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01 12:30"),
            (2, "Vidal", "Michael", "1970-02-02 06:30"),
        ],
        ["member_id", "last_name", "first_name", "opening_time"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    source_df = source_df.withColumn(
        "opening_time", to_timestamp("opening_time", format="yyyy-MM-dd hh:mm")
    )
    assert dict(source_df.dtypes)["opening_time"] == "timestamp"

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        openingTime=A.datetime(A.column("opening_time")).to_date_format("hh:mm:ss")
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["openingTime"],
        date_format(coalesce(to_timestamp(col("b.opening_time"))), "hh:mm:ss").alias(
            "openingTime"
        ),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (
        result_df.where("member_id == 1").select("openingTime").collect()[0][0]
        == "12:30:00"
    )
    assert (
        result_df.where("member_id == 2").select("openingTime").collect()[0][0]
        == "06:30:00"
    )

    # check the type
    assert dict(result_df.dtypes)["openingTime"] == "string"
def test_auto_mapper_hash(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "54"),
            (2, "Vidal", "67"),
            (3, "Vidal", None),
            (4, None, None),
        ],
        ["member_id", "last_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    source_df = source_df.withColumn("my_age", col("my_age").cast("int"))

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(age=A.hash(A.column("my_age"), A.column("last_name")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        hash(col("b.my_age"), col("b.last_name")).cast("string").alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (
        result_df.where("member_id == 1").select("age").collect()[0][0] == "-543157534"
    )
    assert (
        result_df.where("member_id == 2").select("age").collect()[0][0] == "2048196121"
    )
    assert (
        result_df.where("member_id == 3").select("age").collect()[0][0] == "-80001407"
    )
    assert result_df.where("member_id == 4").select("age").collect()[0][0] == "42"

    assert dict(result_df.dtypes)["age"] == "string"
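# Spark's hash() is Murmur3 with a fixed seed of 42, and NULL inputs leave the
# running hash unchanged -- hashing a row whose inputs are all NULL therefore
# returns the seed itself, which is why member_id 4 maps to the literal "42".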
def test_auto_mapper_boolean(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "0"),
            (2, "Vidal", "Michael", "1"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(
        age=A.boolean(A.column("my_age")),
        is_active=A.boolean("False"),
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("boolean").alias("age")
    )
    assert_compare_expressions(
        sql_expressions["is_active"], lit("False").cast("boolean").alias("is_active")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("age", "is_active").collect()[0][
        :
    ] == (False, False)
    assert result_df.where("member_id == 2").select("age", "is_active").collect()[0][
        :
    ] == (True, False)

    assert dict(result_df.dtypes)["age"] == "boolean"
    assert dict(result_df.dtypes)["is_active"] == "boolean"
def test_auto_mapper_struct(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(dst2=A.struct({"use": "usual", "family": "imran"}))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    assert_compare_expressions(
        sql_expressions["dst2"],
        struct(lit("usual").alias("use"), lit("imran").alias("family")).alias("dst2"),
    )

    result_df.printSchema()
    result_df.show()

    result_df.where("member_id == 1").select("dst2").show()
    result_df.where("member_id == 1").select("dst2").printSchema()

    result = result_df.where("member_id == 1").select("dst2").collect()[0][0]
    assert result[0] == "usual"
    assert result[1] == "imran"
def test_automapper_nested_array_filter_simple_with_array(
    spark_session: SparkSession,
) -> None:
    clean_spark_session(spark_session)

    data_dir: Path = Path(__file__).parent.joinpath("./")
    environ["LOGLEVEL"] = "DEBUG"
    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file), multiLine=True)
    source_df.createOrReplaceTempView("patients")
    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(view="members", source_view="patients").columns(
        age=A.nested_array_filter(
            array_field=A.column("array1"),
            inner_array_field=A.field("array2"),
            match_property="reference",
            match_value=A.text("bar"),
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        filter(
            col("b.array1"),
            lambda y: exists(
                y["array2"], lambda x: x["reference"] == lit("bar").cast("string")
            ),
        ).alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=source_df)
    result_df.printSchema()
    result_df.show(truncate=False)

    assert result_df.count() == 2
    assert result_df.select("age").collect()[0][0] == []
    assert result_df.select("age").collect()[1][0][0]["array2"][0]["reference"] == "bar"
def test_auto_mapper_date_literal(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(birthDate=A.date("1970-01-01"))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["birthDate"],
        coalesce(
            to_date(lit("1970-01-01"), format="y-M-d"),
            to_date(lit("1970-01-01"), format="yyyyMMdd"),
            to_date(lit("1970-01-01"), format="M/d/y"),
        ).alias("birthDate"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("birthDate").collect()[0][
        0
    ] == date(1970, 1, 1)
    assert result_df.where("member_id == 2").select("birthDate").collect()[0][
        0
    ] == date(1970, 1, 1)
def test_auto_mapper_date_column_typed(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01"),
            (2, "Vidal", "Michael", "1970-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    source_df = source_df.withColumn(
        "date_of_birth", to_date("date_of_birth", format="yyyy-MM-dd")
    )
    assert dict(source_df.dtypes)["date_of_birth"] == "date"

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(birthDate=A.date(A.column("date_of_birth")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["birthDate"], col("b.date_of_birth").alias("birthDate")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("birthDate").collect()[0][
        0
    ] == date(1970, 1, 1)
    assert result_df.where("member_id == 2").select("birthDate").collect()[0][
        0
    ] == date(1970, 2, 2)

    assert dict(result_df.dtypes)["birthDate"] == "date"
def test_auto_mapper_amount_typed(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "54.45"),
            (2, "Vidal", "Michael", "67.67"),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")
    source_df = source_df.withColumn("my_age", col("my_age").cast("float"))

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(age=A.amount(A.column("my_age")))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(sql_expressions["age"], col("b.my_age").alias("age"))

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert approx(
        result_df.where("member_id == 1").select("age").collect()[0][0]
    ) == approx(54.45)
    assert approx(
        result_df.where("member_id == 2").select("age").collect()[0][0]
    ) == approx(67.67)

    assert dict(result_df.dtypes)["age"] == "double"
def test_auto_mapper_split_by_delimiter(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01"),
            (2, "Vidal|Bates", "Michael", "1970-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(my_column=A.split_by_delimiter(A.column("last_name"), "|"))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["my_column"],
        split(col("b.last_name"), "[|]", -1).alias("my_column"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("my_column").collect()[0][0] == [
        "Qureshi"
    ]
    assert result_df.where("member_id == 2").select("my_column").collect()[0][0] == [
        "Vidal",
        "Bates",
    ]
def test_automapper_concat_array(spark_session: SparkSession) -> None:
    clean_spark_session(spark_session)
    data_dir: Path = Path(__file__).parent.joinpath("./")
    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file), multiLine=True)
    source_df.createOrReplaceTempView("patients")
    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", drop_key_columns=False
    ).columns(age=A.column("identifier").concat(A.text("foo").to_array()))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        concat(col("b.identifier"), array(lit("foo").cast("string"))).alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=source_df)
    result_df.show(truncate=False)

    assert result_df.where("id == 1730325416").select("age").collect()[0][0] == [
        "bar",
        "foo",
    ]
    assert result_df.where("id == 1467734301").select("age").collect()[0][0] == [
        "John",
        "foo",
    ]
def test_auto_mapper_lpad(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "1234"),
            (2, "1234567"),
            (3, "123456789"),
        ],
        ["member_id", "empi"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(my_column=A.lpad(column=A.column("empi"), length=9, pad="0"))

    # Assert
    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)

    assert_compare_expressions(
        sql_expressions["my_column"],
        lpad(col=col("b.empi"), len=9, pad="0").alias("my_column"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 1").select("my_column").collect()[0][0]
        == "000001234"
    )
    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 2").select("my_column").collect()[0][0]
        == "001234567"
    )
    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 3").select("my_column").collect()[0][0]
        == "123456789"
    )
def test_auto_mapper_schema_pruning_with_defined_class(
    spark_session: SparkSession,
) -> None:
    # Arrange
    clean_spark_session(spark_session)
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", 45),
            (2, "Vidal", "Michael", 35),
        ],
        ["member_id", "last_name", "first_name", "my_age"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
    ).complex(MyClass(name=A.column("last_name"), age=A.number(A.column("my_age"))))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    result_df: DataFrame = mapper.transform(df=source_df)

    # Assert
    assert_compare_expressions(
        sql_expressions["name"], col("b.last_name").cast("string").alias("name")
    )
    assert_compare_expressions(
        sql_expressions["age"], col("b.my_age").cast("long").alias("age")
    )

    result_df.printSchema()
    result_df.show()

    assert result_df.where("member_id == 1").select("name").collect()[0][0] == "Qureshi"

    assert dict(result_df.dtypes)["age"] in ("int", "long", "bigint")
def test_automapper_select_one(spark_session: SparkSession) -> None:
    clean_spark_session(spark_session)
    data_dir: Path = Path(__file__).parent.joinpath("./")
    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file), multiLine=True)
    source_df.createOrReplaceTempView("patients")
    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(view="members", source_view="patients").columns(
        age=A.column("identifier")
        .filter(lambda x: x["system"] == "http://hl7.org/fhir/sid/us-npi")
        .select_one(A.field("_.value"))
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        transform(
            filter(
                "b.identifier",
                lambda x: x["system"] == lit("http://hl7.org/fhir/sid/us-npi"),
            ),
            lambda x: x["value"],
        )[0].alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=source_df)
    result_df.show(truncate=False)

    assert result_df.select("age").collect()[0][0] == "1730325416"
    assert result_df.select("age").collect()[1][0] == "1467734301"
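# select_one compiles to transform(filter(...), ...)[0]. Indexing element [0]
# of an empty array returns NULL in Spark (with ANSI mode off) rather than
# raising, so a row with no matching identifier would simply map to NULL.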
def test_auto_mapper_with_filter(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        filter_by="left(last_name,2) == 'Vi'",
    ).columns(lname=A.column("last_name"))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["lname"], col("b.last_name").alias("lname")
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert result_df.count() == 1
    assert result_df.where("member_id == 2").select("lname").collect()[0][0] == "Vidal"
def test_automapper_transform(spark_session: SparkSession) -> None:
    clean_spark_session(spark_session)
    data_dir: Path = Path(__file__).parent.joinpath("./")
    data_json_file: Path = data_dir.joinpath("data.json")

    source_df: DataFrame = spark_session.read.json(str(data_json_file), multiLine=True)
    source_df.createOrReplaceTempView("patients")
    source_df.show(truncate=False)

    # Act
    mapper = AutoMapper(view="members", source_view="patients").complex(
        MyObject(
            age=A.transform(
                A.column("identifier"),
                A.complex(bar=A.field("value"), bar2=A.field("system")),
            )
        )
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["age"],
        transform(
            "b.identifier",
            lambda x: struct(
                col("x[value]").alias("bar"), col("x[system]").alias("bar2")
            ),
        ).alias("age"),
    )

    result_df: DataFrame = mapper.transform(df=source_df)
    result_df.show(truncate=False)

    assert result_df.select("age").collect()[0][0][0][0] == "123"
def test_auto_mapper_regex_extract(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran", "1970-01-01"),
            (2, "Vidal", "Michael", "1980-02-02"),
        ],
        ["member_id", "last_name", "first_name", "date_of_birth"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members", source_view="patients", keys=["member_id"]
    ).columns(my_column=A.regex_extract(A.column("date_of_birth"), r"^(\d{4}).*", 1))

    # Assert
    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)

    assert_compare_expressions(
        sql_expressions["my_column"],
        regexp_extract(col("b.date_of_birth"), r"^(\d{4}).*", 1).alias("my_column"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 1").select("my_column").collect()[0][0] == "1970"
    )
    # noinspection SpellCheckingInspection
    assert (
        result_df.where("member_id == 2").select("my_column").collect()[0][0] == "1980"
    )
def test_auto_mapper_join_using_delimiter(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (123456789, "Gagan", "Chawla", ["MD", "PhD"]),
        ],
        ["npi", "first_name", "last_name", "suffix"],
    ).createOrReplaceTempView("practitioners")

    source_df: DataFrame = spark_session.table("practitioners")

    df = source_df.select("npi")
    df.createOrReplaceTempView("physicians")

    # Act
    mapper = AutoMapper(
        view="physicians", source_view="practitioners", keys=["npi"]
    ).columns(my_column=A.join_using_delimiter(A.column("suffix"), ", "))

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    assert_compare_expressions(
        sql_expressions["my_column"],
        array_join(col("b.suffix"), ", ").alias("my_column"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert (
        result_df.where("npi == 123456789").select("my_column").collect()[0][0]
        == "MD, PhD"
    )
def test_auto_mapper_columns(spark_session: SparkSession) -> None:
    # Arrange
    spark_session.createDataFrame(
        [
            (1, "Qureshi", "Imran"),
            (2, "Vidal", "Michael"),
        ],
        ["member_id", "last_name", "first_name"],
    ).createOrReplaceTempView("patients")

    source_df: DataFrame = spark_session.table("patients")

    df = source_df.select("member_id")
    df.createOrReplaceTempView("members")

    # Act
    mapper = AutoMapper(
        view="members",
        source_view="patients",
        keys=["member_id"],
        drop_key_columns=False,
    ).columns(
        dst1="src1",
        dst2=AutoMapperList(["address1"]),
        dst3=AutoMapperList(["address1", "address2"]),
        dst4=AutoMapperList([A.complex(use="usual", family=A.column("last_name"))]),
    )

    assert isinstance(mapper, AutoMapper)
    sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
    for column_name, sql_expression in sql_expressions.items():
        print(f"{column_name}: {sql_expression}")

    # Assert
    assert len(sql_expressions) == 4
    assert_compare_expressions(sql_expressions["dst1"], lit("src1").alias("dst1"))
    assert_compare_expressions(
        sql_expressions["dst2"],
        when(
            array(lit("address1")).isNotNull(),
            filter(coalesce(array(lit("address1")), array()), lambda x: x.isNotNull()),
        ).alias("dst2"),
    )
    assert_compare_expressions(
        sql_expressions["dst3"],
        when(
            array(lit("address1"), lit("address2")).isNotNull(),
            filter(
                coalesce(array(lit("address1"), lit("address2")), array()),
                lambda x: x.isNotNull(),
            ),
        ).alias("dst3"),
    )
    assert_compare_expressions(
        sql_expressions["dst4"],
        when(
            array(
                struct(lit("usual").alias("use"), col("b.last_name").alias("family"))
            ).isNotNull(),
            filter(
                coalesce(
                    array(
                        struct(
                            lit("usual").alias("use"),
                            col("b.last_name").alias("family"),
                        )
                    ),
                    array(),
                ),
                lambda x: x.isNotNull(),
            ),
        ).alias("dst4"),
    )

    result_df: DataFrame = mapper.transform(df=df)

    # Assert
    result_df.printSchema()
    result_df.show()

    assert len(result_df.columns) == 5
    assert result_df.where("member_id == 1").select("dst1").collect()[0][0] == "src1"
    assert (
        result_df.where("member_id == 1").select("dst2").collect()[0][0][0]
        == "address1"
    )
    assert (
        result_df.where("member_id == 1").select("dst3").collect()[0][0][0]
        == "address1"
    )
    assert (
        result_df.where("member_id == 1").select("dst3").collect()[0][0][1]
        == "address2"
    )
    assert (
        result_df.where("member_id == 1").select("dst4").collect()[0][0][0][0]
        == "usual"
    )
    assert (
        result_df.where("member_id == 1").select("dst4").collect()[0][0][0][1]
        == "Qureshi"
    )