Code example #1
 def test_insert_to_non_existent_table(self):
     dataset = SparkHiveDataSet(database="default_1",
                                table="table_not_yet_created",
                                write_mode="insert")
     dataset.save(_generate_spark_df_one())
     assert_df_equal(dataset.load().sort("name"),
                     _generate_spark_df_one().sort("name"))
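These examples call two helpers, _generate_spark_df_one() and assert_df_equal(), whose definitions live elsewhere in the test module. A minimal sketch of what they might look like; the column names and sample rows are assumptions inferred from the (name string, age integer) tables the tests create:

 from pyspark.sql import DataFrame, SparkSession
 from pyspark.sql.types import IntegerType, StringType, StructField, StructType

 def _generate_spark_df_one() -> DataFrame:
     # Assumed sample data: a (name, age) frame matching the
     # "(name string, age integer)" tables created in these tests.
     schema = StructType([
         StructField("name", StringType(), True),
         StructField("age", IntegerType(), True),
     ])
     data = [("Alex", 31), ("Bob", 12), ("Clarke", 65), ("Dave", 29)]
     return SparkSession.builder.getOrCreate().createDataFrame(data, schema)

 def assert_df_equal(expected: DataFrame, result: DataFrame) -> None:
     # Order-insensitive equality: collect both frames and compare sorted
     # rows, so callers may pass sorted or unsorted frames of either order.
     assert sorted(expected.collect()) == sorted(result.collect())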
Code example #2
 def test_read_from_non_existent_table(self):
     dataset = SparkHiveDataSet(database="default_1",
                                table="table_doesnt_exist",
                                write_mode="insert")
     with pytest.raises(
             DataSetError,
             match="requested table not found: default_1.table_doesnt_exist",
     ):
         dataset.load()
Code example #3
 def test_overwrite_empty_table(self, spark_hive_session):
     spark_hive_session.sql(
         "create table default_1.test_overwrite_empty_table (name string, age integer)"
     ).take(1)
     dataset = SparkHiveDataSet(
         database="default_1",
         table="test_overwrite_empty_table",
         write_mode="overwrite",
     )
     dataset.save(_generate_spark_df_one())
     assert_df_equal(dataset.load(), _generate_spark_df_one())
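Examples #3 and #6 take a spark_hive_session pytest fixture (example #5 names it spark_session, presumably the same kind of session). Its definition is not shown here; a plausible minimal sketch, assuming a local Hive-enabled session backed by a throwaway warehouse directory:

 import tempfile

 import pytest
 from pyspark.sql import SparkSession

 @pytest.fixture(scope="module")
 def spark_hive_session():
     # Hive-enabled local session with a temporary warehouse so that the
     # "create table" statements in the tests have somewhere to write.
     with tempfile.TemporaryDirectory() as warehouse:
         spark = (
             SparkSession.builder.master("local[1]")
             .config("spark.sql.warehouse.dir", warehouse)
             .enableHiveSupport()
             .getOrCreate()
         )
         spark.sql("create database if not exists default_1")
         yield spark
         spark.stop()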
Code example #4
 def test_invalid_pk_provided(self):
     dataset = SparkHiveDataSet(
         database="default_1",
         table="table_1",
         write_mode="upsert",
         table_pk=["column_doesnt_exist"],
     )
     with pytest.raises(
         DataSetError,
         match=r"Columns \[column_doesnt_exist\] selected as primary key\(s\) not "
         r"found in table default_1\.table_1",
     ):
         dataset.save(_generate_spark_df_one())
Code example #5
 def test_upsert_empty_table(self, spark_session):
     spark_session.sql(
         "create table default_1.test_upsert_empty_table (name string, age integer)"
     ).take(1)
     dataset = SparkHiveDataSet(
         database="default_1",
         table="test_upsert_empty_table",
         write_mode="upsert",
         table_pk=["name"],
     )
     dataset.save(_generate_spark_df_one())
     assert_df_equal(
         dataset.load().sort("name"), _generate_spark_df_one().sort("name")
     )
Code example #6
 def test_invalid_schema_insert(self, spark_hive_session):
     spark_hive_session.sql(
         "create table default_1.test_invalid_schema_insert "
         "(name string, additional_column_on_hive integer)").take(1)
     dataset = SparkHiveDataSet(
         database="default_1",
         table="test_invalid_schema_insert",
         write_mode="insert",
     )
     with pytest.raises(
             DataSetError,
             match=r"dataset does not match hive table schema\.\n"
             r"Present on insert only: \[\('age', 'int'\)\]\n"
             r"Present on schema only: \[\('additional_column_on_hive', 'int'\)\]",
     ):
         dataset.save(_generate_spark_df_one())
Code example #7
 def test_upsert_config_err(self):
     # omitting table_pk in upsert mode should raise a config error
     with pytest.raises(
             DataSetError,
             match="table_pk must be set to utilise upsert read mode"):
         SparkHiveDataSet(database="default_1",
                          table="table_1",
                          write_mode="upsert")
Code example #8
 def test_cant_pickle(self):
     import pickle  # pylint: disable=import-outside-toplevel

     with pytest.raises(pickle.PicklingError):
         pickle.dumps(
             SparkHiveDataSet(database="default_1",
                              table="table_1",
                              write_mode="overwrite"))
Code example #9
 def test_invalid_write_mode_provided(self):
     with pytest.raises(
             DataSetError,
             match="Invalid write_mode provided: not_a_write_mode"):
         SparkHiveDataSet(
             database="default_1",
             table="table_1",
             write_mode="not_a_write_mode",
             table_pk=["name"],
         )
Code example #10
 def test_invalid_write_mode_provided(self):
     pattern = (r"Invalid `write_mode` provided: not_a_write_mode\. "
                r"`write_mode` must be one of: insert, upsert, overwrite")
     with pytest.raises(DataSetError, match=pattern):
         SparkHiveDataSet(
             database="default_1",
             table="table_1",
             write_mode="not_a_write_mode",
             table_pk=["name"],
         )
Code example #11
 def test_invalid_pk_provided(self):
     with pytest.raises(
             DataSetError,
             match=r"columns \[column_doesnt_exist\] selected as PK not "
             r"found in table default_1\.table_1",
     ):
         SparkHiveDataSet(
             database="default_1",
             table="table_1",
             write_mode="upsert",
             table_pk=["column_doesnt_exist"],
         )
Code example #12
 def test_read_existing_table(self):
     dataset = SparkHiveDataSet(database="default_1",
                                table="table_1",
                                write_mode="overwrite")
     assert_df_equal(_generate_spark_df_one(), dataset.load())
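Example #12 assumes default_1.table_1 already exists and holds the rows from _generate_spark_df_one(). A sketch of the setup it relies on; the helper name _prepopulate_table_1 is hypothetical:

 def _prepopulate_table_1(spark):
     # Hypothetical setup helper: create default_1.table_1 and load it
     # with the rows the read test expects to find.
     spark.sql(
         "create table if not exists default_1.table_1 (name string, age integer)"
     )
     _generate_spark_df_one().write.insertInto("default_1.table_1")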