Example #1
    def test_should_return_same_columns_when_column_not_exist_in_df(self):
        transformation = Transformation(self.test_data)
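        # "dt" is not a column of the test data, so the rename should be a no-op.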
        transformation.rename({"dt": "date"})

        current_result = transformation.dataframe.columns
        expected_result = self.test_data.columns

        self.assertEqual(current_result, expected_result)
Example #2
    def test_should_return_same_columns_when_column_param_is_empty(self):
        transformation = Transformation(self.test_data)
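        # An empty rename mapping should leave every column name unchanged.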
        transformation.rename({})

        current_result = transformation.dataframe.columns
        expected_result = self.test_data.columns

        self.assertEqual(current_result, expected_result)
Example #3
    def test_should_replace_two_column_names(self):
        transformation = Transformation(self.test_data)
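        # Rename two existing columns ("mag" and "status") in a single call.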
        transformation.rename({"mag": "magnitude", "status": "new_status"})

        current_result = transformation.dataframe.columns
        expected_result = [
            "date", "place", "magnitude", "new_status", "coordinates", "alert"
        ]

        self.assertEqual(current_result, expected_result)
Example #4
    # NOTE: this integration test needs the API fetch to be patched for the
    # injected mock_get_data argument; the exact patch target below
    # (ApiInput.get_data) is an assumption for illustration.
    @mock.patch.object(ApiInput, "get_data")
    def test_should_return_transformed_data_using_all_pipeline_components(
            self, mock_get_data):
        self.create_tmp_folder()

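        # Stub the API call so extraction returns deterministic fake data.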
        fake_api_input = ApiInput(self.FAKE_URL)
        mock_get_data.return_value = self.FAKE_INPUT_DATA

        extraction_process = Extraction(fake_api_input)
        extraction_process.extract()

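        # Materialize the extracted rows as a DataFrame with the raw schema.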
        raw_data = extraction_process.data
        raw_df = self.spark.createDataFrame(
            raw_data,
            ["date", "place", "mag", "status", "coordinates", "alert"])

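        # Apply every transformation step the pipeline supports: drop, rename,
        # null replacement, lowercasing, type conversion, and column splitting.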
        transformation_process = Transformation(raw_df)
        transformation_process.drop(["alert"])
        transformation_process.rename({"mag": "magnitude", "place": "city"})
        transformation_process.replace_null_values({"status": "Automatic"})
        transformation_process.lowercase(["status"])
        transformation_process.convert_data_type({"date": IntegerType()})
        transformation_process.split_content(
            "coordinates", ["longitude", "latitude", "depth"])
        transformed_df = transformation_process.dataframe

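        # Load the transformed DataFrame to CSV through the storage abstraction.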
        csv_storage = CsvStorage(self.OUTPUT_FILEPATH)
        loading_process = Loading(csv_storage)
        loading_process.load(transformed_df)

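        # Read the CSV back and compare against the expected rows.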
        current_result = self.spark \
            .read \
            .csv(self.OUTPUT_FILEPATH, header=True, inferSchema=True) \
            .collect()
        expected_result = self.spark \
            .createDataFrame(
                self.FAKE_EXPECTED_DATA,
                ["date", "city", "magnitude", "status",
                 "longitude", "latitude", "depth"]) \
            .collect()

        self.assertEqual(current_result, expected_result)

        self.delete_test_file()