Example #1
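Checks the guard behavior: when the named column does not exist, split_content must leave the DataFrame's schema untouched.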
    def test_should_return_same_df_when_column_not_exists(self):
        transformation = Transformation(self.test_data)
        transformation.split_content("time", ["day", "month", "year"])

        current_result = transformation.dataframe.columns
        expected_result = self.test_data.columns

        self.assertEqual(current_result, expected_result)
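
These tests assume fixtures (self.test_data, self.spark) created elsewhere in the test class. A minimal sketch of what that setUp might look like; the Spark configuration and the sample row are assumptions, not taken from the source:

import unittest

from pyspark.sql import SparkSession


class TransformationTest(unittest.TestCase):
    def setUp(self):
        # Local Spark session for the test run.
        self.spark = SparkSession.builder.master("local[1]").getOrCreate()
        # Hypothetical sample data with the columns the tests expect.
        self.test_data = self.spark.createDataFrame(
            [("2021-01-01", "somewhere", 4.5, "reviewed",
              "10.0,20.0,30.0", "green")],
            ["date", "place", "mag", "status", "coordinates", "alert"])
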
Example #2
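The same guard exercised with degenerate input: an empty column name and an empty list of target columns should also leave the schema unchanged.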
    def test_should_return_same_df_when_columns_param_is_empty(self):
        transformation = Transformation(self.test_data)
        transformation.split_content("", [])

        current_result = transformation.dataframe.columns
        expected_result = self.test_data.columns

        self.assertEqual(current_result, expected_result)
Example #3
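The happy path: the coordinates column is split into longitude, latitude and depth, and the source column is expected to disappear from the schema.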
    def test_should_split_column_content_into_three_new_columns(self):
        transformation = Transformation(self.test_data)
        transformation.split_content("coordinates",
                                     ["longitude", "latitude", "depth"])

        current_result = transformation.dataframe.columns
        expected_result = [
            "date", "place", "mag", "status", "longitude", "latitude", "depth",
            "alert"
        ]

        self.assertCountEqual(current_result, expected_result)
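
Taken together, these three examples pin down the contract of split_content: do nothing when the source column is absent (which also covers the empty-name, empty-list call), otherwise derive the new columns and drop the original one. A minimal sketch of an implementation that would satisfy them, assuming the column holds comma-separated values (the delimiter and the class layout are assumptions, not the project's actual code):

from pyspark.sql import functions as F


class Transformation:
    def __init__(self, dataframe):
        self.dataframe = dataframe

    def split_content(self, column, new_columns):
        # No-op when the source column is missing; "" is never a real
        # column name, so the empty-input case falls out of the same check.
        if column not in self.dataframe.columns:
            return
        parts = F.split(F.col(column), ",")  # assumed delimiter
        for i, name in enumerate(new_columns):
            self.dataframe = self.dataframe.withColumn(name, parts.getItem(i))
        # The tests expect the source column to disappear after the split.
        self.dataframe = self.dataframe.drop(column)

Example #4
An end-to-end test covering the whole pipeline. The mock_get_data parameter implies the method is decorated with unittest.mock.patch (the decorator and its target are omitted in this excerpt), and IntegerType comes from pyspark.sql.types. The flow: Extraction pulls raw records from a mocked API input, Transformation drops, renames, null-fills, lowercases, casts, and splits columns, and Loading writes the result to CSV, which is read back and compared row by row against the expected data.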
    def test_should_return_transformed_data_using_all_pipeline_components(
            self, mock_get_data):
        self.create_tmp_folder()

        fake_api_input = ApiInput(self.FAKE_URL)
        mock_get_data.return_value = self.FAKE_INPUT_DATA

        extraction_process = Extraction(fake_api_input)
        extraction_process.extract()

        raw_data = extraction_process.data
        raw_df = self.spark.createDataFrame(
            raw_data,
            ["date", "place", "mag", "status", "coordinates", "alert"])

        transformation_process = Transformation(raw_df)
        transformation_process.drop(["alert"])
        transformation_process.rename({"mag": "magnitude", "place": "city"})
        transformation_process.replace_null_values({"status": "Automatic"})
        transformation_process.lowercase(["status"])
        transformation_process.convert_data_type({"date": IntegerType()})
        transformation_process.split_content(
            "coordinates", ["longitude", "latitude", "depth"])
        transformed_df = transformation_process.dataframe

        csv_storage = CsvStorage(self.OUTPUT_FILEPATH)
        loading_process = Loading(csv_storage)
        loading_process.load(transformed_df)

        current_result = self.spark \
            .read \
            .csv(self.OUTPUT_FILEPATH, header=True, inferSchema=True) \
            .collect()
        expected_result = self.spark \
            .createDataFrame(self.FAKE_EXPECTED_DATA,
                             ["date", "city", "magnitude", "status", "longitude", "latitude", "depth"]) \
            .collect()

        self.assertEqual(current_result, expected_result)

        self.delete_test_file()
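
Beyond split_content, the pipeline test calls several other Transformation helpers. A compact sketch of how they might sit on top of the PySpark DataFrame API; the method bodies are inferred from the calls above and are assumptions, not the project's actual code:

from pyspark.sql import functions as F


class Transformation:
    def __init__(self, dataframe):
        self.dataframe = dataframe

    def drop(self, columns):
        # Remove the listed columns from the DataFrame.
        self.dataframe = self.dataframe.drop(*columns)

    def rename(self, mapping):
        # mapping: {old_name: new_name}
        for old, new in mapping.items():
            self.dataframe = self.dataframe.withColumnRenamed(old, new)

    def replace_null_values(self, defaults):
        # defaults: {column: replacement_value}
        self.dataframe = self.dataframe.fillna(defaults)

    def lowercase(self, columns):
        # Lowercase the string content of each listed column.
        for name in columns:
            self.dataframe = self.dataframe.withColumn(
                name, F.lower(F.col(name)))

    def convert_data_type(self, mapping):
        # mapping: {column: pyspark.sql.types.DataType instance}
        for name, data_type in mapping.items():
            self.dataframe = self.dataframe.withColumn(
                name, F.col(name).cast(data_type))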