Example #1
    def test_should_return_same_df_when_column_not_exists(self):
        transformation = Transformation(self.test_data)
        transformation.drop(["xpto"])

        transformed_df = transformation.dataframe.collect()

        self.assertEqual(transformed_df, self.test_data.collect())
Example #2
    def test_should_have_same_df_when_column_list_is_empty(self):
        transformation = Transformation(self.test_data)
        transformation.drop([])

        transformed_df = transformation.dataframe.collect()

        self.assertEqual(transformed_df, self.test_data.collect())
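Both no-op cases lean on a PySpark guarantee: DataFrame.drop silently ignores column names that are not in the schema, and with no names at all it returns the plan unchanged. A quick standalone check (assuming an active SparkSession bound to spark):

    df = spark.createDataFrame([(1, "a")], ["id", "tag"])
    assert df.drop("xpto").columns == ["id", "tag"]  # unknown name: ignored
    assert df.drop().columns == ["id", "tag"]        # no names: unchanged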
Example #3
    def test_should_remove_one_column_from_dataframe(self):
        transformation = Transformation(self.test_data)
        transformation.drop(["alert"])

        current_result = transformation.dataframe.columns
        expected_result = self.spark.createDataFrame([
            (1704567252, "California", 0.82, "Automatic",
             [-116.8, 33.3333333, 12.04]),
            (1391707828, "Alaska", 1.1, None, [-148.942, 64.9081, 10.6]),
            (1435498694, "Chile", 4.9, "Reviewed", [-70.6202, -21.4265, 52.24
                                                    ]),
            (1609879110, "Hawaii", 2.0099, "Automatic",
             [-155.429000854492, 19.2180004119873, 33.2999992370605]),
            (1224994646, "Indonesia", 4.8, "Reviewed", [126.419, 0.2661, 10]),
            (1801059964, "Nevada", 0.5, "Automatic", [-116.242, 36.7564, 0.8]),
            (1262739669, "Arkansas", 1.9, "Reviewed",
             [-91.4295, 35.863, 16.41]),
            (1890118874, "Montana", 1.33, "Reviewed",
             [-110.434, 44.4718333, 2.21]),
            (1025727100, "Oklahoma", 1.58, "Reviewed",
             [-98.53233333, 36.57083333, 6.31]),
            (1834567116, "Idaho", 2.6, "Reviewed", [-115.186, 44.2666, 10])
        ], ["date", "place", "mag", "status", "coordinates"]).columns

        self.assertEqual(current_result, expected_result)
Example #4
    def test_should_remove_two_columns_from_dataframe(self):
        transformation = Transformation(self.test_data)
        transformation.drop(["coordinates", "alert"])

        current_result = transformation.dataframe.columns
        expected_result = self.spark.createDataFrame(
            [(1704567252, "California", 0.82, "Automatic"),
             (1391707828, "Alaska", 1.1, None),
             (1435498694, "Chile", 4.9, "Reviewed"),
             (1609879110, "Hawaii", 2.0099, "Automatic"),
             (1224994646, "Indonesia", 4.8, "Reviewed"),
             (1801059964, "Nevada", 0.5, "Automatic"),
             (1262739669, "Arkansas", 1.9, "Reviewed"),
             (1890118874, "Montana", 1.33, "Reviewed"),
             (1025727100, "Oklahoma", 1.58, "Reviewed"),
             (1834567116, "Idaho", 2.6, "Reviewed")],
            ["date", "place", "mag", "status"]).columns

        self.assertEqual(current_result, expected_result)
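Taken together, the four drop examples pin down the contract: unknown names and an empty list are no-ops, and every listed column disappears from the schema. A minimal sketch that would satisfy them, assuming Transformation merely wraps a DataFrame and delegates to DataFrame.drop (the real class clearly carries more methods, as the pipeline example below shows):

    from pyspark.sql import DataFrame


    class Transformation:
        """Sketch: only the constructor and drop are shown."""

        def __init__(self, dataframe: DataFrame):
            self.dataframe = dataframe

        def drop(self, columns: list) -> None:
            # DataFrame.drop ignores absent names, so a missing column or
            # an empty list leaves self.dataframe untouched.
            self.dataframe = self.dataframe.drop(*columns)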
Example #5
    # mock_get_data is injected by a mock.patch decorator on this test
    # (not shown in the captured snippet); it stubs the API call made
    # during extraction.
    def test_should_return_transformed_data_using_all_pipeline_components(
            self, mock_get_data):
        self.create_tmp_folder()

        fake_api_input = ApiInput(self.FAKE_URL)
        mock_get_data.return_value = self.FAKE_INPUT_DATA

        extraction_process = Extraction(fake_api_input)
        extraction_process.extract()

        raw_data = extraction_process.data
        raw_df = self.spark.createDataFrame(
            raw_data,
            ["date", "place", "mag", "status", "coordinates", "alert"])

        transformation_process = Transformation(raw_df)
        transformation_process.drop(["alert"])
        transformation_process.rename({"mag": "magnitude", "place": "city"})
        transformation_process.replace_null_values({"status": "Automatic"})
        transformation_process.lowercase(["status"])
        transformation_process.convert_data_type({"date": IntegerType()})
        transformation_process.split_content(
            "coordinates", ["longitude", "latitude", "depth"])
        transformed_df = transformation_process.dataframe

        csv_storage = CsvStorage(self.OUTPUT_FILEPATH)
        loading_process = Loading(csv_storage)
        loading_process.load(transformed_df)

        current_result = self.spark \
            .read \
            .csv(self.OUTPUT_FILEPATH, header=True, inferSchema=True) \
            .collect()
        expected_result = self.spark \
            .createDataFrame(self.FAKE_EXPECTED_DATA,
                             ["date", "city", "magnitude", "status",
                              "longitude", "latitude", "depth"]) \
            .collect()

        self.assertEqual(current_result, expected_result)

        self.delete_test_file()
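
The end-to-end example exercises the rest of the Transformation surface. Pieced together from the calls it makes and the columns it asserts on, one plausible sketch of those methods (the names match the test; everything beyond what the assertions check is an assumption):

    import pyspark.sql.functions as F
    from pyspark.sql import DataFrame


    class Transformation:
        def __init__(self, dataframe: DataFrame):
            self.dataframe = dataframe

        def rename(self, mapping: dict) -> None:
            # {"mag": "magnitude"} -> one withColumnRenamed per pair
            for old, new in mapping.items():
                self.dataframe = self.dataframe.withColumnRenamed(old, new)

        def replace_null_values(self, defaults: dict) -> None:
            # {"status": "Automatic"} -> fill nulls per column
            self.dataframe = self.dataframe.fillna(defaults)

        def lowercase(self, columns: list) -> None:
            for name in columns:
                self.dataframe = self.dataframe.withColumn(
                    name, F.lower(F.col(name)))

        def convert_data_type(self, mapping: dict) -> None:
            # {"date": IntegerType()} -> cast each column to the given type
            for name, new_type in mapping.items():
                self.dataframe = self.dataframe.withColumn(
                    name, F.col(name).cast(new_type))

        def split_content(self, column: str, new_columns: list) -> None:
            # Expand an array column into one scalar column per element,
            # then drop the original (coordinates -> longitude/latitude/depth).
            for index, name in enumerate(new_columns):
                self.dataframe = self.dataframe.withColumn(
                    name, F.col(column).getItem(index))
            self.dataframe = self.dataframe.drop(column)

Mutating self.dataframe in place keeps the call sites in the test short; returning a new Transformation from each method would be the immutable alternative.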