def test_pivot_with_single_column(self):
        """
        Make sure pivot with single column returns correct DataFrame
        """
        df = proc.pivot(
            df=categories_df,
            index=["name"],
            columns=["category"],
            aggregates=AGGREGATES_SINGLE,
        )
        self.assertListEqual(
            df.columns.tolist(),
            ["name", "cat0", "cat1", "cat2"],
        )
        self.assertEqual(len(df), 101)
        self.assertEqual(df.sum()[1], 315)

        df = proc.pivot(
            df=categories_df,
            index=["dept"],
            columns=["category"],
            aggregates=AGGREGATES_SINGLE,
        )
        self.assertListEqual(
            df.columns.tolist(),
            ["dept", "cat0", "cat1", "cat2"],
        )
        self.assertEqual(len(df), 5)
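These tests call the post-processing pivot against shared fixtures (categories_df, AGGREGATES_SINGLE) that are defined elsewhere in the test module. As a rough, self-contained sketch of the same wide reshaping using plain pandas only, with a made-up toy frame standing in for the fixture (this is not the operator or data under test):

import pandas as pd

# Hypothetical stand-in for categories_df; the real fixture lives in the test module.
toy_df = pd.DataFrame({
    "name": ["alice", "alice", "bob", "bob"],
    "category": ["cat0", "cat1", "cat0", "cat2"],
    "idx_nulls": [1, 2, 3, 4],
})

# Conceptually, the pivot above reshapes long rows into one column per category value
# and keeps the index as a regular column, much like pd.pivot_table + reset_index.
wide = pd.pivot_table(
    toy_df,
    index=["name"],
    columns=["category"],
    values="idx_nulls",
    aggfunc="sum",
).reset_index()
# wide.columns -> ["name", "cat0", "cat1", "cat2"], mirroring the assertion above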
Example #2
def test_pivot_with_single_column():
    """
    Make sure pivot with single column returns correct DataFrame
    """
    df = pivot(
        df=categories_df,
        index=["name"],
        columns=["category"],
        aggregates=AGGREGATES_SINGLE,
    )
    assert df.columns.tolist() == [
        ("idx_nulls", "cat0"),
        ("idx_nulls", "cat1"),
        ("idx_nulls", "cat2"),
    ]
    assert len(df) == 101
    assert df["idx_nulls"]["cat0"].sum() == 315

    df = pivot(
        df=categories_df,
        index=["dept"],
        columns=["category"],
        aggregates=AGGREGATES_SINGLE,
    )
    assert df.columns.tolist() == [
        ("idx_nulls", "cat0"),
        ("idx_nulls", "cat1"),
        ("idx_nulls", "cat2"),
    ]
    assert len(df) == 5
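AGGREGATES_SINGLE itself is not shown in these snippets. Judging by the idx_nulls output columns and the summed values asserted above, it is presumably a single sum aggregate; a hedged guess at its shape (an inference from the assertions, not the actual fixture definition):

# Presumed value of the shared fixture, inferred from the tests above; the real
# definition lives in the test module and may differ.
AGGREGATES_SINGLE = {"idx_nulls": {"operator": "sum"}}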
Example #3
def test_pivot_eliminate_cartesian_product_columns():
    # single metric
    mock_df = DataFrame({
        "dttm": to_datetime(["2019-01-01", "2019-01-01"]),
        "a": [0, 1],
        "b": [0, 1],
        "metric": [9, np.NAN],
    })

    df = pivot(
        df=mock_df,
        index=["dttm"],
        columns=["a", "b"],
        aggregates={"metric": {
            "operator": "mean"
        }},
        drop_missing_columns=False,
    )
    df = flatten(df)
    assert list(df.columns) == ["dttm", "metric, 0, 0", "metric, 1, 1"]
    assert np.isnan(df["metric, 1, 1"][0])

    # multiple metrics
    mock_df = DataFrame({
        "dttm": to_datetime(["2019-01-01", "2019-01-01"]),
        "a": [0, 1],
        "b": [0, 1],
        "metric": [9, np.NAN],
        "metric2": [10, 11],
    })

    df = pivot(
        df=mock_df,
        index=["dttm"],
        columns=["a", "b"],
        aggregates={
            "metric": {
                "operator": "mean"
            },
            "metric2": {
                "operator": "mean"
            },
        },
        drop_missing_columns=False,
    )
    df = flatten(df)
    assert list(df.columns) == [
        "dttm",
        "metric, 0, 0",
        "metric, 1, 1",
        "metric2, 0, 0",
        "metric2, 1, 1",
    ]
    assert np.isnan(df["metric, 1, 1"][0])
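flatten collapses the MultiIndex columns produced by the pivot into single strings; the expected names above ("metric, 0, 0") suggest the levels are joined with a comma and a space. A minimal pandas-only sketch of that collapsing step (the separator constant and helper below are assumptions for illustration, not Superset's flatten):

import pandas as pd

SEPARATOR = ", "  # assumed from expected column names such as "metric, 0, 0"

def flatten_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Join MultiIndex column levels into flat strings and restore the index as a column."""
    df = df.copy()
    df.columns = [
        SEPARATOR.join(str(level) for level in col) if isinstance(col, tuple) else col
        for col in df.columns
    ]
    return df.reset_index()

# A column index shaped like the pivot output above: metric plus the (a, b) combinations.
wide = pd.DataFrame(
    [[9.0, float("nan")]],
    index=pd.to_datetime(["2019-01-01"]).rename("dttm"),
    columns=pd.MultiIndex.from_tuples([("metric", 0, 0), ("metric", 1, 1)]),
)
flat = flatten_columns(wide)
# flat.columns -> ["dttm", "metric, 0, 0", "metric, 1, 1"]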
Example #4
def test_pivot_exceptions():
    """
    Make sure pivot raises correct Exceptions
    """
    # Missing index
    with pytest.raises(TypeError):
        pivot(df=categories_df, columns=["dept"], aggregates=AGGREGATES_SINGLE)

    # invalid index reference
    with pytest.raises(InvalidPostProcessingError):
        pivot(
            df=categories_df,
            index=["abc"],
            columns=["dept"],
            aggregates=AGGREGATES_SINGLE,
        )

    # invalid column reference
    with pytest.raises(InvalidPostProcessingError):
        pivot(
            df=categories_df,
            index=["dept"],
            columns=["abc"],
            aggregates=AGGREGATES_SINGLE,
        )

    # invalid aggregate options
    with pytest.raises(InvalidPostProcessingError):
        pivot(
            df=categories_df,
            index=["name"],
            columns=["category"],
            aggregates={"idx_nulls": {}},
        )
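The checks above only assert the exception type. pytest.raises can also capture the exception and match its message, shown here on a throwaway function rather than the post-processing API (the exact Superset error messages are not asserted anywhere in these snippets, so none are assumed here):

import pytest

def divide(a: float, b: float) -> float:
    # Toy function used only to demonstrate the pattern.
    if b == 0:
        raise ValueError("division by zero")
    return a / b

def test_divide_raises():
    # match= applies a regex to the exception message; excinfo exposes the raised object.
    with pytest.raises(ValueError, match="division by zero") as excinfo:
        divide(1, 0)
    assert "zero" in str(excinfo.value)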
Example #5
def test_rolling_with_pivot_df_and_single_metric():
    pivot_df = pivot(
        df=single_metric_df,
        index=["dttm"],
        columns=["country"],
        aggregates={"sum_metric": {
            "operator": "sum"
        }},
        flatten_columns=False,
        reset_index=False,
    )
    rolling_df = rolling(
        df=pivot_df,
        rolling_type="sum",
        window=2,
        min_periods=0,
        is_pivot_df=True,
    )
    #         dttm  UK  US
    # 0 2019-01-01   5   6
    # 1 2019-01-02  12  14
    assert rolling_df["UK"].to_list() == [5.0, 12.0]
    assert rolling_df["US"].to_list() == [6.0, 14.0]
    assert (rolling_df["dttm"].to_list() == to_datetime(
        ["2019-01-01", "2019-01-02"]).to_list())

    rolling_df = rolling(
        df=pivot_df,
        rolling_type="sum",
        window=2,
        min_periods=2,
        is_pivot_df=True,
    )
    assert rolling_df.empty is True
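single_metric_df is another shared fixture. The commented frames in this and the following examples imply two dates, two countries, and sum_metric values 5, 6, 7, 8; a presumed reconstruction (an assumption based on those comments, not the actual fixture):

import pandas as pd

# Presumed shape of single_metric_df, inferred from the commented frames
# ("2019-01-01  5  6" / "2019-01-02  7  8"); the real fixture is defined in the test module.
single_metric_df = pd.DataFrame({
    "dttm": pd.to_datetime(["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"]),
    "country": ["UK", "US", "UK", "US"],
    "sum_metric": [5, 6, 7, 8],
})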
Example #6
 def test_rolling_with_pivot_df_and_multiple_metrics(self):
     pivot_df = proc.pivot(
         df=multiple_metrics_df,
         index=["dttm"],
         columns=["country"],
         aggregates={
             "sum_metric": {
                 "operator": "sum"
             },
             "count_metric": {
                 "operator": "sum"
             },
         },
         flatten_columns=False,
         reset_index=False,
     )
     rolling_df = proc.rolling(
         df=pivot_df,
         rolling_type="sum",
         window=2,
         min_periods=0,
         is_pivot_df=True,
     )
     #         dttm  count_metric, UK  count_metric, US  sum_metric, UK  sum_metric, US
     # 0 2019-01-01               1.0               2.0             5.0             6.0
     # 1 2019-01-02               4.0               6.0            12.0            14.0
     assert rolling_df["count_metric, UK"].to_list() == [1.0, 4.0]
     assert rolling_df["count_metric, US"].to_list() == [2.0, 6.0]
     assert rolling_df["sum_metric, UK"].to_list() == [5.0, 12.0]
     assert rolling_df["sum_metric, US"].to_list() == [6.0, 14.0]
     assert (rolling_df["dttm"].to_list() == to_datetime([
         "2019-01-01",
         "2019-01-02",
     ]).to_list())
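multiple_metrics_df likewise appears to extend the single-metric fixture with a count_metric column; a presumed reconstruction based on the commented pivot output (count_metric 1-4, sum_metric 5-8), not the fixture itself:

import pandas as pd

# Presumed shape of multiple_metrics_df, inferred from the commented frames in these
# examples; the real fixture is defined in the test module and may differ.
multiple_metrics_df = pd.DataFrame({
    "dttm": pd.to_datetime(["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"]),
    "country": ["UK", "US", "UK", "US"],
    "count_metric": [1, 2, 3, 4],
    "sum_metric": [5, 6, 7, 8],
})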
Example #7
 def test_cum_with_pivot_df_and_single_metric(self):
     pivot_df = proc.pivot(
         df=single_metric_df,
         index=["dttm"],
         columns=["country"],
         aggregates={"sum_metric": {
             "operator": "sum"
         }},
         flatten_columns=False,
         reset_index=False,
     )
     cum_df = proc.cum(
         df=pivot_df,
         operator="sum",
         is_pivot_df=True,
     )
     #         dttm  UK  US
     # 0 2019-01-01   5   6
     # 1 2019-01-02  12  14
     assert cum_df["UK"].to_list() == [5.0, 12.0]
     assert cum_df["US"].to_list() == [6.0, 14.0]
     assert (cum_df["dttm"].to_list() == to_datetime([
         "2019-01-01",
         "2019-01-02",
     ]).to_list())
Example #8
 def test_cum_with_pivot_df_and_multiple_metrics(self):
     pivot_df = proc.pivot(
         df=multiple_metrics_df,
         index=["dttm"],
         columns=["country"],
         aggregates={
             "sum_metric": {
                 "operator": "sum"
             },
             "count_metric": {
                 "operator": "sum"
             },
         },
         flatten_columns=False,
         reset_index=False,
     )
     cum_df = proc.cum(
         df=pivot_df,
         operator="sum",
         is_pivot_df=True,
     )
     #         dttm  count_metric, UK  count_metric, US  sum_metric, UK  sum_metric, US
     # 0 2019-01-01                 1                 2               5               6
     # 1 2019-01-02                 4                 6              12              14
     assert cum_df["count_metric, UK"].to_list() == [1.0, 4.0]
     assert cum_df["count_metric, US"].to_list() == [2.0, 6.0]
     assert cum_df["sum_metric, UK"].to_list() == [5.0, 12.0]
     assert cum_df["sum_metric, US"].to_list() == [6.0, 14.0]
     assert (cum_df["dttm"].to_list() == to_datetime([
         "2019-01-01",
         "2019-01-02",
     ]).to_list())
 def test_pivot_without_columns(self):
     """
     Make sure pivot without columns returns correct DataFrame
     """
     df = proc.pivot(df=categories_df, index=["name"], aggregates=AGGREGATES_SINGLE,)
     self.assertListEqual(
         df.columns.tolist(), ["name", "idx_nulls"],
     )
     self.assertEqual(len(df), 101)
     self.assertEqual(df.sum()[1], 1050)
Example #10
def test_compare_after_pivot():
    pivot_df = pp.pivot(
        df=multiple_metrics_df,
        index=["dttm"],
        columns=["country"],
        aggregates={
            "sum_metric": {
                "operator": "sum"
            },
            "count_metric": {
                "operator": "sum"
            },
        },
        flatten_columns=False,
        reset_index=False,
    )
    """
                   count_metric    sum_metric
    country              UK US         UK US
    dttm
    2019-01-01            1  2          5  6
    2019-01-02            3  4          7  8
    """
    compared_df = pp.compare(
        pivot_df,
        source_columns=["count_metric"],
        compare_columns=["sum_metric"],
        compare_type=PPC.DIFF,
        drop_original_columns=True,
    )
    """
               difference__count_metric__sum_metric
    country                                      UK US
    dttm
    2019-01-01                                   -4 -4
    2019-01-02                                   -4 -4
    """
    flat_df = pp.flatten(compared_df)
    """
            dttm  difference__count_metric__sum_metric, UK  difference__count_metric__sum_metric, US
    0 2019-01-01                                        -4                                        -4
    1 2019-01-02                                        -4                                        -4
    """
    assert flat_df.equals(
        pd.DataFrame(
            data={
                "dttm":
                pd.to_datetime(["2019-01-01", "2019-01-02"]),
                FLAT_COLUMN_SEPARATOR.join([
                    "difference__count_metric__sum_metric", "UK"
                ]): [-4, -4],
                FLAT_COLUMN_SEPARATOR.join([
                    "difference__count_metric__sum_metric", "US"
                ]): [-4, -4],
            }))
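The compare step with PPC.DIFF subtracts the compare column group from the source group label by label, which is why every cell above comes out as -4 (count_metric minus sum_metric). A pandas-only sketch of that arithmetic on a MultiIndex frame (this reimplements only the difference case for illustration, not Superset's compare operator):

import pandas as pd

# A frame with the same shape as the pivoted input shown above.
idx = pd.to_datetime(["2019-01-01", "2019-01-02"]).rename("dttm")
columns = pd.MultiIndex.from_product([["count_metric", "sum_metric"], ["UK", "US"]])
pivoted = pd.DataFrame([[1, 2, 5, 6], [3, 4, 7, 8]], index=idx, columns=columns)

# Subtract the two metric groups, aligned on the country level.
diff = pivoted["count_metric"] - pivoted["sum_metric"]
#             UK  US
# dttm
# 2019-01-01  -4  -4
# 2019-01-02  -4  -4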
Example #11
def test_rolling_after_pivot_with_multiple_metrics():
    pivot_df = pp.pivot(
        df=multiple_metrics_df,
        index=["dttm"],
        columns=["country"],
        aggregates={
            "sum_metric": {
                "operator": "sum"
            },
            "count_metric": {
                "operator": "sum"
            },
        },
        flatten_columns=False,
        reset_index=False,
    )
    """
               count_metric    sum_metric
    country              UK US         UK US
    dttm
    2019-01-01            1  2          5  6
    2019-01-02            3  4          7  8
    """
    rolling_df = pp.rolling(
        df=pivot_df,
        columns={
            "count_metric": "count_metric",
            "sum_metric": "sum_metric",
        },
        rolling_type="sum",
        window=2,
        min_periods=0,
    )
    """
               count_metric      sum_metric
    country              UK   US         UK    US
    dttm
    2019-01-01          1.0  2.0        5.0   6.0
    2019-01-02          4.0  6.0       12.0  14.0
    """
    flat_df = pp.flatten(rolling_df)
    """
            dttm  count_metric, UK  count_metric, US  sum_metric, UK  sum_metric, US
    0 2019-01-01               1.0               2.0             5.0             6.0
    1 2019-01-02               4.0               6.0            12.0            14.0
    """
    assert flat_df.equals(
        pd.DataFrame(
            data={
                "dttm": pd.to_datetime(["2019-01-01", "2019-01-02"]),
                FLAT_COLUMN_SEPARATOR.join(["count_metric", "UK"]): [1.0, 4.0],
                FLAT_COLUMN_SEPARATOR.join(["count_metric", "US"]): [2.0, 6.0],
                FLAT_COLUMN_SEPARATOR.join(["sum_metric", "UK"]): [5.0, 12.0],
                FLAT_COLUMN_SEPARATOR.join(["sum_metric", "US"]): [6.0, 14.0],
            }))
 def test_pivot_with_multiple_columns(self):
     """
     Make sure pivot with multiple columns returns correct DataFrame
     """
     df = proc.pivot(
         df=categories_df,
         index=["name"],
         columns=["category", "dept"],
         aggregates=AGGREGATES_SINGLE,
     )
     self.assertEqual(len(df.columns), 1 + 3 * 5)  # index + possible permutations
 def test_pivot_fill_values(self):
     """
     Make sure pivot with fill values returns correct DataFrame
     """
     df = proc.pivot(
         df=categories_df,
         index=["name"],
         columns=["category"],
         metric_fill_value=1,
         aggregates={"idx_nulls": {"operator": "sum"}},
     )
     self.assertEqual(df.sum()[1], 382)
Example #14
def test_pivot_with_multiple_columns():
    """
    Make sure pivot with multiple columns returns correct DataFrame
    """
    df = pivot(
        df=categories_df,
        index=["name"],
        columns=["category", "dept"],
        aggregates=AGGREGATES_SINGLE,
    )
    df = flatten(df)
    assert len(df.columns) == 1 + 3 * 5  # index + possible permutations
Example #15
def test_pivot_without_columns():
    """
    Make sure pivot without columns returns correct DataFrame
    """
    df = pivot(
        df=categories_df,
        index=["name"],
        aggregates=AGGREGATES_SINGLE,
    )
    assert df.columns.tolist() == ["idx_nulls"]
    assert len(df) == 101
    assert df["idx_nulls"].sum() == 1050
Example #16
def test_cum_after_pivot_with_multiple_metrics():
    pivot_df = pp.pivot(
        df=multiple_metrics_df,
        index=["dttm"],
        columns=["country"],
        aggregates={
            "sum_metric": {
                "operator": "sum"
            },
            "count_metric": {
                "operator": "sum"
            },
        },
        flatten_columns=False,
        reset_index=False,
    )
    """
                   count_metric    sum_metric
    country              UK US         UK US
    dttm
    2019-01-01            1  2          5  6
    2019-01-02            3  4          7  8
    """
    cum_df = pp.cum(
        df=pivot_df,
        operator="sum",
        columns={
            "sum_metric": "sum_metric",
            "count_metric": "count_metric"
        },
    )
    """
               count_metric    sum_metric
    country              UK US         UK  US
    dttm
    2019-01-01            1  2          5   6
    2019-01-02            4  6         12  14
    """
    flat_df = pp.flatten(cum_df)
    """
            dttm  count_metric, UK  count_metric, US  sum_metric, UK  sum_metric, US
    0 2019-01-01                 1                 2               5               6
    1 2019-01-02                 4                 6              12              14
    """
    assert flat_df.equals(
        pd.DataFrame({
            "dttm": pd.to_datetime(["2019-01-01", "2019-01-02"]),
            FLAT_COLUMN_SEPARATOR.join(["count_metric", "UK"]): [1, 4],
            FLAT_COLUMN_SEPARATOR.join(["count_metric", "US"]): [2, 6],
            FLAT_COLUMN_SEPARATOR.join(["sum_metric", "UK"]): [5, 12],
            FLAT_COLUMN_SEPARATOR.join(["sum_metric", "US"]): [6, 14],
        }))
 def test_pivot_fill_column_values(self):
     """
     Make sure pivot with null column names returns correct DataFrame
     """
     df_copy = categories_df.copy()
     df_copy["category"] = None
     df = proc.pivot(
         df=df_copy,
         index=["name"],
         columns=["category"],
         aggregates={"idx_nulls": {"operator": "sum"}},
     )
     assert len(df) == 101
     assert df.columns.tolist() == ["name", "<NULL>"]
Example #18
def test_pivot_fill_values():
    """
    Make sure pivot with fill values returns correct DataFrame
    """
    df = pivot(
        df=categories_df,
        index=["name"],
        columns=["category"],
        metric_fill_value=1,
        aggregates={"idx_nulls": {
            "operator": "sum"
        }},
    )
    assert df["idx_nulls"]["cat0"].sum() == 382
 def test_pivot_without_flatten_columns_and_reset_index(self):
     df = proc.pivot(
         df=single_metric_df,
         index=["dttm"],
         columns=["country"],
         aggregates={"sum_metric": {"operator": "sum"}},
         flatten_columns=False,
         reset_index=False,
     )
     #                metric
     # country        UK US
     # dttm
     # 2019-01-01      5  6
     # 2019-01-02      7  8
     assert df.columns.to_list() == [("sum_metric", "UK"), ("sum_metric", "US")]
     assert df.index.to_list() == to_datetime(["2019-01-01", "2019-01-02"]).to_list()
Example #20
def test_rolling_should_empty_df():
    pivot_df = pp.pivot(
        df=single_metric_df,
        index=["dttm"],
        columns=["country"],
        aggregates={"sum_metric": {
            "operator": "sum"
        }},
    )
    rolling_df = pp.rolling(
        df=pivot_df,
        rolling_type="sum",
        window=2,
        min_periods=2,
        columns={"sum_metric": "sum_metric"},
    )
    assert rolling_df.empty is True
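With window=2 and min_periods=2, a two-row frame has only one complete window; plain pandas would keep the first row as NaN, and the empty-frame assertions here and in the earlier rolling example imply that the post-processing operator goes further and discards the first min_periods rows outright. A small pandas-only comparison (toy series, not the Superset implementation):

import pandas as pd

series = pd.Series([5.0, 7.0], index=pd.to_datetime(["2019-01-01", "2019-01-02"]))

# min_periods=0 allows a partial first window; min_periods=2 leaves it as NaN.
print(series.rolling(window=2, min_periods=0).sum().tolist())  # [5.0, 12.0]
print(series.rolling(window=2, min_periods=2).sum().tolist())  # [nan, 12.0]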
Example #21
def test_rolling_after_pivot_with_single_metric():
    pivot_df = pp.pivot(
        df=single_metric_df,
        index=["dttm"],
        columns=["country"],
        aggregates={"sum_metric": {
            "operator": "sum"
        }},
    )
    """
                   sum_metric
    country            UK US
    dttm
    2019-01-01          5  6
    2019-01-02          7  8
    """
    rolling_df = pp.rolling(
        df=pivot_df,
        columns={"sum_metric": "sum_metric"},
        rolling_type="sum",
        window=2,
        min_periods=0,
    )
    """
               sum_metric
    country            UK    US
    dttm
    2019-01-01        5.0   6.0
    2019-01-02       12.0  14.0
    """
    flat_df = pp.flatten(rolling_df)
    """
            dttm  sum_metric, UK  sum_metric, US
    0 2019-01-01             5.0             6.0
    1 2019-01-02            12.0            14.0
    """
    assert flat_df.equals(
        pd.DataFrame(
            data={
                "dttm": pd.to_datetime(["2019-01-01", "2019-01-02"]),
                FLAT_COLUMN_SEPARATOR.join(["sum_metric", "UK"]): [5.0, 12.0],
                FLAT_COLUMN_SEPARATOR.join(["sum_metric", "US"]): [6.0, 14.0],
            }))
Example #22
def test_cum_after_pivot_with_single_metric():
    pivot_df = pp.pivot(
        df=single_metric_df,
        index=["dttm"],
        columns=["country"],
        aggregates={"sum_metric": {
            "operator": "sum"
        }},
        flatten_columns=False,
        reset_index=False,
    )
    """
               sum_metric
    country            UK US
    dttm
    2019-01-01          5  6
    2019-01-02          7  8
    """
    cum_df = pp.cum(df=pivot_df,
                    operator="sum",
                    columns={"sum_metric": "sum_metric"})
    """
               sum_metric
    country            UK  US
    dttm
    2019-01-01          5   6
    2019-01-02         12  14
    """
    cum_and_flat_df = pp.flatten(cum_df)
    """
                dttm  sum_metric, UK  sum_metric, US
    0 2019-01-01               5               6
    1 2019-01-02              12              14
    """
    assert cum_and_flat_df.equals(
        pd.DataFrame({
            "dttm": pd.to_datetime(["2019-01-01", "2019-01-02"]),
            FLAT_COLUMN_SEPARATOR.join(["sum_metric", "UK"]): [5, 12],
            FLAT_COLUMN_SEPARATOR.join(["sum_metric", "US"]): [6, 14],
        }))
    def test_pivot(self):
        aggregates = {"idx_nulls": {"operator": "sum"}}

        # regular pivot
        df = proc.pivot(
            df=categories_df,
            index=["name"],
            columns=["category"],
            aggregates=aggregates,
        )
        self.assertListEqual(
            df.columns.tolist(),
            [("idx_nulls", "cat0"), ("idx_nulls", "cat1"),
             ("idx_nulls", "cat2")],
        )
        self.assertEqual(len(df), 101)
        self.assertEqual(df.sum()[0], 315)

        # regular pivot
        df = proc.pivot(
            df=categories_df,
            index=["dept"],
            columns=["category"],
            aggregates=aggregates,
        )
        self.assertEqual(len(df), 5)

        # fill value
        df = proc.pivot(
            df=categories_df,
            index=["name"],
            columns=["category"],
            metric_fill_value=1,
            aggregates={"idx_nulls": {
                "operator": "sum"
            }},
        )
        self.assertEqual(df.sum()[0], 382)

        # invalid index reference
        self.assertRaises(
            QueryObjectValidationError,
            proc.pivot,
            df=categories_df,
            index=["abc"],
            columns=["dept"],
            aggregates=aggregates,
        )

        # invalid column reference
        self.assertRaises(
            QueryObjectValidationError,
            proc.pivot,
            df=categories_df,
            index=["dept"],
            columns=["abc"],
            aggregates=aggregates,
        )

        # invalid aggregate options
        self.assertRaises(
            QueryObjectValidationError,
            proc.pivot,
            df=categories_df,
            index=["name"],
            columns=["category"],
            aggregates={"idx_nulls": {}},
        )
Example #24
def test_resample_after_pivot():
    df = pd.DataFrame(
        data={
            "__timestamp": pd.to_datetime(
                [
                    "2022-01-13",
                    "2022-01-13",
                    "2022-01-13",
                    "2022-01-11",
                    "2022-01-11",
                    "2022-01-11",
                ]
            ),
            "city": ["Chicago", "LA", "NY", "Chicago", "LA", "NY"],
            "val": [6.0, 5.0, 4.0, 3.0, 2.0, 1.0],
        }
    )
    pivot_df = pp.pivot(
        df=df,
        index=["__timestamp"],
        columns=["city"],
        aggregates={
            "val": {"operator": "sum"},
        },
        flatten_columns=False,
        reset_index=False,
    )
    """
                    val
    city        Chicago   LA   NY
    __timestamp
    2022-01-11      3.0  2.0  1.0
    2022-01-13      6.0  5.0  4.0
    """
    resample_df = pp.resample(
        df=pivot_df,
        rule="1D",
        method="asfreq",
        fill_value=0,
    )
    """
                    val
    city        Chicago   LA   NY
    __timestamp
    2022-01-11      3.0  2.0  1.0
    2022-01-12      0.0  0.0  0.0
    2022-01-13      6.0  5.0  4.0
    """
    flat_df = pp.flatten(resample_df)
    """
      __timestamp  val, Chicago  val, LA  val, NY
    0  2022-01-11           3.0      2.0      1.0
    1  2022-01-12           0.0      0.0      0.0
    2  2022-01-13           6.0      5.0      4.0
    """
    assert flat_df.equals(
        pd.DataFrame(
            data={
                "__timestamp": pd.to_datetime(
                    ["2022-01-11", "2022-01-12", "2022-01-13"]
                ),
                "val, Chicago": [3.0, 0, 6.0],
                "val, LA": [2.0, 0, 5.0],
                "val, NY": [1.0, 0, 4.0],
            }
        )
    )
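The resample step re-indexes the pivoted frame to a daily frequency and fills the missing 2022-01-12 row with zeros. The same behaviour on a plain single-column frame in pandas (toy data, independent of the pp.resample wrapper):

import pandas as pd

df = pd.DataFrame(
    {"val": [3.0, 6.0]},
    index=pd.to_datetime(["2022-01-11", "2022-01-13"]).rename("__timestamp"),
)

# asfreq re-indexes to the target frequency without aggregating; fill_value plugs the gaps.
daily = df.resample("1D").asfreq(fill_value=0)
#              val
# __timestamp
# 2022-01-11   3.0
# 2022-01-12   0.0
# 2022-01-13   6.0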