Example #1
def test_flows_geojson_correct():
    """
    Test that flows outputs expected geojson.
    """
    dl1 = locate_subscribers("2016-01-01",
                             "2016-01-02",
                             level="admin3",
                             method="last")
    dl2 = locate_subscribers("2016-01-02",
                             "2016-01-03",
                             level="admin3",
                             method="last")
    flow = Flows(dl1, dl2)
    fl_json = flow.to_geojson()
    directory = os.path.dirname(os.path.realpath(__file__))
    reference_file = os.path.join(directory, "data", "flows_reference.json")
    with open(reference_file) as fin:
        assert fl_json == json.load(fin)
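
Example #1 compares the live query output against a GeoJSON fixture stored alongside the tests. As a minimal sketch only, the fixture could be regenerated with the same query setup; the helper name and output path here are assumptions, not part of the original suite:

def regenerate_flows_reference(path="data/flows_reference.json"):
    # Hypothetical helper: rebuild the reference file from the same two
    # daily location queries used in the test above.
    dl1 = locate_subscribers("2016-01-01", "2016-01-02", level="admin3", method="last")
    dl2 = locate_subscribers("2016-01-02", "2016-01-03", level="admin3", method="last")
    with open(path, "w") as fout:
        json.dump(Flows(dl1, dl2).to_geojson(), fout)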
Example #2
def test_can_be_aggregated_admin3(get_dataframe):
    """
    Query can be aggregated to a spatial level with admin3 data.
    """
    mfl = locate_subscribers(
        "2016-01-01", "2016-01-02", level="admin3", method="most-common"
    )
    agg = mfl.aggregate()
    df = get_dataframe(agg)
    assert ["pcod", "total"] == list(df.columns)
Example #3
def test_can_be_joined(get_dataframe):
    """
    RadiusOfGyration() can be joined with a location type metric.
    """
    RoG = RadiusOfGyration("2016-01-01", "2016-01-02")
    dl = locate_subscribers("2016-01-01", "2016-01-02", level="admin3")
    rog_JA = RoG.join_aggregate(dl)
    df = get_dataframe(rog_JA)
    assert isinstance(df, pd.DataFrame)
    assert rog_JA.column_names == ["name", "rog"]
Example #4
def test_can_be_joined(get_dataframe):
    """
    TopUpBalance() can be joined with a location type metric.
    """
    topup_balance = TopUpBalance("2016-01-01", "2016-01-02", statistic="avg")
    dl = locate_subscribers("2016-01-01",
                            "2016-01-02",
                            spatial_unit=make_spatial_unit("admin", level=3))
    topup_balance_JA = topup_balance.join_aggregate(dl)
    df = get_dataframe(topup_balance_JA)
    assert topup_balance_JA.column_names == ["pcod", "value"]
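
Note that the examples specify the spatial resolution in two different ways: Example #3 uses level="admin3" while Example #4 uses spatial_unit=make_spatial_unit("admin", level=3). The sketch below simply places the two call styles side by side for comparison; which keyword locate_subscribers accepts depends on the flowmachine version in use.

# The same admin-level-3 request written in the two styles seen above.
dl_level = locate_subscribers("2016-01-01", "2016-01-02", level="admin3")
dl_unit = locate_subscribers(
    "2016-01-01", "2016-01-02", spatial_unit=make_spatial_unit("admin", level=3)
)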
Example #5
def test_calculates_flows(get_dataframe):
    """
    Flows() are correctly calculated
    """
    dl1 = locate_subscribers("2016-01-01",
                             "2016-01-02",
                             level="admin3",
                             method="last")
    dl2 = locate_subscribers("2016-01-02",
                             "2016-01-03",
                             level="admin3",
                             method="last")
    flow = Flows(dl1, dl2)
    df = get_dataframe(flow)
    assert (df[(df.pcod_from == "524 3 09 50")
               & (df.pcod_to == "524 5 14 73")]["count"].values[0] == 2)
    assert (df[(df.pcod_from == "524 4 10 53")
               & (df.pcod_to == "524 2 05 24")]["count"].values[0] == 2)
    assert (df[(df.pcod_from == "524 1 02 09")
               & (df.pcod_to == "524 3 08 44")]["count"].values[0] == 4)
Example #6
def test_calculates_flows(get_dataframe):
    """
    Flows() are correctly calculated
    """
    dl1 = locate_subscribers("2016-01-01",
                             "2016-01-02",
                             level="admin3",
                             method="last")
    dl2 = locate_subscribers("2016-01-02",
                             "2016-01-03",
                             level="admin3",
                             method="last")
    flow = Flows(dl1, dl2)
    df = get_dataframe(flow)
    assert (df[(df.name_from == "Arghakhanchi")
               & (df.name_to == "Dadeldhura")]["count"].values[0] == 2)
    assert (df[(df.name_from == "Salyan")
               & (df.name_to == "Kavrepalanchok")]["count"].values[0] == 2)
    assert (df[(df.name_from == "Sankhuwasabha")
               & (df.name_to == "Myagdi")]["count"].values[0] == 4)
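
Examples #5 and #6 repeat the same origin/destination lookup in every assertion. A small hypothetical helper (not part of the original tests) that factors out the pattern, using the column names from the dataframes above:

def flow_count(df, origin, destination, from_col="pcod_from", to_col="pcod_to"):
    # Return the count for one origin/destination pair in a Flows dataframe;
    # pass from_col="name_from", to_col="name_to" for the name-based variant.
    return df[(df[from_col] == origin) & (df[to_col] == destination)]["count"].values[0]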
Example #7
def test_can_be_aggregated_admin3(self):
    """
    Query can be aggregated to a spatial level with admin3 data.
    """
    mfl = locate_subscribers("2016-01-01",
                             "2016-01-02",
                             level="admin3",
                             method="most-common")
    agg = mfl.aggregate()
    df = agg.get_dataframe()
    self.assertIs(type(df), pd.DataFrame)
    self.assertEqual(list(df.columns), ["name", "total"])
Example #8
def test_most_frequent_admin(get_dataframe):
    """
    Test that the most frequent admin3 is correctly calculated.
    """

    mfl = locate_subscribers(
        "2016-01-01", "2016-01-02", level="admin3", method="most-common"
    )
    df = get_dataframe(mfl)
    # A few hand picked values
    df_set = df.set_index("subscriber")["name"]
    assert "Dolpa" == df_set["0gmvwzMAYbz5We1E"]
    assert "Rukum" == df_set["1QBlwRo4Kd5v3Ogz"]
    assert "Arghakhanchi" == df_set["2Dq97XmPqvL6noGk"]
Example #9
def test_most_frequent_admin(get_dataframe):
    """
    Test that the most frequent admin3 is correctly calculated.
    """
    mfl = locate_subscribers(
        "2016-01-01",
        "2016-01-02",
        spatial_unit=make_spatial_unit("admin", level=3),
        method="most-common",
    )
    df = get_dataframe(mfl)
    # A few hand picked values
    df_set = df.set_index("subscriber")["pcod"]
    assert "524 4 12 62" == df_set["0gmvwzMAYbz5We1E"]
    assert "524 4 10 52" == df_set["1QBlwRo4Kd5v3Ogz"]
    assert "524 3 09 50" == df_set["2Dq97XmPqvL6noGk"]
Example #10
def test_can_be_aggregated_admin3_distribution(get_dataframe):
    """
    Categorical queries can be aggregated to a spatial level with 'distribution' method.
    """
    locations = locate_subscribers(
        "2016-01-01",
        "2016-01-02",
        spatial_unit=make_spatial_unit("admin", level=3),
        method="most-common",
    )
    metric = SubscriberHandsetCharacteristic("2016-01-01",
                                             "2016-01-02",
                                             characteristic="hnd_type")
    agg = JoinedSpatialAggregate(metric=metric,
                                 locations=locations,
                                 method="distr")
    df = get_dataframe(agg)
    assert ["pcod", "metric", "key", "value"] == list(df.columns)
    assert all(df[df.metric == "value"].groupby("pcod").sum() == 1.0)
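
The final assertion in Example #10 checks that, within each region, the per-category shares produced by the "distr" method sum to one. A self-contained toy illustration of that groupby pattern, using made-up values rather than the real test data:

import pandas as pd

toy = pd.DataFrame(
    {
        "pcod": ["A", "A", "B", "B"],
        "metric": ["value"] * 4,
        "key": ["Smartphone", "Feature", "Smartphone", "Feature"],
        "value": [0.25, 0.75, 0.5, 0.5],
    }
)
# Within each region the shares should add up to 1.0, mirroring the test's check.
assert all(toy[toy.metric == "value"].groupby("pcod")["value"].sum() == 1.0)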