Пример #1
0
def test_find_matching_scenarios_get_precise_values():
    # Requesting full info should return explicit closeness numbers
    scenario_options = ["right_scenario", "wrong_scenario", "scen_a", "scen_b"]
    results = find_matching_scenarios(
        df_to_test,
        simple_df,
        variable_follower,
        variable_leaders,
        scenario_options,
        return_all_info=True,
    )
    # The two best matches are tied at an exact (zero-distance) fit,
    # and the third option is strictly worse
    assert results[0][1] == results[1][1]
    assert results[0][1] == 0
    assert results[2][1] > 0
    combined_model_score = results[2][1]
    # Once models are classified separately, the high and low values can no
    # longer be combined to produce a good match
    results = find_matching_scenarios(
        df_to_test,
        simple_df,
        variable_follower,
        variable_leaders,
        scenario_options,
        classify_models=["high_model", _mc],
        return_all_info=True,
    )
    assert results[-1][0][0] == "high_model"
    assert results[0][0][0] == _mc
    assert results[0][1] == combined_model_score
Пример #2
0
def test_find_matching_scenarios_no_data_for_time():
    """A ValueError is raised when the leader data has no usable time point."""
    time_col = simple_df.time_col
    half_simple_df = simple_df.filter(scenario="scen_a")
    # Use a single .loc assignment rather than chained indexing
    # (``df[col].loc[0] = ...``): the chained form writes to a temporary
    # under pandas copy-on-write and silently leaves the frame unchanged.
    # NOTE(review): assumes ``.data`` exposes the underlying frame (older
    # pyam behaviour); newer pyam returns a copy — verify against the
    # pinned pyam version.
    half_simple_df.data.loc[0, time_col] = 0
    with pytest.raises(ValueError):
        find_matching_scenarios(
            simple_df,
            half_simple_df,
            variable_follower,
            variable_leaders,
            ["scen_a", "scen_b"],
        )
Пример #3
0
def test_find_matching_scenarios_dual_region():
    """Leader data spanning more than one region must trigger an AssertionError."""
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    multiregion_df = pd.concat(
        [
            simple_df.data,
            pd.DataFrame(
                # A second region ("Country") alongside the existing data
                [[_mc, _sa, "Country", _eco2, _gtc, 2010, 2]],
                columns=_msrvu + [simple_df.time_col, "value"],
            ),
        ]
    )
    multiregion_df = pyam.IamDataFrame(multiregion_df)
    with pytest.raises(AssertionError):
        find_matching_scenarios(
            df_to_test,
            multiregion_df,
            variable_follower,
            variable_leaders,
            ["right_scenario", "wrong_scenario", "scen_a", "scen_b"],
            return_all_info=True,
        )
Пример #4
0
def test_find_matching_scenarios_complicated(options, expected):
    # Similar to the simpler cases above, but with several models involved
    # and interpolation required. Verifies that:
    # 1) the closest option is chosen;
    # 2) invalid options are ignored, and on a tie the earlier option wins;
    # 3) the ordering reverses as expected.
    scenario = find_matching_scenarios(
        df_to_test,
        simple_df,
        variable_follower,
        variable_leaders,
        options,
    )
    assert scenario == ("*", expected)
Пример #5
0
def test_find_matching_scenarios_differential():
    # With a differential (change-based) measurement, constant offsets are
    # ignored, so all series should match equally well in this case.
    all_data = find_matching_scenarios(
        df_to_test,
        simple_df,
        variable_follower,
        variable_leaders,
        ["right_scenario", "wrong_scenario", "scen_a", "scen_b"],
        classify_models=["high_model", _mc],
        return_all_info=True,
        use_change_not_abs=True,
    )
    assert all_data[0][0] == ("high_model", "right_scenario")
    assert all_data[0][1] == all_data[5][1]
    # But if we add a small amount to only one point in the differential, it will
    # be downgraded
    # NOTE(review): chained assignment (``df["value"].iloc[0] = ...``) writes
    # to a temporary under pandas copy-on-write and may leave df_to_test
    # unchanged; whether this works depends on df_to_test's exact type
    # (plain DataFrame vs pyam object) — confirm against the fixtures.
    df_to_test["value"].iloc[0] = df_to_test["value"].iloc[0] + 0.1
    all_data = find_matching_scenarios(
        df_to_test,
        simple_df,
        variable_follower,
        variable_leaders,
        ["right_scenario", "wrong_scenario", "scen_a", "scen_b"],
        classify_models=["high_model", _mc],
        return_all_info=True,
        use_change_not_abs=True,
    )
    assert all_data[0][0] == ("high_model", "right_scenario")
    assert all_data[0][1] != all_data[1][1]
    # Net -0.5 from the original value: the perturbation now favours the
    # other model's scenario.
    # NOTE(review): same chained-assignment concern as above; this test also
    # mutates the module-level df_to_test fixture in place, which leaks state
    # into later tests — consider copying it first.
    df_to_test["value"].iloc[0] = df_to_test["value"].iloc[0] - 0.6
    all_data = find_matching_scenarios(
        df_to_test,
        simple_df,
        variable_follower,
        variable_leaders,
        ["right_scenario", "wrong_scenario", "scen_a", "scen_b"],
        classify_models=["high_model", _mc],
        return_all_info=True,
        use_change_not_abs=True,
    )
    assert all_data[0][0] == (_mc, "right_scenario")
    assert all_data[0][1] == all_data[1][1]
Пример #6
0
def test_find_matching_scenarios_empty():
    # Filtering on a nonexistent scenario leaves nothing to classify against,
    # so no match can be returned at all
    empty_df = simple_df.filter(scenario="impossible")
    result = find_matching_scenarios(
        df_to_test,
        empty_df,
        variable_follower,
        variable_leaders,
        ["right_scenario", "wrong_scenario", "scen_a", "scen_b"],
        return_all_info=True,
    )
    assert result is None
Пример #7
0
def test_find_matching_scenarios_use_change_instead_of_absolute():
    # When matching on changes rather than absolute values, a constant
    # offset applied to one scenario must not affect which one is chosen
    offset_df = simple_df.filter(scenario="scen_a")
    offset_df.data["value"] = offset_df.data["value"] + 10000
    match = find_matching_scenarios(
        simple_df,
        offset_df,
        variable_follower,
        variable_leaders,
        ["scen_a", "scen_b"],
        use_change_not_abs=True,
    )
    assert match == ("*", "scen_a")
Пример #8
0
def test_find_matching_scenarios_matched(half_val, expected):
    """Check which scenario is matched after perturbing one leader value.

    Tests:
    1) that the 1st option is used in the case of equality,
    2) and if it's closer,
    3) but not if it's further away.
    """
    half_simple_df = simple_df.filter(scenario="scen_a")
    # Use a single .loc assignment rather than chained indexing
    # (``df["value"].loc[0] = ...``): the chained form writes to a temporary
    # under pandas copy-on-write and silently leaves the frame unchanged.
    # NOTE(review): assumes ``.data`` exposes the underlying frame (older
    # pyam behaviour); newer pyam returns a copy — verify against the
    # pinned pyam version.
    half_simple_df.data.loc[0, "value"] = half_val
    scenarios = find_matching_scenarios(
        simple_df,
        half_simple_df,
        variable_follower,
        variable_leaders,
        ["scen_a", "scen_b"],
    )
    assert scenarios == ("*", expected)