Example #1
def test_convert_units_to_mtco2_equiv_fails_with_month_units(
        check_aggregate_df):
    limited_check_agg = check_aggregate_df.filter(variable="Primary Energy*",
                                                  keep=False)
    limited_check_agg.data["unit"].iloc[0] = "Mt CH4/mo"
    limited_check_agg = pyam.IamDataFrame(limited_check_agg.data)
    err_msg = "'mo' is not defined in the unit registry"
    with pytest.raises(UndefinedUnitError, match=err_msg):
        convert_units_to_MtCO2_equiv(limited_check_agg)
Example #2
def test_convert_units_to_mtco2_equiv_fails_with_oom_units(check_aggregate_df):
    limited_check_agg = check_aggregate_df.filter(variable="Primary Energy*",
                                                  keep=False)
    limited_check_agg.data["unit"].iloc[0] = "Tt CO2"
    limited_check_agg = pyam.IamDataFrame(limited_check_agg.data)
    err_msg = re.escape(
        "Cannot convert from Tt CO2 (cleaned is: Tt CO2) to Mt CO2-equiv/yr (cleaned is: Mt CO2/yr)"
    )
    with pytest.raises(ValueError, match=err_msg):
        convert_units_to_MtCO2_equiv(limited_check_agg)
Example #3
def test_convert_units_to_mtco2_equiv_fails_with_bad_units(check_aggregate_df):
    err_msg = "'y' is not defined in the unit registry"
    with pytest.raises(UndefinedUnitError, match=err_msg):
        convert_units_to_MtCO2_equiv(check_aggregate_df)

    limited_check_agg = check_aggregate_df.filter(variable="Primary Energy*",
                                                  keep=False)
    limited_check_agg.data["unit"].iloc[0] = "bad unit"
    err_msg = "'bad' is not defined in the unit registry"
    with pytest.raises(UndefinedUnitError, match=err_msg):
        convert_units_to_MtCO2_equiv(limited_check_agg)
Example #4
 def test_infill_composite_values_works(self, larger_df, caplog):
     # Ensure that the code performs correctly
     larger_df_copy = larger_df.copy()
     larger_df_copy.append(
         infill_composite_values(
             larger_df_copy,
             composite_dic={"Emissions|CO2": ["Emissions|CO2|*"]}),
         inplace=True,
     )
     larger_df_copy = convert_units_to_MtCO2_equiv(larger_df_copy)
     infilled = infill_composite_values(larger_df_copy)
     assert np.allclose(
         infilled.filter(model="model_C", scenario="scen_C").data["value"],
         2.5)
     assert np.allclose(
         infilled.filter(model="model_D",
                         scenario="scen_C",
                         variable="Emissions|CO2").data["value"],
         2,
     )
     assert np.allclose(
         infilled.filter(
             model="model_D",
             scenario="scen_F",
             variable="Emissions|Kyoto Gases (AR5-GWP100)",
         ).data["value"],
         4 + 2 *
         28,  # The 2*28 comes from the CH4, converted to CO2 equiv using AR5.
     )
Example #5
 def test_infill_composite_values_subtraction(self, larger_df, caplog):
     # Ensure that the code performs correctly when we subtract emissions too
     larger_df_copy = larger_df.copy()
     larger_df_copy.append(
         infill_composite_values(
             larger_df_copy,
             composite_dic={"Emissions|CO2": ["Emissions|CO2|*"]}),
         inplace=True,
     )
     AFOLU = "Emissions|CO2|AFOLU"
     larger_df_copy = convert_units_to_MtCO2_equiv(larger_df_copy)
     forgot_AFOLU = larger_df_copy.filter(variable=AFOLU, keep=False)
     infilled = infill_composite_values(
         forgot_AFOLU,
         composite_dic={
             AFOLU: {
                 "Emissions|CO2": 1,
                 "Emissions|CO2|Industry": -1
             }
         },
     )
     # We should have reconstructed the original data where it existed and also have
     # 0s now
     assert infilled.filter(model="model_C").data.equals(
         larger_df_copy.filter(variable=AFOLU).data.reset_index(drop=True))
     assert np.allclose(
         infilled.filter(model="model_C", keep=False)["value"], 0)
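The weighted composite_dic used above tells infill_composite_values to rebuild the AFOLU series by subtraction: each listed variable contributes with its weight. A minimal arithmetic sketch of what that encodes, using made-up numbers for a single model/scenario/region/year (only infill_composite_values' dictionary form and the variable names come from the test; the values are hypothetical):

total_co2 = 10.0    # hypothetical Emissions|CO2, Mt CO2/yr
industry_co2 = 7.0  # hypothetical Emissions|CO2|Industry, Mt CO2/yr
weights = {"Emissions|CO2": 1, "Emissions|CO2|Industry": -1}
# Emissions|CO2|AFOLU is reconstructed as the weighted sum of the contributors
afolu = (weights["Emissions|CO2"] * total_co2
         + weights["Emissions|CO2|Industry"] * industry_co2)
assert afolu == 3.0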
Example #6
def test_convert_units_to_MtCO2_equiv_doesnt_change(check_aggregate_df):
    # Check that it does nothing when nothing needs doing
    limited_check_agg = check_aggregate_df.filter(variable="Primary Energy*",
                                                  keep=False)
    limited_check_agg.data["unit"] = "Mt CO2-equiv/yr"
    converted_data = convert_units_to_MtCO2_equiv(limited_check_agg)
    assert (converted_data.data["unit"] == "Mt CO2-equiv/yr").all()
    assert converted_data.data.equals(limited_check_agg.data)
Example #7
 def test_relationship_usage_works_multiple(self, test_db, test_downscale_df):
     # Test that the decomposer function works for slightly more complicated data
     # (two components).
     # Get matching times
     test_downscale_df = _adjust_time_style_to_match(test_downscale_df, test_db)
     if test_db.time_col == "year":
         test_downscale_df.filter(
             year=test_db.data[test_db.time_col].values, inplace=True
         )
     else:
         test_downscale_df.filter(time=test_db.data[test_db.time_col], inplace=True)
     # Make the variables work for our case
     components = ["Emissions|HFC|C5F12", "Emissions|HFC|C2F6"]
     aggregate = "Emissions|HFC"
     test_downscale_df.data["variable"] = aggregate
     tcruncher = self.tclass(test_db)
     with pytest.raises(ValueError):
         filled = tcruncher.infill_components(
             aggregate, components, test_downscale_df
         )
     test_downscale_df = convert_units_to_MtCO2_equiv(test_downscale_df)
     filled = tcruncher.infill_components(aggregate, components, test_downscale_df)
     # The value returned should be a dataframe with 2 entries per original entry (4)
     assert len(filled.data) == 8
     assert all(y in filled.variables().values for y in components)
     # We also expect the amount of the variables to be conserved
     if test_db.time_col == "year":
         assert np.allclose(
             test_downscale_df.data.groupby("year").sum()["value"].values,
             convert_units_to_MtCO2_equiv(filled)
             .data.groupby("year")
             .sum()["value"]
             .values,
         )
     else:
         assert np.allclose(
             test_downscale_df.data.groupby("time").sum()["value"].values,
             convert_units_to_MtCO2_equiv(filled)
             .data.groupby("time")
             .sum()["value"]
             .values,
         )
Example #8
def test_convert_units_to_MtCO2_equiv_equiv_start(check_aggregate_df,
                                                  unit_start):
    limited_check_agg = check_aggregate_df.filter(variable="Primary Energy*",
                                                  keep=False)
    limited_check_agg.data["unit"] = unit_start
    converted_data = convert_units_to_MtCO2_equiv(limited_check_agg)

    assert (converted_data.data["unit"] == "Mt CO2-equiv/yr").all()

    with _ur.context("AR5GWP100"):
        exp_conv_factor = _ur("kt CF4/yr").to("Mt CO2/yr").magnitude
    assert converted_data.data["value"].equals(
        limited_check_agg.data["value"] * exp_conv_factor)
Example #9
def test__construct_consistent_values():
    test_db_co2 = convert_units_to_MtCO2_equiv(test_db)
    aggregate_name = "agg"
    assert aggregate_name not in test_db_co2.variables().values
    component_ratio = ["Emissions|HFC|C2F6", "Emissions|HFC|C5F12"]
    consistent_vals = _construct_consistent_values(aggregate_name,
                                                   component_ratio,
                                                   test_db_co2)
    assert aggregate_name in consistent_vals["variable"].values
    consistent_vals = consistent_vals.timeseries()
    timeseries_data = test_db_co2.timeseries()
    assert all([
        np.allclose(
            consistent_vals.iloc[0].iloc[ind],
            timeseries_data.iloc[0].iloc[ind] +
            timeseries_data.iloc[1].iloc[ind],
        ) for ind in range(len(timeseries_data.iloc[0]))
    ])
Example #10
def test_convert_units_to_MtCO2_equiv_works(check_aggregate_df, ARoption,
                                            expected):
    # ARoption turns the use of AR4 on, rather than AR5 (the default)
    limited_check_agg = check_aggregate_df.filter(variable="Primary Energy*",
                                                  keep=False)
    converted_units = convert_units_to_MtCO2_equiv(limited_check_agg, ARoption)
    assert all(y[:6] == "Mt CO2"
               for y in converted_units.data["unit"].unique())
    # Index 1 is already in CO2
    assert (converted_units.data["value"].loc[1] ==
            limited_check_agg.data["value"].loc[1])
    # At index 122 the units are Mt methane, rated 28 times higher under AR5
    assert np.isclose(
        converted_units.data["value"].loc[122],
        limited_check_agg.data["value"].loc[122] * expected[0],
    )
    # At index 142 we have kt CF4, 6630 times more effective per kg, divided by 1000 for kt -> Mt
    assert np.isclose(
        converted_units.data["value"].loc[142],
        limited_check_agg.data["value"].loc[142] * expected[1],
    )
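The expected factors in this test can be read straight off the unit registry; a minimal sketch, assuming the same _ur registry and "AR5GWP100" context that Example #8 uses:

with _ur.context("AR5GWP100"):
    # Mt CH4/yr -> Mt CO2/yr: the AR5 GWP100 of methane, roughly 28
    ch4_factor = _ur("Mt CH4/yr").to("Mt CO2/yr").magnitude
    # kt CF4/yr -> Mt CO2/yr: a GWP100 of 6630, divided by 1000 for kt -> Mt
    cf4_factor = _ur("kt CF4/yr").to("Mt CO2/yr").magnitude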
Example #11
def test__construct_consistent_values_with_equiv():
    test_db_co2 = convert_units_to_MtCO2_equiv(test_db)
    test_db_co2.data["unit"].loc[0:1] = "Mt CO2/yr"
    aggregate_name = "agg"
    assert aggregate_name not in test_db_co2.variables().values
    component_ratio = ["Emissions|HFC|C2F6", "Emissions|HFC|C5F12"]
    consistent_vals = _construct_consistent_values(aggregate_name,
                                                   component_ratio,
                                                   test_db_co2)
    assert aggregate_name in consistent_vals["variable"].values
    consistent_vals = consistent_vals.timeseries()
    timeseries_data = test_db_co2.timeseries()
    assert all([
        np.allclose(
            consistent_vals.iloc[0].iloc[ind],
            timeseries_data.iloc[0].iloc[ind] +
            timeseries_data.iloc[1].iloc[ind],
        ) for ind in range(len(timeseries_data.iloc[0]))
    ])
    # We also require that the output units are '-equiv'
    assert all(y == "Mt CO2-equiv/yr"
               for y in consistent_vals.index.get_level_values("unit"))
Example #12
    def infill_components(self,
                          aggregate,
                          components,
                          to_infill_df,
                          use_ar4_data=False):
        """
        Derive the relationship between the composite variables and their sum, then use
        this to deconstruct the sum.

        Parameters
        ----------
        aggregate : str
            The variable for which we want to calculate timeseries (e.g.
            ``"Emissions|CO2"``). Unlike in most crunchers, we do not expect the
            database to already contain this data.

        components : list[str]
            The variables whose sum should be equal to the timeseries of the aggregate
            (e.g. ``["Emissions|CO2|AFOLU", "Emissions|CO2|Energy"]``).

        to_infill_df : :obj:`pyam.IamDataFrame`
            The dataframe that already contains the ``aggregate`` variable, but needs
            the ``components`` to be infilled.

        use_ar4_data : bool
            If True, convert all values to Mt CO2-equivalent using IPCC AR4 GWP100
            data; otherwise (the default) use the GWP100 data from AR5.

        Returns
        -------
        :obj:`pyam.IamDataFrame`
            The infilled data resulting from the calculation.

        Raises
        ------
        ValueError
            The units of the aggregate variable are inconsistent between the input
            and the constructed data, or there is no data for the relevant
            variables in the database.
        """
        assert (
            aggregate in to_infill_df.variables().values
        ), "The database to infill does not have the aggregate variable"
        assert all(
            y not in components for y in to_infill_df.variables().values
        ), "The database to infill already has some component variables"
        assert len(to_infill_df.data.columns) == len(
            self._db.data.columns
        ) and all(to_infill_df.data.columns == self._db.data.columns), (
            "The database and to_infill_db fed into this have inconsistent columns, "
            "which will prevent adding the data together properly.")
        self._db.filter(variable=components, inplace=True)
        # We only want to reference cases where all the required components are found
        combinations = self._db.data[["model", "scenario",
                                      "region"]].drop_duplicates()
        for ind in range(len(combinations)):
            model, scenario, region = combinations.iloc[ind]
            found_vars = self._db.filter(model=model,
                                         scenario=scenario,
                                         region=region).variables()
            if any(comp not in found_vars.values for comp in components):
                self._db.filter(model=model,
                                scenario=scenario,
                                keep=False,
                                inplace=True)
        if len(self._set_of_units_without_equiv(self._db)) > 1:
            db_to_generate = convert_units_to_MtCO2_equiv(
                self._db, use_ar4_data=use_ar4_data)
        else:
            db_to_generate = self._db
        consistent_composite = self._construct_consistent_values(
            aggregate, components, db_to_generate)
        self._db.append(consistent_composite, inplace=True)
        cruncher = TimeDepRatio(self._db)
        if self._set_of_units_without_equiv(
                to_infill_df) != self._set_of_units_without_equiv(
                    consistent_composite):
            raise ValueError(
                "The units of the aggregate variable are inconsistent between the "
                "input and constructed data. We input {} and constructed {}.".
                format(
                    self._set_of_units_without_equiv(to_infill_df),
                    self._set_of_units_without_equiv(consistent_composite),
                ))
        df_to_append = None
        for leader in components:
            to_add = cruncher.derive_relationship(leader,
                                                  [aggregate])(to_infill_df)
            # Accumulate the infilled component timeseries into one dataframe
            if df_to_append is None:
                df_to_append = to_add
            else:
                df_to_append.append(to_add, inplace=True)
        return df_to_append
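A minimal usage sketch for this method. The call pattern mirrors Example #7 (build a cruncher from a database, then call infill_components); the class name DecomposeCollectionTimeDepRatio and the dataframes full_db and to_infill are assumptions for illustration, while the aggregate and component names come from the docstring above.

# full_db: pyam.IamDataFrame containing the component variables (assumed fixture)
# to_infill: pyam.IamDataFrame that already has "Emissions|CO2" (assumed fixture)
cruncher = DecomposeCollectionTimeDepRatio(full_db)  # assumed class name
infilled = cruncher.infill_components(
    aggregate="Emissions|CO2",
    components=["Emissions|CO2|AFOLU", "Emissions|CO2|Energy"],
    to_infill_df=to_infill,
    use_ar4_data=False,  # AR5 GWP100 (the default) for any unit conversion
)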