def test_run_scenarios_multiple(self):
        ts1_erf = np.linspace(0, 4, 101)
        ts2_erf = np.sin(np.linspace(0, 4, 101))

        inp = ScmRun(
            data=np.vstack([ts1_erf, ts2_erf]).T,
            index=np.linspace(1750, 1850, 101).astype(int),
            columns={
                "scenario": ["test_scenario_1", "test_scenario_2"],
                "model": "unspecified",
                "climate_model": "junk input",
                "variable": "Effective Radiative Forcing",
                "unit": "W/m^2",
                "region": "World",
            },
        )

        model = self.tmodel()

        res = model.run_scenarios(inp)

        # re-run each scenario individually and check that the batched
        # results from run_scenarios match the single-scenario runs
        for scenario_ts in inp.groupby("scenario"):
            scenario = scenario_ts.get_unique_meta("scenario",
                                                   no_duplicates=True)

            model.set_drivers(
                scenario_ts.values.squeeze() *
                ur(inp.get_unique_meta("unit", no_duplicates=True)))
            model.reset()
            model.run()

            res_scen = res.filter(scenario=scenario)

            npt.assert_allclose(
                res_scen.filter(
                    variable="Surface Temperature|Upper").values.squeeze(),
                model._temp_upper_mag,
            )
            assert (res.filter(
                variable="Surface Temperature|Upper").get_unique_meta(
                    "unit", no_duplicates=True) == "delta_degC")

            npt.assert_allclose(
                res_scen.filter(
                    variable="Surface Temperature|Lower").values.squeeze(),
                model._temp_lower_mag,
            )
            assert (res.filter(
                variable="Surface Temperature|Lower").get_unique_meta(
                    "unit", no_duplicates=True) == "delta_degC")

            npt.assert_allclose(
                res_scen.filter(variable="Heat Uptake").values.squeeze(),
                model._rndt_mag,
            )
            assert (res.filter(variable="Heat Uptake").get_unique_meta(
                "unit", no_duplicates=True) == "W/m^2")
Example #2
def test_groupby_integer_metadata():
    def increment_ensemble_member(scmrun):
        scmrun["ensemble_member"] += 10

        return scmrun

    start = ScmRun(
        data=[[1, 2], [0, 1]],
        index=[2010, 2020],
        columns={
            "model": "model",
            "scenario": "scenario",
            "variable": "variable",
            "unit": "unit",
            "region": "region",
            "ensemble_member": [0, 1],
        },
    )

    # the incremented integer metadata should be preserved through groupby/map
    res = start.groupby(["variable", "region", "scenario",
                         "ensemble_member"]).map(increment_ensemble_member)

    assert (res["ensemble_member"] == start["ensemble_member"] + 10).all()
Example #3
def scmdf_to_emissions(scmdf, include_cfcs=True, startyear=1765, endyear=2100):
    """
    Opens an ScmDataFrame and extracts the data. Interpolates linearly
    between non-consecutive years in the SCEN file. Fills in chlorinated gases
    from a specified SSP scenario.

    Note this is a temporary fix for FaIR 1.6.

    Inputs:
        scmdf: ScmDataFrame

    Keywords:
        include_cfcs: bool
            MAGICC files do not come loaded with CFCs (indices 24-39).
            - if True, use the values from RCMIP for SSPs (all scenarios are
                the same).
            - Use False to ignore and create a 23-species emission file.
        startyear: First year of output file.
        endyear: Last year of output file.

    Returns:
        nt x 40 numpy emissions array (nt x 23 if ``include_cfcs`` is ``False``)
    """

    # We expect that aneris and silicone are going to give us a nicely
    # formatted ScmDataFrame with all 23 species present and correct at
    # timesteps 2015, 2020 and then ten-yearly to 2100.
    # We also implicitly assume that data up until 2014 will follow the SSP
    # historical.
    # This adapter will not be tested on anything else!

    n_cols = 40
    nt = endyear - startyear + 1

    data_out = np.ones((nt, n_cols)) * np.nan
    data_out[:, 0] = np.arange(startyear, endyear + 1)

    if not has_scmdata:
        raise ImportError(
            "This is not going to work without having scmdata installed")

    if not isinstance(scmdf, ScmDataFrame):
        raise TypeError("scmdf must be an scmdata.ScmDataFrame instance")

    if not include_cfcs:
        raise NotImplementedError("include_cfcs equal to False")

    if scmdf[["model", "scenario"]].drop_duplicates().shape[0] != 1:
        raise AssertionError("Should only have one model-scenario pair")

    scen_start_year = 2015

    scmdf = ScmRun(scmdf.timeseries()).interpolate(
        [dt.datetime(y, 1, 1) for y in range(scen_start_year, endyear + 1)])

    years = scmdf["year"].values
    first_scenyear = years[0]
    first_scen_row = int(first_scenyear - startyear)

    # If correct units and interpolation were guaranteed, we could do this for
    # the scenario data too, which would be quicker.
    hist_df = ssp245_world_emms_holder.values_fair_units.filter(
        year=range(startyear, 2015)).timeseries()

    future_ssp245_df = ssp245_world_emms_holder.values_fair_units.filter(
        year=range(2015, endyear + 1)).timeseries()

    for species in EMISSIONS_SPECIES_UNITS_CONTEXT["species"]:
        fair_col, _, _ = _get_fair_col_unit_context(species)

        hist_df_row = hist_df.index.get_level_values("variable").str.endswith(
            species)

        data_out[:first_scen_row,
                 fair_col] = hist_df[hist_df_row].values.squeeze()

        future_ssp245_df_row = future_ssp245_df.index.get_level_values(
            "variable").str.endswith(species)

        data_out[first_scen_row:, fair_col] = future_ssp245_df[
            future_ssp245_df_row].values.squeeze()

    for var_df in scmdf.groupby("variable"):
        variable = var_df.get_unique_meta("variable", no_duplicates=True)
        in_unit = var_df.get_unique_meta("unit", no_duplicates=True)
        fair_col, fair_unit, context = _get_fair_col_unit_context(variable)

        if in_unit != fair_unit:
            var_df_fair_unit = var_df.convert_unit(fair_unit, context=context)
        else:
            var_df_fair_unit = var_df

        data_out[first_scen_row:, fair_col] = var_df_fair_unit.values.squeeze()

    return data_out
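
As a rough illustration of how this adapter might be called, the sketch below passes a minimal single-scenario ScmDataFrame through scmdf_to_emissions. This is a hypothetical usage example, not part of the original snippet: the variable name, unit and 5-yearly time grid are assumptions about the RCMIP-style naming accepted by _get_fair_col_unit_context, and scmdf_to_emissions together with its module-level helpers (ssp245_world_emms_holder, EMISSIONS_SPECIES_UNITS_CONTEXT, ...) is assumed to be importable from the surrounding FaIR module.

import numpy as np
from scmdata import ScmDataFrame

# One CO2 fossil emissions timeseries on a 5-yearly grid from 2015 to 2100.
# Species not supplied here are filled from the SSP2-4.5 defaults by the
# adapter above; the variable name and unit are illustrative assumptions.
years = list(range(2015, 2101, 5))
scmdf = ScmDataFrame(
    data=np.linspace(10.0, 5.0, len(years)).reshape(-1, 1),
    index=years,
    columns={
        "model": "example_model",
        "scenario": "example_scenario",
        "variable": "Emissions|CO2|MAGICC Fossil and Industrial",
        "unit": "Gt C / yr",
        "region": "World",
    },
)

emissions = scmdf_to_emissions(scmdf)  # defaults: startyear=1765, endyear=2100
print(emissions.shape)  # (336, 40): one row per year, year column plus 39 species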