Example #1
def test_add_spatial_single(test_mp):
    scen = Scenario(test_mp, **SCENARIO['dantzig'], version='new')
    data = {'country': 'Austria'}
    scen.add_spatial_sets(data)

    exp = ['World', 'Austria']
    obs = scen.set('node')
    npt.assert_array_equal(obs, exp)

    exp = ['World', 'global', 'country']
    obs = scen.set('lvl_spatial')
    npt.assert_array_equal(obs, exp)

    exp = [['country', 'Austria', 'World']]
    obs = scen.set('map_spatial_hierarchy')
    npt.assert_array_equal(obs, exp)
Example #2
def test_add_spatial_multiple(test_mp):
    scen = Scenario(test_mp, *msg_args, version='new')
    data = {'country': ['Austria', 'Germany']}
    scen.add_spatial_sets(data)

    exp = ['World', 'Austria', 'Germany']
    obs = scen.set('node')
    npt.assert_array_equal(obs, exp)

    exp = ['World', 'global', 'country']
    obs = scen.set('lvl_spatial')
    npt.assert_array_equal(obs, exp)

    exp = [['country', 'Austria', 'World'], ['country', 'Germany', 'World']]
    obs = scen.set('map_spatial_hierarchy')
    npt.assert_array_equal(obs, exp)
Example #3
def test_add_spatial_multiple(test_mp):
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    data = {"country": ["Austria", "Germany"]}
    scen.add_spatial_sets(data)

    exp = ["World", "Austria", "Germany"]
    obs = scen.set("node")
    npt.assert_array_equal(obs, exp)

    exp = ["World", "global", "country"]
    obs = scen.set("lvl_spatial")
    npt.assert_array_equal(obs, exp)

    exp = [["country", "Austria", "World"], ["country", "Germany", "World"]]
    obs = scen.set("map_spatial_hierarchy")
    npt.assert_array_equal(obs, exp)
Example #4
def test_multi_db_run(tmpdir):
    # create a new instance of the transport problem and solve it
    mp1 = Platform(driver="hsqldb", path=tmpdir / "mp1")
    scen1 = make_dantzig(mp1, solve=True)

    mp2 = Platform(driver="hsqldb", path=tmpdir / "mp2")
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit("wrong_unit")
    mp2.add_region("wrong_region", "country")

    # check that cloning across platforms must copy the full solution
    dest = dict(platform=mp2)
    pytest.raises(NotImplementedError,
                  scen1.clone,
                  keep_solution=False,
                  **dest)
    pytest.raises(NotImplementedError,
                  scen1.clone,
                  shift_first_model_year=1964,
                  **dest)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = Platform(driver="hsqldb", path=tmpdir / "mp2")
    scen2 = Scenario(_mp2, **SCENARIO["dantzig"])
    assert_multi_db(mp1, _mp2)

    # check that sets, variables and parameter were copied correctly
    npt.assert_array_equal(scen1.set("node"), scen2.set("node"))
    assert scen2.firstmodelyear == 1963
    assert_frame_equal(scen1.par("var_cost"), scen2.par("var_cost"))
    assert np.isclose(scen2.var("OBJ")["lvl"], 153.675)
    assert_frame_equal(scen1.var("ACT"), scen2.var("ACT"))

    # check that custom unit, region and timeseries are migrated correctly
    assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
Example #5
def test_add_spatial_hierarchy(test_mp):
    scen = Scenario(test_mp, *msg_args, version='new')
    data = {'country': {'Austria': {'state': ['Vienna', 'Lower Austria']}}}
    scen.add_spatial_sets(data)

    exp = ['World', 'Vienna', 'Lower Austria', 'Austria']
    obs = scen.set('node')
    npt.assert_array_equal(obs, exp)

    exp = ['World', 'global', 'state', 'country']
    obs = scen.set('lvl_spatial')
    npt.assert_array_equal(obs, exp)

    exp = [
        ['state', 'Vienna', 'Austria'],
        ['state', 'Lower Austria', 'Austria'],
        ['country', 'Austria', 'World'],
    ]
    obs = scen.set('map_spatial_hierarchy')
    npt.assert_array_equal(obs, exp)
Example #6
def test_add_spatial_hierarchy(test_mp):
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    data = {"country": {"Austria": {"state": ["Vienna", "Lower Austria"]}}}
    scen.add_spatial_sets(data)

    exp = ["World", "Vienna", "Lower Austria", "Austria"]
    obs = scen.set("node")
    npt.assert_array_equal(obs, exp)

    exp = ["World", "global", "state", "country"]
    obs = scen.set("lvl_spatial")
    npt.assert_array_equal(obs, exp)

    exp = [
        ["state", "Vienna", "Austria"],
        ["state", "Lower Austria", "Austria"],
        ["country", "Austria", "World"],
    ]
    obs = scen.set("map_spatial_hierarchy")
    npt.assert_array_equal(obs, exp)
Example #7
def test_add_bound_activity_up_modes(test_mp):
    def calculate(scen):
        return (
            scen
            .var('ACT')
            .groupby(['technology', 'mode'])['lvl']
            .sum()
            .loc['transport_from_seattle']
        )

    scen = Scenario(test_mp, *msg_args)
    scen.solve()

    # data for act bound
    data = pd.DataFrame({
        'node_loc': 'seattle',
        'technology': 'transport_from_seattle',
        'year_act': 2010,
        'time': 'year',
        'unit': 'cases',
    }, index=[0])

    # test limiting one mode
    clone = scen.clone('foo', 'bar', keep_solution=False)
    clone.check_out()
    exp = 0.5 * calculate(scen).sum()
    data['mode'] = 'to_chicago'
    data['value'] = exp
    clone.add_par('bound_activity_up', data)
    clone.commit('foo')
    clone.solve()
    obs = calculate(clone).loc['to_chicago']
    assert np.isclose(obs, exp)

    # test limiting all modes
    clone2 = scen.clone('foo', 'baz', keep_solution=False)
    clone2.check_out()
    exp = 0.95 * calculate(scen).sum()
    data['mode'] = 'all'
    data['value'] = exp
    clone2.add_par('bound_activity_up', data)
    clone2.commit('foo')
    clone2.solve()
    obs = calculate(clone2).sum()
    assert np.isclose(obs, exp)
Example #8
def test_add_share_mode_lo(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO["dantzig"]).clone()
    scen.solve(quiet=True)

    # data for share bound
    def calc_share(s):
        a = calculate_activity(
            s, tec="transport_from_san-diego").loc["to_new-york"]
        b = calculate_activity(s, tec="transport_from_san-diego").sum()
        return a / b

    exp = 1.05 * calc_share(scen)

    # add share constraints
    clone = scen.clone("foo", "baz", keep_solution=False)
    clone.check_out()
    clone.add_set("shares", "test-share")
    clone.add_par(
        "share_mode_lo",
        pd.DataFrame(
            {
                "shares": "test-share",
                "node_share": "san-diego",
                "technology": "transport_from_san-diego",
                "mode": "to_new-york",
                "year_act": _year,
                "time": "year",
                "unit": "cases",
                "value": exp,
            },
            index=[0],
        ),
    )
    clone.commit("foo")
    clone.solve()

    obs = calc_share(clone)
    assert np.isclose(obs, exp)

    orig_obj = scen.var("OBJ")["lvl"]
    new_obj = clone.var("OBJ")["lvl"]
    assert new_obj >= orig_obj
Example #9
def test_multi_db_run(tmpdir):
    # create a new instance of the transport problem and solve it
    mp1 = Platform(tmpdir / 'mp1', dbtype='HSQLDB')
    scen1 = make_dantzig(mp1, solve=True)

    mp2 = Platform(tmpdir / 'mp2', dbtype='HSQLDB')
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit('wrong_unit')
    mp2.add_region('wrong_region', 'country')

    # check that cloning across platforms must copy the full solution
    dest = dict(platform=mp2)
    pytest.raises(ValueError, scen1.clone, keep_solution=False, **dest)
    pytest.raises(ValueError, scen1.clone, shift_first_model_year=1964, **dest)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = Platform(tmpdir / 'mp2', dbtype='HSQLDB')
    scen2 = Scenario(_mp2, **models['dantzig'])
    assert_multi_db(mp1, _mp2)

    # check that sets, variables and parameter were copied correctly
    npt.assert_array_equal(scen1.set('node'), scen2.set('node'))
    assert scen2.firstmodelyear == 1963
    pdt.assert_frame_equal(scen1.par('var_cost'), scen2.par('var_cost'))
    assert np.isclose(scen2.var('OBJ')['lvl'], 153.675)
    pdt.assert_frame_equal(scen1.var('ACT'), scen2.var('ACT'))

    # check that custom unit, region and timeseries are migrated correctly
    pdt.assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
Example #10
def test_add_share_mode_lo(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO['dantzig']).clone()
    scen.solve()

    # data for share bound
    def calc_share(s):
        a = calculate_activity(
            s, tec='transport_from_san-diego').loc['to_new-york']
        b = calculate_activity(s, tec='transport_from_san-diego').sum()
        return a / b

    exp = 1.05 * calc_share(scen)

    # add share constraints
    clone = scen.clone('foo', 'baz', keep_solution=False)
    clone.check_out()
    clone.add_set('shares', 'test-share')
    clone.add_par(
        'share_mode_lo',
        pd.DataFrame(
            {
                'shares': 'test-share',
                'node_share': 'san-diego',
                'technology': 'transport_from_san-diego',
                'mode': 'to_new-york',
                'year_act': _year,
                'time': 'year',
                'unit': 'cases',
                'value': exp,
            },
            index=[0]))
    clone.commit('foo')
    clone.solve()

    obs = calc_share(clone)
    assert np.isclose(obs, exp)

    orig_obj = scen.var('OBJ')['lvl']
    new_obj = clone.var('OBJ')['lvl']
    assert new_obj >= orig_obj
Example #11
def test_add_share_mode_up(test_mp):
    scen = Scenario(test_mp, *msg_args).clone()
    scen.solve()

    # data for share bound
    def calc_share(s):
        a = calculate_activity(s,
                               tec='transport_from_seattle').loc['to_chicago']
        b = calculate_activity(s, tec='transport_from_seattle').sum()
        return a / b

    exp = 0.95 * calc_share(scen)

    # add share constraints
    clone = scen.clone(scenario='share_mode_up', keep_solution=False)
    clone.check_out()
    clone.add_set('shares', 'test-share')
    clone.add_par(
        'share_mode_up',
        pd.DataFrame(
            {
                'shares': 'test-share',
                'node_share': 'seattle',
                'technology': 'transport_from_seattle',
                'mode': 'to_chicago',
                'year_act': 2010,
                'time': 'year',
                'unit': 'cases',
                'value': exp,
            },
            index=[0]))
    clone.commit('foo')
    clone.solve()
    obs = calc_share(clone)
    assert np.isclose(obs, exp)

    orig_obj = scen.var('OBJ')['lvl']
    new_obj = clone.var('OBJ')['lvl']
    assert new_obj >= orig_obj
Example #12
def create_timeseries_df(results: message_ix.Scenario) -> message_ix.Scenario:
    logger.info('Create timeseries')
    results.check_out(timeseries_only=True)
    for var in ['ACT', 'CAP', 'CAP_NEW', 'EMISS']:
        df = group_data(var, results)
        if var != 'EMISS':
            df['variable'] = [
                f'{df.loc[i, "technology"]}|{df.loc[i, "variable"]}'
                for i in df.index
            ]
        else:
            df['variable'] = [
                f'{df.loc[i, "emission"]}|{df.loc[i, "variable"]}'
                for i in df.index
            ]
        df['node'] = 'World'  # TODO: implement once #6 is resolved
        df = df.rename(columns={'node': 'region'})
        ts = pd.pivot_table(df,
                            values='lvl',
                            index=['region', 'variable', 'unit'],
                            columns=['year']).reset_index(drop=False)
        results.add_timeseries(ts)
    results.commit('timeseries added')
    return results
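
The loop above reshapes long-format results (one row per region, variable, unit and year) into the wide layout that is then passed to add_timeseries. Below is a pandas-only sketch of that pivot, with made-up numbers standing in for the output of group_data (which is not shown in this listing):

import pandas as pd

# Made-up long-format data standing in for the result of group_data().
df = pd.DataFrame({
    'region': ['World'] * 4,
    'variable': ['coal_ppl|ACT', 'coal_ppl|ACT', 'wind_ppl|ACT', 'wind_ppl|ACT'],
    'unit': ['GWa'] * 4,
    'year': [700, 710, 700, 710],
    'lvl': [10.0, 12.0, 3.0, 4.0],
})

# One row per (region, variable, unit) with one column per year.
ts = pd.pivot_table(df,
                    values='lvl',
                    index=['region', 'variable', 'unit'],
                    columns=['year']).reset_index(drop=False)
print(ts)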
Example #13
def test_vintage_and_active_years(test_mp):
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")

    years = [2000, 2010, 2020]
    scen.add_horizon(year=years, firstmodelyear=2010)
    obs = scen.vintage_and_active_years()
    exp = pd.DataFrame(
        {
            "year_vtg": (2000, 2000, 2010, 2010, 2020),
            "year_act": (2010, 2020, 2010, 2020, 2020),
        }
    )
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    # Add a technology, its lifetime, and period durations
    scen.add_set("node", "foo")
    scen.add_set("technology", "bar")
    scen.add_par(
        "duration_period", pd.DataFrame({"unit": "???", "value": 10, "year": years})
    )
    scen.add_par(
        "technical_lifetime",
        pd.DataFrame(
            {
                "node_loc": "foo",
                "technology": "bar",
                "unit": "???",
                "value": 20,
                "year_vtg": years,
            }
        ),
    )

    # part is before horizon
    obs = scen.vintage_and_active_years(ya_args=("foo", "bar", "2000"))
    exp = pd.DataFrame({"year_vtg": (2000,), "year_act": (2010,)})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    obs = scen.vintage_and_active_years(
        ya_args=("foo", "bar", "2000"), in_horizon=False
    )
    exp = pd.DataFrame({"year_vtg": (2000, 2000), "year_act": (2000, 2010)})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    # fully in horizon
    obs = scen.vintage_and_active_years(ya_args=("foo", "bar", "2010"))
    exp = pd.DataFrame({"year_vtg": (2010, 2010), "year_act": (2010, 2020)})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    # part after horizon
    obs = scen.vintage_and_active_years(ya_args=("foo", "bar", "2020"))
    exp = pd.DataFrame({"year_vtg": (2020,), "year_act": (2020,)})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    # Advance the first model year
    scen.add_cat("year", "firstmodelyear", years[-1], is_unique=True)

    # Empty data frame: only 2000 and 2010 valid year_act for this node/tec;
    # but both are before the first model year
    obs = scen.vintage_and_active_years(
        ya_args=("foo", "bar", years[0]), in_horizon=True
    )
    pdt.assert_frame_equal(pd.DataFrame(columns=["year_vtg", "year_act"]), obs)

    # Exception is raised for incorrect arguments
    with pytest.raises(ValueError, match="3 arguments are required if using `ya_args`"):
        scen.vintage_and_active_years(ya_args=("foo", "bar"))
Example #14
def test_clone(tmpdir):
    # Two local platforms
    mp1 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp1")
    mp2 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp2")

    # A minimal scenario
    scen1 = Scenario(mp1, model="model", scenario="scenario", version="new")
    scen1.add_spatial_sets({"country": "Austria"})
    scen1.add_set("technology", "bar")
    scen1.add_horizon(year=[2010, 2020])
    scen1.commit("add minimal sets for testing")

    assert len(mp1.scenario_list(default=False)) == 1

    # Clone
    scen2 = scen1.clone(platform=mp2)

    # Return type of ixmp.Scenario.clone is message_ix.Scenario
    assert isinstance(scen2, Scenario)

    # Close and re-open both databases
    mp1.close_db()  # TODO this should be done automatically on del
    mp2.close_db()  # TODO this should be done automatically on del
    del mp1, mp2
    mp1 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp1")
    mp2 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp2")

    # Same scenarios present in each database
    assert all(mp1.scenario_list(default=False) == mp2.scenario_list(default=False))

    # Load both scenarios
    scen1 = Scenario(mp1, "model", "scenario")
    scen2 = Scenario(mp2, "model", "scenario")

    # Contents are identical
    assert all(scen1.set("node") == scen2.set("node"))
    assert all(scen1.set("year") == scen2.set("year"))
Example #15
def test_excel_read_write(message_test_mp, tmp_path):
    # Path to temporary file
    tmp_path /= "excel_read_write.xlsx"
    # Convert to string to ensure this can be handled
    fname = str(tmp_path)

    scen1 = Scenario(message_test_mp, **SCENARIO["dantzig"])
    scen1 = scen1.clone(keep_solution=False)
    scen1.check_out()
    scen1.init_set("new_set")
    scen1.add_set("new_set", "member")
    scen1.init_par("new_par", idx_sets=["new_set"])
    scen1.add_par("new_par", "member", 2, "-")
    scen1.commit("new set and parameter added.")

    # Writing to Excel without solving
    scen1.to_excel(fname)

    # Writing to Excel when scenario has a solution
    scen1.solve(quiet=True)
    scen1.to_excel(fname)

    scen2 = Scenario(message_test_mp, model="foo", scenario="bar", version="new")

    # Fails without init_items=True
    with pytest.raises(ValueError, match="no set 'new_set'"):
        scen2.read_excel(fname)

    # Succeeds with init_items=True
    scen2.read_excel(fname, init_items=True, commit_steps=True)

    exp = scen1.par("input")
    obs = scen2.par("input")
    pdt.assert_frame_equal(exp, obs)

    assert scen2.has_par("new_par")
    assert float(scen2.par("new_par")["value"]) == 2

    scen2.solve(quiet=True)
    assert np.isclose(scen2.var("OBJ")["lvl"], scen1.var("OBJ")["lvl"])
Example #16
def test_years_active_extend3(test_mp):
    test_mp.add_unit("year")
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    scen.add_set("node", "foo")
    scen.add_set("technology", "bar")

    # Periods of uneven length
    years = [1990, 1995, 2000, 2005, 2010, 2020, 2030]

    scen.add_horizon(year=years, firstmodelyear=2010)

    scen.add_set("year", [1992])
    scen.add_par("duration_period", "1992", 2, "y")
    scen.add_par("duration_period", "1995", 3, "y")

    scen.add_par(
        "technical_lifetime",
        pd.DataFrame(
            dict(
                node_loc="foo",
                technology="bar",
                unit="year",
                value=[20],
                year_vtg=1990,
            ),
        ),
    )

    obs = scen.years_active("foo", "bar", 1990)

    assert obs == [1990, 1992, 1995, 2000, 2005]
Example #17
def make_westeros(mp, emissions=False, solve=False):
    """Return an :class:`message_ix.Scenario` for the Westeros model.

    This is the same model used in the ``westeros_baseline.ipynb`` tutorial.

    Parameters
    ----------
    mp : ixmp.Platform
        Platform on which to create the scenario.
    emissions : bool, optional
        If True, the ``emissions_factor`` parameter is also populated for CO2.
    solve : bool, optional
        If True, the scenario is solved.
    """
    scen = Scenario(mp, version='new', **SCENARIO['westeros'])

    # Sets

    history = [690]
    model_horizon = [700, 710, 720]
    scen.add_horizon({
        'year': history + model_horizon,
        'firstmodelyear': model_horizon[0]
    })

    country = 'Westeros'
    scen.add_spatial_sets({'country': country})

    sets = {
        'technology': 'coal_ppl wind_ppl grid bulb'.split(),
        'mode': ['standard'],
        'level': 'secondary final useful'.split(),
        'commodity': 'electricity light'.split(),
    }

    for name, values in sets.items():
        scen.add_set(name, values)

    # Parameters — copy & paste from the tutorial notebook

    gdp_profile = pd.Series([1., 1.5, 1.9],
                            index=pd.Index(model_horizon, name='Time'))
    demand_per_year = 40 * 12 * 1000 / 8760
    light_demand = pd.DataFrame({
        'node': country,
        'commodity': 'light',
        'level': 'useful',
        'year': model_horizon,
        'time': 'year',
        'value': (100 * gdp_profile).round(),
        'unit': 'GWa',
    })
    scen.add_par("demand", light_demand)

    year_df = scen.vintage_and_active_years()
    vintage_years, act_years = year_df['year_vtg'], year_df['year_act']

    base = {
        'node_loc': country,
        'year_vtg': vintage_years,
        'year_act': act_years,
        'mode': 'standard',
        'time': 'year',
        'unit': '-',
    }

    base_input = make_df(base, node_origin=country, time_origin='year')
    base_output = make_df(base, node_dest=country, time_dest='year')

    bulb_out = make_df(base_output,
                       technology='bulb',
                       commodity='light',
                       level='useful',
                       value=1.0)
    scen.add_par('output', bulb_out)

    bulb_in = make_df(base_input,
                      technology='bulb',
                      commodity='electricity',
                      level='final',
                      value=1.0)
    scen.add_par('input', bulb_in)

    grid_efficiency = 0.9
    grid_out = make_df(base_output,
                       technology='grid',
                       commodity='electricity',
                       level='final',
                       value=grid_efficiency)
    scen.add_par('output', grid_out)

    grid_in = make_df(base_input,
                      technology='grid',
                      commodity='electricity',
                      level='secondary',
                      value=1.0)
    scen.add_par('input', grid_in)

    coal_out = make_df(base_output,
                       technology='coal_ppl',
                       commodity='electricity',
                       level='secondary',
                       value=1.)
    scen.add_par('output', coal_out)

    wind_out = make_df(base_output,
                       technology='wind_ppl',
                       commodity='electricity',
                       level='secondary',
                       value=1.)
    scen.add_par('output', wind_out)

    base_capacity_factor = {
        'node_loc': country,
        'year_vtg': vintage_years,
        'year_act': act_years,
        'time': 'year',
        'unit': '-',
    }

    capacity_factor = {
        'coal_ppl': 1,
        'wind_ppl': 1,
        'bulb': 1,
    }

    for tec, val in capacity_factor.items():
        df = make_df(base_capacity_factor, technology=tec, value=val)
        scen.add_par('capacity_factor', df)

    base_technical_lifetime = {
        'node_loc': country,
        'year_vtg': model_horizon,
        'unit': 'y',
    }

    lifetime = {
        'coal_ppl': 20,
        'wind_ppl': 20,
        'bulb': 1,
    }

    for tec, val in lifetime.items():
        df = make_df(base_technical_lifetime, technology=tec, value=val)
        scen.add_par('technical_lifetime', df)

    base_growth = {
        'node_loc': country,
        'year_act': model_horizon,
        'time': 'year',
        'unit': '-',
    }

    growth_technologies = [
        "coal_ppl",
        "wind_ppl",
    ]

    for tec in growth_technologies:
        df = make_df(base_growth, technology=tec, value=0.1)
        scen.add_par('growth_activity_up', df)

    historic_demand = 0.85 * demand_per_year
    historic_generation = historic_demand / grid_efficiency
    coal_fraction = 0.6

    base_capacity = {
        'node_loc': country,
        'year_vtg': history,
        'unit': 'GWa',
    }

    base_activity = {
        'node_loc': country,
        'year_act': history,
        'mode': 'standard',
        'time': 'year',
        'unit': 'GWa',
    }

    old_activity = {
        'coal_ppl': coal_fraction * historic_generation,
        'wind_ppl': (1 - coal_fraction) * historic_generation,
    }

    for tec, val in old_activity.items():
        df = make_df(base_activity, technology=tec, value=val)
        scen.add_par('historical_activity', df)

    act_to_cap = {
        # 20 year lifetime
        'coal_ppl': 1 / 10 / capacity_factor['coal_ppl'] / 2,
        'wind_ppl': 1 / 10 / capacity_factor['wind_ppl'] / 2,
    }

    for tec in act_to_cap:
        value = old_activity[tec] * act_to_cap[tec]
        df = make_df(base_capacity, technology=tec, value=value)
        scen.add_par('historical_new_capacity', df)

    rate = [0.05] * len(model_horizon)
    unit = ['-'] * len(model_horizon)
    scen.add_par("interestrate", model_horizon, rate, unit)

    base_inv_cost = {
        'node_loc': country,
        'year_vtg': model_horizon,
        'unit': 'USD/GWa',
    }

    # in $ / kW
    costs = {
        'coal_ppl': 500,
        'wind_ppl': 1500,
        'bulb': 5,
    }

    for tec, val in costs.items():
        df = make_df(base_inv_cost, technology=tec, value=val)
        scen.add_par('inv_cost', df)

    base_fix_cost = {
        'node_loc': country,
        'year_vtg': vintage_years,
        'year_act': act_years,
        'unit': 'USD/GWa',
    }

    # in $ / kW
    costs = {
        'coal_ppl': 30,
        'wind_ppl': 10,
    }

    for tec, val in costs.items():
        df = make_df(base_fix_cost, technology=tec, value=val)
        scen.add_par('fix_cost', df)

    base_var_cost = {
        'node_loc': country,
        'year_vtg': vintage_years,
        'year_act': act_years,
        'mode': 'standard',
        'time': 'year',
        'unit': 'USD/GWa',
    }

    # in $ / MWh
    costs = {
        'coal_ppl': 30,
        'grid': 50,
    }

    for tec, val in costs.items():
        df = make_df(base_var_cost, technology=tec, value=val)
        scen.add_par('var_cost', df)

    scen.commit('basic model of Westerosi electrification')
    scen.set_as_default()

    if emissions:
        scen.check_out()

        # Introduce the emission species CO2 and the emission category GHG
        scen.add_set('emission', 'CO2')
        scen.add_cat('emission', 'GHG', 'CO2')

        # we now add CO2 emissions to the coal powerplant
        base_emission_factor = {
            'node_loc': country,
            'year_vtg': vintage_years,
            'year_act': act_years,
            'mode': 'standard',
            'unit': 'USD/GWa',
        }

        emission_factor = make_df(base_emission_factor,
                                  technology='coal_ppl',
                                  emission='CO2',
                                  value=100.)
        scen.add_par('emission_factor', emission_factor)

        scen.commit('Added emissions sets/params to Westeros model.')

    if solve:
        scen.solve()

    return scen
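
A minimal usage sketch for make_westeros, assuming an HSQLDB-backed ixmp.Platform as used in the other examples here; the database path is illustrative, not part of the original code.

import ixmp

# Illustrative local database; any writable path works with the HSQLDB driver.
mp = ixmp.Platform(driver='hsqldb', path='/tmp/westeros_example')

# Build the Westeros baseline including CO2 emission factors, and solve it.
scen = make_westeros(mp, emissions=True, solve=True)

# After solving, the objective value can be read from the 'OBJ' variable.
print(scen.var('OBJ')['lvl'])

mp.close_db()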
Example #18
def test_reporter_no_solution(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO["dantzig"])

    pytest.raises(RuntimeError, Reporter.from_scenario, scen)
Example #19
def test_years_active_extend(test_mp):
    scen = Scenario(test_mp, *msg_multiyear_args)
    scen = scen.clone(keep_solution=False)
    scen.check_out()
    scen.add_set('year', ['2040', '2050'])
    scen.add_par('duration_period', '2040', 10, 'y')
    scen.add_par('duration_period', '2050', 10, 'y')
    df = scen.years_active('seattle', 'canning_plant', '2020')
    npt.assert_array_equal(df, [2020, 2030, 2040])
    scen.discard_changes()
Example #20
def test_cat_all(test_mp):
    scen = Scenario(test_mp, *msg_args)
    df = scen.cat('technology', 'all')
    npt.assert_array_equal(df, [
        'canning_plant', 'transport_from_seattle', 'transport_from_san-diego'
    ])
Example #21
def test_years_active(test_mp):
    scen = Scenario(test_mp, *msg_multiyear_args)
    df = scen.years_active('seattle', 'canning_plant', '2020')
    npt.assert_array_equal(df, [2020, 2030])
Example #22
def test_cat_list(test_mp):
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")

    # cat_list() returns default 'year' categories in a new message_ix.Scenario
    exp = ["firstmodelyear", "lastmodelyear", "initializeyear_macro"]
    assert exp == scen.cat_list("year")
Example #23
def test_excel_read_write(test_mp):
    fname = 'test_excel_read_write.xlsx'

    scen1 = Scenario(test_mp, *msg_args)
    scen1.to_excel(fname)

    scen2 = Scenario(test_mp, model='foo', scenario='bar', version='new')
    scen2.read_excel(fname)

    exp = scen1.par('input')
    obs = scen2.par('input')
    pdt.assert_frame_equal(exp, obs)

    scen1.solve()
    scen2.commit('foo')  # must be checked in
    scen2.solve()
    exp = scen1.var('OBJ')['lvl']
    obs = scen2.var('OBJ')['lvl']
    assert exp == obs

    os.remove(fname)
Example #24
def test_add_cat_unique(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO["dantzig multi-year"])
    scen2 = scen.clone(keep_solution=False)
    scen2.check_out()
    scen2.add_cat("year", "firstmodelyear", 1963, True)
    assert [1963] == scen2.cat("year", "firstmodelyear")
Example #25
def test_vintage_and_active_years_with_lifetime(test_mp):
    scen = Scenario(test_mp, *msg_args, version='new')
    years = ['2000', '2010', '2020']
    scen.add_horizon({'year': years, 'firstmodelyear': '2010'})
    scen.add_set('node', 'foo')
    scen.add_set('technology', 'bar')
    scen.add_par('duration_period',
                 pd.DataFrame({
                     'unit': '???',
                     'value': 10,
                     'year': years
                 }))
    scen.add_par(
        'technical_lifetime',
        pd.DataFrame({
            'node_loc': 'foo',
            'technology': 'bar',
            'unit': '???',
            'value': 20,
            'year_vtg': years,
        }))

    # part is before horizon
    obs = scen.vintage_and_active_years(ya_args=('foo', 'bar', '2000'))
    exp = pd.DataFrame({'year_vtg': (2000, ), 'year_act': (2010, )})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    obs = scen.vintage_and_active_years(ya_args=('foo', 'bar', '2000'),
                                        in_horizon=False)
    exp = pd.DataFrame({'year_vtg': (2000, 2000), 'year_act': (2000, 2010)})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    # fully in horizon
    obs = scen.vintage_and_active_years(ya_args=('foo', 'bar', '2010'))
    exp = pd.DataFrame({'year_vtg': (2010, 2010), 'year_act': (2010, 2020)})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order

    # part after horizon
    obs = scen.vintage_and_active_years(ya_args=('foo', 'bar', '2020'))
    exp = pd.DataFrame({'year_vtg': (2020, ), 'year_act': (2020, )})
    pdt.assert_frame_equal(exp, obs, check_like=True)  # ignore col order
Example #26
def test_years_active(test_mp):
    test_mp.add_unit("year")
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    scen.add_set("node", "foo")
    scen.add_set("technology", "bar")

    # Periods of uneven length
    years = [1990, 1995, 2000, 2005, 2010, 2020, 2030]

    # First period length is immaterial
    duration = [1900, 5, 5, 5, 5, 10, 10]
    scen.add_horizon(year=years, firstmodelyear=years[-1])
    scen.add_par(
        "duration_period", pd.DataFrame(zip(years, duration), columns=["year", "value"])
    )

    # 'bar' built in period '1995' with 25-year lifetime:
    # - is constructed in 1991-01-01.
    # - by 1995-12-31, has operated 5 years.
    # - operates until 2015-12-31. This is within the period '2020'.
    scen.add_par(
        "technical_lifetime",
        pd.DataFrame(
            dict(
                node_loc="foo",
                technology="bar",
                unit="year",
                value=25,
                year_vtg=years[1],
            ),
            index=[0],
        ),
    )

    result = scen.years_active("foo", "bar", years[1])

    # Correct return type
    assert isinstance(result, list)
    assert isinstance(result[0], int)

    # Years 1995 through 2020
    npt.assert_array_equal(result, years[1:-1])
Example #27
def test_init(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO["dantzig"])

    scen = scen.clone("foo", "bar")
    scen.check_out()
    MACRO.initialize(scen)
    scen.commit("foo")
    scen.solve(quiet=True)

    assert np.isclose(scen.var("OBJ")["lvl"], 153.675)
    assert "mapping_macro_sector" in scen.set_list()
    assert "aeei" in scen.par_list()
    assert "DEMAND" in scen.var_list()
    assert "COST_ACCOUNTING_NODAL" in scen.equ_list()
Example #28
def test_years_active_extend(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO["dantzig multi-year"])

    # Existing time horizon
    years = [1963, 1964, 1965]
    result = scen.years_active("seattle", "canning_plant", years[1])
    npt.assert_array_equal(result, years[1:])

    # Add years to the scenario
    years.extend([1993, 1995])
    scen.check_out()
    scen.add_set("year", years[-2:])
    scen.add_par("duration_period", "1993", 28, "y")
    scen.add_par("duration_period", "1995", 2, "y")

    # technical_lifetime of seattle/canning_plant/1964 is 30 years.
    # - constructed in 1964-01-01.
    # - by 1964-12-31, has operated 1 year.
    # - by 1965-12-31, has operated 2 years.
    # - operates until 1993-12-31.
    # - is NOT active within the period '1995' (1994-01-01 to 1995-12-31)
    result = scen.years_active("seattle", "canning_plant", 1964)
    npt.assert_array_equal(result, years[1:-1])
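
The comment above boils down to an interval check: a period counts as active if it begins no later than the last year of operation. A rough pure-Python sketch of that reasoning (not the message_ix implementation), assuming one-year periods for 1963-1965 as the comment implies:

# Period durations after the extension above (period label -> length in years).
durations = {1963: 1, 1964: 1, 1965: 1, 1993: 28, 1995: 2}
vintage, lifetime = 1964, 30

first_op_year = vintage - durations[vintage] + 1   # 1964
last_op_year = first_op_year + lifetime - 1        # 1993

# A period is active if its first year is not later than the last operating year.
active = [
    y for y in sorted(durations)
    if y >= vintage and y - durations[y] + 1 <= last_op_year
]
print(active)  # [1964, 1965, 1993]; the period 1995 starts in 1994, too late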
Example #29
def make_dantzig(mp, solve=False, multi_year=False, **solve_opts):
    """Return an :class:`message_ix.Scenario` for Dantzig's canning problem.

    Parameters
    ----------
    mp : ixmp.Platform
        Platform on which to create the scenario.
    solve : bool, optional
        If True, the scenario is solved.
    multi_year : bool, optional
        If True, the scenario has years 1963--1965 inclusive. Otherwise, the
        scenario has the single year 1963.
    """
    # add custom units and region for timeseries data
    mp.add_unit('USD/case')
    mp.add_unit('case')
    mp.add_region('DantzigLand', 'country')

    # initialize a new (empty) instance of an `ixmp.Scenario`
    scen = Scenario(
        mp,
        model=SCENARIO['dantzig']['model'],
        scenario='multi-year' if multi_year else 'standard',
        annotation="Dantzig's canning problem as a MESSAGE-scheme Scenario",
        version='new')

    # Sets
    # NB commit() is refused if technology and year are not given
    t = ['canning_plant', 'transport_from_seattle', 'transport_from_san-diego']
    sets = {
        'technology': t,
        'node': 'seattle san-diego new-york chicago topeka'.split(),
        'mode': 'production to_new-york to_chicago to_topeka'.split(),
        'level': 'supply consumption'.split(),
        'commodity': ['cases'],
    }

    for name, values in sets.items():
        scen.add_set(name, values)

    scen.add_horizon({'year': [1962, 1963], 'firstmodelyear': 1963})

    # Parameters
    par = {}

    demand = {
        'node': 'new-york chicago topeka'.split(),
        'value': [325, 300, 275]
    }
    par['demand'] = make_df(pd.DataFrame.from_dict(demand),
                            commodity='cases',
                            level='consumption',
                            time='year',
                            unit='case',
                            year=1963)

    b_a_u = {'node_loc': ['seattle', 'san-diego'], 'value': [350, 600]}
    par['bound_activity_up'] = make_df(pd.DataFrame.from_dict(b_a_u),
                                       mode='production',
                                       technology='canning_plant',
                                       time='year',
                                       unit='case',
                                       year_act=1963)
    par['ref_activity'] = par['bound_activity_up'].copy()

    input = pd.DataFrame(
        [
            ['to_new-york', 'seattle', 'seattle', t[1]],
            ['to_chicago', 'seattle', 'seattle', t[1]],
            ['to_topeka', 'seattle', 'seattle', t[1]],
            ['to_new-york', 'san-diego', 'san-diego', t[2]],
            ['to_chicago', 'san-diego', 'san-diego', t[2]],
            ['to_topeka', 'san-diego', 'san-diego', t[2]],
        ],
        columns=['mode', 'node_loc', 'node_origin', 'technology'])
    par['input'] = make_df(input,
                           commodity='cases',
                           level='supply',
                           time='year',
                           time_origin='year',
                           unit='case',
                           value=1,
                           year_act=1963,
                           year_vtg=1963)

    output = pd.DataFrame(
        [
            ['supply', 'production', 'seattle', 'seattle', t[0]],
            ['supply', 'production', 'san-diego', 'san-diego', t[0]],
            ['consumption', 'to_new-york', 'new-york', 'seattle', t[1]],
            ['consumption', 'to_chicago', 'chicago', 'seattle', t[1]],
            ['consumption', 'to_topeka', 'topeka', 'seattle', t[1]],
            ['consumption', 'to_new-york', 'new-york', 'san-diego', t[2]],
            ['consumption', 'to_chicago', 'chicago', 'san-diego', t[2]],
            ['consumption', 'to_topeka', 'topeka', 'san-diego', t[2]],
        ],
        columns=['level', 'mode', 'node_dest', 'node_loc', 'technology'])
    par['output'] = make_df(output,
                            commodity='cases',
                            time='year',
                            time_dest='year',
                            unit='case',
                            value=1,
                            year_act=1963,
                            year_vtg=1963)

    # Variable cost: cost per kilometre × distance (neither parametrized
    # explicitly)
    var_cost = pd.DataFrame(
        [
            ['to_new-york', 'seattle', 'transport_from_seattle', 0.225],
            ['to_chicago', 'seattle', 'transport_from_seattle', 0.153],
            ['to_topeka', 'seattle', 'transport_from_seattle', 0.162],
            ['to_new-york', 'san-diego', 'transport_from_san-diego', 0.225],
            ['to_chicago', 'san-diego', 'transport_from_san-diego', 0.162],
            ['to_topeka', 'san-diego', 'transport_from_san-diego', 0.126],
        ],
        columns=['mode', 'node_loc', 'technology', 'value'])
    par['var_cost'] = make_df(var_cost,
                              time='year',
                              unit='USD/case',
                              year_act=1963,
                              year_vtg=1963)

    for name, value in par.items():
        scen.add_par(name, value)

    if multi_year:
        scen.add_set('year', [1964, 1965])
        scen.add_par('technical_lifetime', ['seattle', 'canning_plant', 1964],
                     3, 'y')

    if solve:
        # Always read one equation. Used by test_core.test_year_int.
        scen.init_equ('COMMODITY_BALANCE_GT',
                      ['node', 'commodity', 'level', 'year', 'time'])
        solve_opts['equ_list'] = solve_opts.get('equ_list', []) \
            + ['COMMODITY_BALANCE_GT']

    scen.commit('Created a MESSAGE-scheme version of the transport problem.')
    scen.set_as_default()

    if solve:
        scen.solve(**solve_opts)

    scen.check_out(timeseries_only=True)
    scen.add_timeseries(HIST_DF, meta=True)
    scen.add_timeseries(INP_DF)
    scen.commit("Import Dantzig's transport problem for testing.")

    return scen
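
A usage sketch for make_dantzig, again assuming a local HSQLDB platform with an illustrative path; the expected objective of about 153.675 is the value asserted elsewhere in these examples for the single-year problem.

import ixmp
import numpy as np

# Illustrative local database path.
mp = ixmp.Platform(driver='hsqldb', path='/tmp/dantzig_example')

# Build and solve the single-year canning problem.
scen = make_dantzig(mp, solve=True)

# The tests above expect an objective value of roughly 153.675 for this setup.
assert np.isclose(scen.var('OBJ')['lvl'], 153.675)

mp.close_db()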
Example #30
def test_years_active_extended2(test_mp):
    test_mp.add_unit("year")
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    scen.add_set("node", "foo")
    scen.add_set("technology", "bar")

    # Periods of uneven length
    years = [1990, 1995, 2000, 2005, 2010, 2020, 2030]

    # First period length is immaterial
    duration = [1900, 5, 5, 5, 5, 10, 10]
    scen.add_horizon(year=years, firstmodelyear=years[-1])
    scen.add_par(
        "duration_period", pd.DataFrame(zip(years, duration), columns=["year", "value"])
    )

    # 'bar' built in period '2020' with 10-year lifetime:
    # - is constructed in 2011-01-01.
    # - by 2020-12-31, has operated 10 years.
    # - operates until 2020-12-31. This is within the period '2020'.
    # The test ensures that the correct lifetime value is retrieved,
    # i.e. the lifetime for the vintage 2020.
    scen.add_par(
        "technical_lifetime",
        pd.DataFrame(
            dict(
                node_loc="foo",
                technology="bar",
                unit="year",
                value=[20, 20, 20, 20, 20, 10, 10],
                year_vtg=years,
            ),
        ),
    )

    result = scen.years_active("foo", "bar", years[-2])

    # Correct return type
    assert isinstance(result, list)
    assert isinstance(result[0], int)

    # Only the single period 2020
    npt.assert_array_equal(result, years[-2])