def make_dantzig(mp: Platform, solve: bool = False, quiet: bool = False) -> Scenario:
    """Return :class:`ixmp.Scenario` of Dantzig's canning/transport problem.

    Parameters
    ----------
    mp : .Platform
        Platform on which to create the scenario.
    solve : bool, optional
        If :obj:`True`, then solve the scenario before returning. Default
        :obj:`False`.
    quiet : bool, optional
        If :obj:`True`, suppress console output when solving.

    Returns
    -------
    .Scenario

    See also
    --------
    .DantzigModel
    """
    # add custom units and region for timeseries data
    try:
        mp.add_unit("USD/km")
    except Exception:
        # Unit already exists. Pending bugfix from zikolach
        pass
    mp.add_region("DantzigLand", "country")

    # Initialize a new Scenario, and use the DantzigModel class' initialize()
    # method to populate it
    annot = "Dantzig's transportation problem for illustration and testing"
    scen = Scenario(
        mp,
        **models["dantzig"],  # type: ignore [arg-type]
        version="new",
        annotation=annot,
        scheme="dantzig",
        with_data=True,
    )

    # commit the scenario
    scen.commit("Import Dantzig's transport problem for testing.")

    # set this new scenario as the default version for the model/scenario name
    scen.set_as_default()

    if solve:
        # Solve the model using the GAMS code provided in the `tests` folder
        scen.solve(model="dantzig", case="transport_standard", quiet=quiet)

    # add timeseries data for testing `clone(keep_solution=False)`
    # and `remove_solution()`
    scen.check_out(timeseries_only=True)
    scen.add_timeseries(HIST_DF, meta=True)
    scen.add_timeseries(INP_DF)
    scen.commit("Import Dantzig's transport problem for testing.")

    return scen
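
# A minimal usage sketch of make_dantzig(): create a local HSQLDB-backed
# Platform, build and solve the Dantzig scenario, and read the objective
# value. The database path "dantzig_db" is hypothetical; any writable
# location works.
from ixmp import Platform

mp = Platform(driver="hsqldb", path="dantzig_db")
scen = make_dantzig(mp, solve=True, quiet=True)
print(scen.var("OBJ")["lvl"])  # objective value of the solved transport problem
mp.close_db()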
def test_multi_db_run(tmpdir):
    # create a new instance of the transport problem and solve it
    mp1 = Platform(driver="hsqldb", path=tmpdir / "mp1")
    scen1 = make_dantzig(mp1, solve=True)

    mp2 = Platform(driver="hsqldb", path=tmpdir / "mp2")
    # add another unit to make sure that the mapping is correct during clone
    mp2.add_unit("wrong_unit")
    mp2.add_region("wrong_region", "country")

    # check that cloning across platforms must copy the full solution
    dest = dict(platform=mp2)
    pytest.raises(NotImplementedError, scen1.clone, keep_solution=False, **dest)
    pytest.raises(
        NotImplementedError, scen1.clone, shift_first_model_year=1964, **dest
    )

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload the scenario
    _mp2 = Platform(driver="hsqldb", path=tmpdir / "mp2")
    scen2 = Scenario(_mp2, **SCENARIO["dantzig"])
    assert_multi_db(mp1, _mp2)

    # check that sets, variables and parameters were copied correctly
    npt.assert_array_equal(scen1.set("node"), scen2.set("node"))
    assert scen2.firstmodelyear == 1963
    assert_frame_equal(scen1.par("var_cost"), scen2.par("var_cost"))
    assert np.isclose(scen2.var("OBJ")["lvl"], 153.675)
    assert_frame_equal(scen1.var("ACT"), scen2.var("ACT"))

    # check that custom unit, region and timeseries are migrated correctly
    assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
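
# assert_multi_db() is called above but not defined in this excerpt. A sketch
# of what such a check might look like, assuming Platform.scenario_list()
# returns comparable frames on both platforms; the dropped metadata columns
# are an assumption, and this is an illustration rather than the original
# helper.
from pandas.testing import assert_frame_equal


def assert_multi_db(mp1, mp2):
    # After cloning, the default scenario lists of the source and destination
    # platforms should describe the same model/scenario combinations.
    drop = ["cre_date", "upd_date"]  # timestamps naturally differ between runs
    assert_frame_equal(
        mp1.scenario_list(default=True).drop(columns=drop, errors="ignore"),
        mp2.scenario_list(default=True).drop(columns=drop, errors="ignore"),
    )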
tmp[year] = tmp[year] * ghg_conversion[s]

ghg = tmp.combine_first(ghg)
ghg = ghg.groupby(['Technology', 'Parameter', 'Region', 'Mode', 'Units']).sum()
ghg['Species'] = 'GHG'
ghg = ghg.reset_index()
ap.add_tec_emi_fac(ghg)

# ## Add Carbon price (in INR per MtCO2e/yr)

if price_carbon:
    if not isinstance(price, float):
        print('Please ensure that the price is specified as a float')
    else:
        unit = 'USD/MtCO2e'
        if unit not in mp.units():
            mp.add_unit(unit, comment="Adding new unit required for emission tax")

        # Model years from the first model year onwards
        years = [
            y
            for y in ds.set("year")
            if int(y)
            >= ds.set("cat_year", filters={"type_year": ['firstmodelyear']})['year'][0]
        ]

        vals = []
        for y in years:
            if y not in scenario.set('type_year'):
                scenario.add_set('type_year', y)
            if y == '2015':
                val = price
            else:
                val = val * pow(scenario.par("interestrate", filters={'year': ['2015']
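
# A hedged sketch of one way to complete the carbon-price trajectory started
# above: grow the 2015 price at the scenario's "interestrate" value and add the
# result as an emission tax. It reuses `scenario`, `price`, `years` and `unit`
# from the snippet above. The annual compounding formula and the
# "tax_emission" parameter layout (node "India", type_emission "GHG",
# type_tec "all") are assumptions for illustration, not taken from the
# original code.
import pandas as pd

rate = scenario.par("interestrate", filters={"year": ["2015"]})["value"].iloc[0]
vals = [price * (1 + rate) ** (int(y) - 2015) for y in years]

tax = pd.DataFrame(
    {
        "node": "India",  # hypothetical node name
        "type_emission": "GHG",
        "type_tec": "all",
        "type_year": years,
        "value": vals,
        "unit": unit,
    }
)
scenario.check_out()
scenario.add_par("tax_emission", tax)
scenario.commit("Add an illustrative emission tax trajectory")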