def test_add_spatial_multiple(test_mp):
    """Adding multiple top-level countries creates the expected spatial sets."""
    # Use the shared SCENARIO identifiers instead of the legacy `msg_args`
    # tuple, consistent with the other spatial tests in this file.
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    data = {"country": ["Austria", "Germany"]}
    scen.add_spatial_sets(data)

    # Both countries appear beneath the implicit "World" node
    exp = ["World", "Austria", "Germany"]
    obs = scen.set("node")
    npt.assert_array_equal(obs, exp)

    # Spatial levels: defaults plus the added "country" level
    exp = ["World", "global", "country"]
    obs = scen.set("lvl_spatial")
    npt.assert_array_equal(obs, exp)

    # Each country maps to "World" at level "country"
    exp = [["country", "Austria", "World"], ["country", "Germany", "World"]]
    obs = scen.set("map_spatial_hierarchy")
    npt.assert_array_equal(obs, exp)
def test_add_spatial_multiple(test_mp):
    """Spatial sets for two countries populate node, level, and hierarchy sets."""
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    scen.add_spatial_sets({"country": ["Austria", "Germany"]})

    # Expected contents of each spatial set after the call
    expected = {
        "node": ["World", "Austria", "Germany"],
        "lvl_spatial": ["World", "global", "country"],
        "map_spatial_hierarchy": [
            ["country", "Austria", "World"],
            ["country", "Germany", "World"],
        ],
    }
    for set_name, exp in expected.items():
        npt.assert_array_equal(scen.set(set_name), exp)
def test_add_spatial_single(test_mp):
    """A single country passed as a scalar is added beneath "World"."""
    scen = Scenario(test_mp, **SCENARIO['dantzig'], version='new')
    scen.add_spatial_sets({'country': 'Austria'})

    # Check each spatial set against its expected contents
    checks = [
        ('node', ['World', 'Austria']),
        ('lvl_spatial', ['World', 'global', 'country']),
        ('map_spatial_hierarchy', [['country', 'Austria', 'World']]),
    ]
    for set_name, expected in checks:
        npt.assert_array_equal(scen.set(set_name), expected)
def test_add_spatial_hierarchy(test_mp):
    """A nested country→state hierarchy creates nodes at every level."""
    # Use the shared SCENARIO identifiers instead of the legacy `msg_args`
    # tuple, consistent with the other spatial tests in this file.
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    data = {"country": {"Austria": {"state": ["Vienna", "Lower Austria"]}}}
    scen.add_spatial_sets(data)

    # Nodes are added depth-first: states first, then their parent country
    exp = ["World", "Vienna", "Lower Austria", "Austria"]
    obs = scen.set("node")
    npt.assert_array_equal(obs, exp)

    # Both "state" and "country" levels are created
    exp = ["World", "global", "state", "country"]
    obs = scen.set("lvl_spatial")
    npt.assert_array_equal(obs, exp)

    # Hierarchy entries: (level, node, parent node)
    exp = [
        ["state", "Vienna", "Austria"],
        ["state", "Lower Austria", "Austria"],
        ["country", "Austria", "World"],
    ]
    obs = scen.set("map_spatial_hierarchy")
    npt.assert_array_equal(obs, exp)
def test_add_spatial_hierarchy(test_mp):
    """Spatial sets built from a nested country/state mapping are complete."""
    scen = Scenario(test_mp, **SCENARIO["dantzig"], version="new")
    scen.add_spatial_sets(
        {"country": {"Austria": {"state": ["Vienna", "Lower Austria"]}}}
    )

    # Expected contents of each spatial set after the call
    expected = {
        "node": ["World", "Vienna", "Lower Austria", "Austria"],
        "lvl_spatial": ["World", "global", "state", "country"],
        "map_spatial_hierarchy": [
            ["state", "Vienna", "Austria"],
            ["state", "Lower Austria", "Austria"],
            ["country", "Austria", "World"],
        ],
    }
    for set_name, exp in expected.items():
        npt.assert_array_equal(scen.set(set_name), exp)
def test_clone(tmpdir):
    """Cloning a scenario across two platforms preserves its contents."""
    # Two local platforms, using the keyword-based Platform constructor
    # (driver=/path=) for consistency with the other tests in this file
    mp1 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp1")
    mp2 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp2")

    # A minimal scenario
    scen1 = Scenario(mp1, model="model", scenario="scenario", version="new")
    scen1.add_spatial_sets({"country": "Austria"})
    scen1.add_set("technology", "bar")
    # Keyword form of add_horizon(); the dict() form is deprecated
    scen1.add_horizon(year=[2010, 2020])
    scen1.commit("add minimal sets for testing")

    assert len(mp1.scenario_list(default=False)) == 1

    # Clone
    scen2 = scen1.clone(platform=mp2)

    # Return type of ixmp.Scenario.clone is message_ix.Scenario
    assert isinstance(scen2, Scenario)

    # Close and re-open both databases
    mp1.close_db()  # TODO this should be done automatically on del
    mp2.close_db()  # TODO this should be done automatically on del
    del mp1, mp2
    mp1 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp1")
    mp2 = ixmp.Platform(driver="hsqldb", path=tmpdir / "mp2")

    # Same scenarios present in each database
    assert all(
        mp1.scenario_list(default=False) == mp2.scenario_list(default=False))

    # Load both scenarios
    scen1 = Scenario(mp1, "model", "scenario")
    scen2 = Scenario(mp2, "model", "scenario")

    # Contents are identical
    assert all(scen1.set("node") == scen2.set("node"))
    assert all(scen1.set("year") == scen2.set("year"))
def test_clone(tmpdir):
    """A scenario cloned to a second platform survives closing and reopening."""

    def open_platforms():
        # Two local HSQLDB-backed platforms under the pytest tmpdir
        return (
            ixmp.Platform(driver="hsqldb", path=tmpdir / "mp1"),
            ixmp.Platform(driver="hsqldb", path=tmpdir / "mp2"),
        )

    mp1, mp2 = open_platforms()

    # Build a minimal scenario on the first platform
    scen1 = Scenario(mp1, model="model", scenario="scenario", version="new")
    scen1.add_spatial_sets({"country": "Austria"})
    scen1.add_set("technology", "bar")
    scen1.add_horizon(year=[2010, 2020])
    scen1.commit("add minimal sets for testing")

    assert 1 == len(mp1.scenario_list(default=False))

    # Clone across platforms; the result is a message_ix.Scenario
    scen2 = scen1.clone(platform=mp2)
    assert isinstance(scen2, Scenario)

    # Close and re-open both databases
    mp1.close_db()  # TODO this should be done automatically on del
    mp2.close_db()  # TODO this should be done automatically on del
    del mp1, mp2
    mp1, mp2 = open_platforms()

    # Same scenarios present in each database
    assert all(
        mp1.scenario_list(default=False) == mp2.scenario_list(default=False))

    # Reload both scenarios and compare contents set-by-set
    scen1 = Scenario(mp1, "model", "scenario")
    scen2 = Scenario(mp2, "model", "scenario")
    for set_name in ("node", "year"):
        assert all(scen1.set(set_name) == scen2.set(set_name))
def identify_nodes(scenario: Scenario) -> str:
    """Return the ID of a node codelist given the contents of `scenario`.

    Returns
    -------
    str
        The ID of the :doc:`/pkg-data/node` containing the regions of
        `scenario`.

    Raises
    ------
    ValueError
        if no codelist can be identified, or the nodes in the scenario do not
        match the children of the “World” node in the codelist.
    """
    from message_ix_models.model.structure import get_codes

    nodes = sorted(scenario.set("node"))

    # Candidate ID: split e.g. "R14_AFR" to "R14"
    cl_id = nodes[0].split("_")[0]

    try:
        # Get the corresponding codelist
        codes = get_codes(f"node/{cl_id}")
    except FileNotFoundError:
        raise ValueError(f"Couldn't identify node codelist from {repr(nodes)}")

    # Drop a known, non-standard "_GLB" node (if any) before comparing
    is_glb = [n.endswith("_GLB") for n in nodes]
    if any(is_glb):
        omit = nodes.pop(is_glb.index(True))
        log.info(f"Omit known, non-standard node '{omit}' from set to match")

    # Expected list of nodes: "World" plus its immediate children
    world = codes[codes.index("World")]  # type: ignore [arg-type]
    expected = [world] + world.child

    if set(nodes) != set(map(str, expected)):
        raise ValueError(
            "\n".join(
                [
                    f"Node IDs suggest codelist {repr(cl_id)}, values do not match:",
                    repr(nodes),
                    repr(expected),
                ]
            )
        )

    log.info(f"Identified node codelist {repr(cl_id)}")
    return cl_id
def test_add_horizon(test_mp, args, kwargs, exp):
    """add_horizon() accepts both the deprecated dict form and keyword form."""
    scen = Scenario(test_mp, **SCENARIO['dantzig'], version='new')

    # Call completes successfully; the deprecated dict() form must warn
    if isinstance(args[0], dict):
        pattern = (
            r"dict\(\) argument to add_horizon\(\); use year= and "
            "firstmodelyear="
        )
        with pytest.warns(DeprecationWarning, match=pattern):
            scen.add_horizon(*args, **kwargs)
    else:
        scen.add_horizon(*args, **kwargs)

    # Sets and parameters have the expected contents
    for expected, observed in (
        (exp["year"], scen.set("year")),
        (exp["fmy"], scen.cat("year", "firstmodelyear")),
        (exp["dp"], scen.par("duration_period")["value"]),
    ):
        npt.assert_array_equal(expected, observed)
def test_multi_db_run(tmpdir):
    """A solved scenario cloned across platforms keeps data and solution."""
    # create a new instance of the transport problem and solve it
    mp1 = Platform(driver="hsqldb", path=tmpdir / "mp1")
    scen1 = make_dantzig(mp1, solve=True)

    mp2 = Platform(driver="hsqldb", path=tmpdir / "mp2")
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit("wrong_unit")
    mp2.add_region("wrong_region", "country")

    # check that cloning across platforms must copy the full solution
    dest = dict(platform=mp2)
    pytest.raises(NotImplementedError, scen1.clone, keep_solution=False, **dest)
    pytest.raises(NotImplementedError, scen1.clone,
                  shift_first_model_year=1964, **dest)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = Platform(driver="hsqldb", path=tmpdir / "mp2")
    scen2 = Scenario(_mp2, **SCENARIO["dantzig"])
    assert_multi_db(mp1, _mp2)

    # check that sets, variables and parameter were copied correctly
    npt.assert_array_equal(scen1.set("node"), scen2.set("node"))
    # Bug fix: this comparison lacked `assert`, so it was a silent no-op
    assert scen2.firstmodelyear == 1963
    assert_frame_equal(scen1.par("var_cost"), scen2.par("var_cost"))
    assert np.isclose(scen2.var("OBJ")["lvl"], 153.675)
    assert_frame_equal(scen1.var("ACT"), scen2.var("ACT"))

    # check that custom unit, region and timeseries are migrated correctly
    assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
def test_multi_db_run(tmpdir):
    """Cloning a solved scenario to a second platform preserves its data."""
    # create a new instance of the transport problem and solve it
    # NOTE(review): this uses the legacy Platform(path, dbtype=...) signature;
    # a sibling test uses Platform(driver="hsqldb", path=...) — confirm which
    # ixmp version this file targets before modernizing.
    mp1 = Platform(tmpdir / 'mp1', dbtype='HSQLDB')
    scen1 = make_dantzig(mp1, solve=True)

    mp2 = Platform(tmpdir / 'mp2', dbtype='HSQLDB')
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit('wrong_unit')
    mp2.add_region('wrong_region', 'country')

    # check that cloning across platforms must copy the full solution
    dest = dict(platform=mp2)
    pytest.raises(ValueError, scen1.clone, keep_solution=False, **dest)
    pytest.raises(ValueError, scen1.clone, shift_first_model_year=1964, **dest)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = Platform(tmpdir / 'mp2', dbtype='HSQLDB')
    scen2 = Scenario(_mp2, **models['dantzig'])
    assert_multi_db(mp1, _mp2)

    # check that sets, variables and parameter were copied correctly
    npt.assert_array_equal(scen1.set('node'), scen2.set('node'))
    # Bug fix: this comparison lacked `assert`, so it was a silent no-op
    assert scen2.firstmodelyear == 1963
    pdt.assert_frame_equal(scen1.par('var_cost'), scen2.par('var_cost'))
    assert np.isclose(scen2.var('OBJ')['lvl'], 153.675)
    pdt.assert_frame_equal(scen1.var('ACT'), scen2.var('ACT'))

    # check that custom unit, region and timeseries are migrated correctly
    pdt.assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
def model_generator(
    test_mp,
    comment,
    tec_time,
    demand_time,
    time_steps,
    com_dict,
    yr=2020,
):
    """
    Generates a simple model with a few technologies, and a flexible number of
    time slices.

    Parameters
    ----------
    comment : string
        Annotation for saving different scenarios and comparing their results.
    tec_time : dict
        A dictionary for mapping a technology to its input/output temporal levels.
    demand_time : dict
        A dictionary for mapping the total "demand" specified at a temporal level.
    time_steps : list of tuples
        Information about each time slice, packed in a tuple with three elements,
        including: "temporal_lvl", number of time slices, and the parent time slice.
    com_dict : dict
        A dictionary for specifying "input" and "output" commodities.
    yr : int, optional
        Model year. The default is 2020.
    """

    # Building an empty scenario
    scen = Scenario(test_mp, "test_duration_time", comment, version="new")

    # Adding required sets
    scen.add_set("node", "fairyland")
    for c in com_dict.values():
        # Register all non-empty commodity names from the input/output spec
        scen.add_set("commodity", [x for x in list(c.values()) if x])

    scen.add_set("level", "final")
    scen.add_set("year", yr)
    scen.add_set("type_year", yr)
    scen.add_set("technology", list(tec_time.keys()))
    scen.add_set("mode", "standard")

    # Adding "time" related info to the model: "lvl_temporal", "time",
    # "map_temporal_hierarchy", and "duration_time"
    map_time = {}
    for [tmp_lvl, number, parent] in time_steps:
        scen.add_set("lvl_temporal", tmp_lvl)
        if parent == "year":
            # Top-level slices are named e.g. "s-1", "s-2" from the level's
            # first letter
            times = [tmp_lvl[0] + "-" + str(x + 1) for x in range(number)]
        else:
            # Nested slices are prefixed with their parent slice name, e.g.
            # "s-1_h-1"
            times = [
                p + "_" + tmp_lvl[0] + "-" + str(x + 1)
                for (p, x) in product(map_time[parent], range(number))
            ]

        map_time[tmp_lvl] = times
        scen.add_set("time", times)

        # Adding "map_temporal_hierarchy" and "duration_time"
        for h in times:
            if parent == "year":
                p = "year"
            else:
                # Recover the parent slice name by stripping this level's suffix
                p = h.split("_" + tmp_lvl[0])[0]
            # Temporal hierarchy (order: temporal level, time, parent time)
            scen.add_set("map_temporal_hierarchy", [tmp_lvl, h, p])

            # Duration time is relative to the duration of the parent
            # temporal level
            dur_parent = float(scen.par("duration_time", {"time": p})["value"])
            scen.add_par("duration_time", [h], dur_parent / number, "-")

    # Adding "demand" at a temporal level (total demand divided by the number of
    # time slices in that temporal level)
    for tmp_lvl, value in demand_time.items():
        times = scen.set("map_temporal_hierarchy", {"lvl_temporal": tmp_lvl})["time"]
        for h in times:
            scen.add_par(
                "demand",
                ["fairyland", "electr", "final", yr, h],
                value / len(times),
                "GWa",
            )

    # Adding "input" and "output" parameters of technologies
    for tec, [tmp_lvl_in, tmp_lvl_out] in tec_time.items():
        times_in = scen.set("map_temporal_hierarchy", {"lvl_temporal": tmp_lvl_in})[
            "time"
        ]
        times_out = scen.set("map_temporal_hierarchy", {"lvl_temporal": tmp_lvl_out})[
            "time"
        ]
        # If technology is linking two different temporal levels, pair every
        # input slice with every output slice; otherwise pair them one-to-one
        if tmp_lvl_in != tmp_lvl_out:
            time_pairs = product(times_in, times_out)
        else:
            time_pairs = zip(times_in, times_out)

        # Configuring data for "time_origin" and "time" in "input"
        for (h_in, h_act) in time_pairs:
            # "input" (only added when an input commodity is specified)
            inp = com_dict[tec]["input"]
            if inp:
                inp_spec = [yr, yr, "standard", "fairyland", inp, "final", h_act, h_in]
                scen.add_par("input", ["fairyland", tec] + inp_spec, 1, "-")
        # "output"
        for h in times_out:
            out = com_dict[tec]["output"]
            out_spec = [yr, yr, "standard", "fairyland", out, "final", h, h]
            scen.add_par("output", ["fairyland", tec] + out_spec, 1, "-")

    # Committing
    scen.commit("scenario was set up.")

    # Testing if the model solves in GAMS
    scen.solve(case=comment)

    # Testing if sum of "duration_time" is almost 1 at every temporal level
    for tmp_lvl in scen.set("lvl_temporal"):
        times = scen.set("map_temporal_hierarchy", {"lvl_temporal": tmp_lvl})[
            "time"
        ].to_list()
        assert (
            abs(sum(scen.par("duration_time", {"time": times})["value"]) - 1.0) < 1e-12
        )
def apply_spec(
    scenario: Scenario,
    spec: Mapping[str, ScenarioInfo],
    data: Callable = None,
    **options,
):
    """Apply `spec` to `scenario`.

    Parameters
    ----------
    spec
        A 'specification': :class:`dict` with 'require', 'remove', and 'add'
        keys and :class:`.ScenarioInfo` objects as values.
    data : callable, optional
        Function to add data to `scenario`. `data` can either manipulate the
        scenario directly, or return a :class:`dict` compatible with
        :func:`.add_par_data`.

    Other parameters
    ----------------
    dry_run : bool
        Don't modify `scenario`; only show what would be done. Default
        :obj:`False`. Exceptions will still be raised if the elements from
        ``spec['required']`` are missing; this serves as a check that the
        scenario has the required features for applying the spec.
    fast : bool
        Do not remove existing parameter data; increases speed on large
        scenarios.
    quiet : bool
        Only show log messages at level ``ERROR`` and higher. If :obj:`False`
        (default), show log messages at level ``DEBUG`` and higher.
    message : str
        Commit message.

    See also
    --------
    .add_par_data
    .strip_par_data
    .Code
    .ScenarioInfo
    """
    dry_run = options.get("dry_run", False)

    log.setLevel(logging.ERROR if options.get("quiet", False) else logging.DEBUG)

    if not dry_run:
        try:
            # A solution blocks structural changes; drop it if present
            scenario.remove_solution()
        except ValueError:
            # No solution stored; nothing to remove
            pass
        maybe_check_out(scenario)

    dump: Dict[str, pd.DataFrame] = {}  # Removed data

    for set_name in scenario.set_list():
        # Check whether this set is mentioned at all in the spec
        if 0 == sum(map(lambda info: len(info.set[set_name]), spec.values())):
            # Not mentioned; don't do anything
            continue

        log.info(f"Set {repr(set_name)}")

        # Base contents of the set
        base_set = scenario.set(set_name)
        # Unpack a multi-dimensional/indexed set to a list of tuples
        base = (
            list(base_set.itertuples(index=False))
            if isinstance(base_set, pd.DataFrame)
            else base_set.tolist()
        )

        log.info(f" {len(base)} elements")
        # log.debug(', '.join(map(repr, base)))  # All elements; verbose

        # Check for required elements
        require = spec["require"].set[set_name]
        log.info(f" Check {len(require)} required elements")

        # Raise an exception about the first missing element
        missing = list(filter(lambda e: e not in base, require))
        if len(missing):
            log.error(f" {len(missing)} elements not found: {repr(missing)}")
            raise ValueError

        # Remove elements and associated parameter values
        remove = spec["remove"].set[set_name]
        for element in remove:
            msg = f"{repr(element)} and associated parameter elements"

            if options.get("fast", False):
                # fast=True: leave parameter data in place for speed
                log.info(f" Skip removing {msg} (fast=True)")
                continue

            log.info(f" Remove {msg}")
            strip_par_data(scenario, set_name, element, dry_run=dry_run, dump=dump)

        # Add elements (none on a dry run)
        add = [] if dry_run else spec["add"].set[set_name]
        for element in add:
            scenario.add_set(
                set_name,
                element.id if isinstance(element, Code) else element,
            )

        if len(add):
            log.info(f" Add {len(add)} element(s)")
            log.debug(" " + ellipsize(add))

        log.info(" ---")

    N_removed = sum(len(d) for d in dump.values())
    log.info(f"{N_removed} parameter elements removed")

    # Add units to the Platform before adding data
    for unit in spec["add"].set["unit"]:
        # Normalize plain strings to Code objects
        unit = unit if isinstance(unit, Code) else Code(id=unit, name=unit)
        log.info(f"Add unit {repr(unit)}")
        scenario.platform.add_unit(unit.id, comment=str(unit.name))

    # Add data
    if callable(data):
        result = data(scenario, dry_run=dry_run)
        if result:
            # `data` function returned some data; use add_par_data()
            add_par_data(scenario, result, dry_run=dry_run)

    # Finalize
    log.info("Commit results.")
    maybe_commit(
        scenario,
        condition=not dry_run,
        message=options.get("message", f"{__name__}.apply_spec()"),
    )