def test_import_ts(ixmp_cli, test_mp, test_data_path):
    # Ensure the 'canning problem'/'standard' TimeSeries exists
    populate_test_platform(test_mp)

    # Invoke the CLI to import data to version 1 of the TimeSeries
    result = ixmp_cli.invoke([
        "--platform", test_mp.name,
        "--model", models["dantzig"]["model"],
        "--scenario", models["dantzig"]["scenario"],
        "--version", "1",
        "import", "timeseries",
        "--firstyear", "2020",
        "--lastyear", "2200",
        str(test_data_path / "timeseries_canning.csv"),
    ])
    assert result.exit_code == 0, result.output

    # Expected data
    exp = pd.DataFrame.from_dict({
        "region": ["World"],
        "variable": ["Testing"],
        "unit": ["???"],
        "year": [2020],
        "value": [28.3],
        "model": ["canning problem"],
        "scenario": ["standard"],
    })

    # The specified TimeSeries version contains the expected data
    scen = ixmp.Scenario(test_mp, **models["dantzig"], version=1)
    assert_frame_equal(scen.timeseries(variable=["Testing"]), exp)

    # The data is not present in other versions
    scen = ixmp.Scenario(test_mp, **models["dantzig"], version=2)
    assert len(scen.timeseries(variable=["Testing"])) == 0
def test_timeseries_remove_all_data(test_mp):
    args_all = ('Douglas Adams', 'test_remove_all')

    scen = ixmp.Scenario(test_mp, *args_all, version='new', annotation='fo')
    scen.add_timeseries(TS_DF.pivot_table(values='value', index=cols_str))
    scen.commit('importing a testing timeseries')

    scen = ixmp.Scenario(test_mp, *args_all)
    assert_timeseries(scen, TS_DF)

    exp = TS_DF.copy()
    exp['variable'] = 'Testing2'

    scen.check_out()
    scen.add_timeseries(exp)
    scen.remove_timeseries(TS_DF)
    scen.commit('testing for removing a full timeseries row')

    assert scen.timeseries(region='World', variable='Testing').empty
    assert_timeseries(scen, exp)
def test_clone(self, mp):
    scen = ixmp.Scenario(mp, **models['dantzig'], version=1)
    scen.remove_solution()
    scen.check_out()
    scen.init_set('h')
    scen.add_set('h', 'test')
    scen.commit("adding an index set 'h', with element 'test'")

    scen2 = scen.clone(keep_solution=False)

    # Cloned scenario contains added set
    obs = scen2.set('h')
    npt.assert_array_equal(obs, ['test'])
def main(ctx, dbprops, model, scenario, version):
    """Command interface, e.g. $ ixmp COMMAND"""
    # Load the indicated Platform
    if dbprops:
        mp = ixmp.Platform(dbprops)
        ctx.obj = dict(mp=mp)

        # With a Platform, load the indicated Scenario
        if model and scenario:
            scen = ixmp.Scenario(mp, model, scenario, version=version)
            ctx.obj['scen'] = scen
def test_clone_edit(test_mp):
    scen = ixmp.Scenario(test_mp, *can_args)
    scen2 = scen.clone(keep_solution=False)
    scen2.check_out()
    scen2.change_scalar('f', 95.0, 'USD/km')
    scen2.commit('change transport cost')

    obs = scen.scalar('f')
    exp = {'unit': 'USD/km', 'value': 90}
    assert obs == exp

    obs = scen2.scalar('f')
    exp = {'unit': 'USD/km', 'value': 95}
    assert obs == exp
def test_unique_meta_scenario(mp, meta):
    """
    When setting a meta key on a specific Scenario run, setting the same key
    on a higher level (Model or Model+Scenario) should fail.
    """
    scen = ixmp.Scenario(mp, **DANTZIG)
    scen.set_meta(meta)

    # add a second scenario and verify that setting+getting Meta works
    scen2 = ixmp.Scenario(mp, **DANTZIG, version="new")
    scen2.commit("save dummy scenario")
    scen2.set_meta(meta)
    assert scen2.get_meta() == scen.get_meta()

    expected = (
        r"The meta category .* is already used at another level: "
        r"model canning problem, scenario standard, "
    )
    with pytest.raises(Exception, match=expected):
        mp.set_meta(meta, **DANTZIG)
    with pytest.raises(Exception, match=expected):
        mp.set_meta(meta, model=DANTZIG["model"])
def test_verbose_exception(test_mp, exception_verbose_true):
    # Exception stack trace is logged for debugging
    with pytest.raises(RuntimeError) as exc_info:
        ixmp.Scenario(test_mp, model="foo", scenario="bar", version=-1)

    exc_msg = exc_info.value.args[0]
    assert (
        "There exists no Scenario 'foo|bar' (version: -1) in the database!"
        in exc_msg
    )
    assert "at.ac.iiasa.ixmp.database.DbDAO.getRunId" in exc_msg
    assert "at.ac.iiasa.ixmp.Platform.getScenario" in exc_msg
def test_range(test_mp):
    scen = ixmp.Scenario(test_mp, *can_args, version='new')

    scen.init_set('ii')
    ii = range(1, 20, 2)

    # range instance is automatically converted to list of str in add_set
    scen.add_set('ii', ii)

    scen.init_par('new_par', idx_sets='ii')

    # range instance is a valid key argument to add_par
    scen.add_par('new_par', ii, [1.2] * len(ii))
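# Illustration only (plain Python, not part of the ixmp API): the automatic
# conversion noted in the comment above is equivalent to stringifying each
# element of the range. `ii_as_str` is a name introduced for this sketch.
ii_as_str = [str(i) for i in range(1, 20, 2)]
assert ii_as_str == ['1', '3', '5', '7', '9', '11', '13', '15', '17', '19']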
def test_read_excel_big(test_mp, tmp_path):
    """Excel files with model items split across sheets can be read.

    https://github.com/iiasa/ixmp/pull/345.
    """
    tmp_path /= 'output.xlsx'

    # Write a 25-element parameter with max_row=10 → split across 3 sheets
    scen = ixmp.Scenario(test_mp, **models['dantzig'], version="new")
    add_random_model_data(scen, 25)
    scen.to_excel(tmp_path, items=ixmp.ItemType.MODEL, max_row=10)

    # Initialize target scenario for reading
    scen_empty = ixmp.Scenario(test_mp, "foo", "bar", version="new")
    scen_empty.init_set("random_set")
    scen_empty.init_par(
        "random_par",
        scen.idx_sets("random_par"),
        scen.idx_names("random_par"),
    )

    # File can be read
    scen_empty.read_excel(tmp_path)

    assert len(scen_empty.par("random_par")) == 25
def test_meta_partial_overwrite(mp):
    meta1 = {
        "sample_string": 3.0,
        "another_string": "string_value",
        "sample_bool": False,
    }
    meta2 = {"sample_string": 5.0, "yet_another_string": "hello", "sample_bool": True}
    scen = ixmp.Scenario(mp, **DANTZIG)
    scen.set_meta(meta1)
    scen.set_meta(meta2)

    expected = copy.copy(meta1)
    expected.update(meta2)
    obs = scen.get_meta()
    assert obs == expected
def test_meta_bulk(self, mp, test_dict):
    scen = ixmp.Scenario(mp, **models["dantzig"], version=1)
    scen.set_meta(test_dict)

    # test all
    obs_dict = scen.get_meta()
    for k, exp in test_dict.items():
        obs = obs_dict[k]
        assert obs == exp

    # check updating metadata (replace and append)
    scen.set_meta({"test_int": 1234567, "new_attr": "new_attr"})
    assert scen.get_meta("test_int") == 1234567
    assert scen.get_meta("new_attr") == "new_attr"
def test_unique_meta(mp, meta):
    """
    When setting a meta category on two distinct levels, a uniqueness error is
    expected.
    """
    scenario = ixmp.Scenario(mp, **DANTZIG, version="new")
    scenario.commit("save dummy scenario")
    mp.set_meta(meta, model=DANTZIG["model"])

    expected = (
        r"The meta category .* is already used at another level: "
        r"model canning problem, scenario null, version null"
    )
    with pytest.raises(Exception, match=expected):
        mp.set_meta(meta, **DANTZIG, version=scenario.version)

    scen = ixmp.Scenario(mp, **DANTZIG)
    with pytest.raises(Exception, match=expected):
        scen.set_meta(meta)

    # changing the category value type of an entry should also raise an error
    meta = {"sample_entry": 3}
    mp.set_meta(meta, **DANTZIG)
    meta["sample_entry"] = "test-string"
    expected = (
        r"The meta category .* is already used at another level: "
        r"model canning problem, scenario standard, version null"
    )
    with pytest.raises(Exception, match=expected):
        mp.set_meta(meta, **DANTZIG, version=scenario.version)
def test_gh_210(test_mp):
    scen = ixmp.Scenario(test_mp, *can_args, version='new')
    i = ['i0', 'i1', 'i2']
    scen.init_set('i')
    scen.add_set('i', i)
    scen.init_par('foo', idx_sets='i')
    columns = ['i', 'value']
    foo_data = pd.DataFrame(zip(i, [10, 20, 30]), columns=columns)

    # foo_data is not modified by add_par()
    scen.add_par('foo', foo_data)

    assert all(foo_data.columns == columns)
def test_meta_bulk(self, mp, test_dict):
    scen = ixmp.Scenario(mp, **models['dantzig'], version=1)
    scen.set_meta(test_dict)

    # test all
    obs_dict = scen.get_meta()
    for k, exp in test_dict.items():
        obs = obs_dict[k]
        assert obs == exp

    # check updating metadata (replace and append)
    scen.set_meta({'test_int': 1234567, 'new_attr': 'new_attr'})
    assert scen.get_meta('test_int') == 1234567
    assert scen.get_meta('new_attr') == 'new_attr'
def test_reporting_filters(test_mp, tmp_path):
    """Reporting can be filtered ex ante."""
    scen = ixmp.Scenario(test_mp, 'Reporting filters', 'Reporting filters',
                         'new')
    t, t_foo, t_bar, x = add_test_data(scen)
    rep = Reporter.from_scenario(scen)
    x_key = rep.full_key('x')

    def assert_t_indices(labels):
        assert set(rep.get(x_key).index.levels[0]) == set(labels)

    # 1. Set filters directly
    rep.graph['filters'] = {'t': t_foo}
    assert_t_indices(t_foo)

    # Reporter can be re-used by changing filters
    rep.graph['filters'] = {'t': t_bar}
    assert_t_indices(t_bar)

    rep.graph['filters'] = {}
    assert_t_indices(t)

    # 2. Set filters using a convenience method
    rep = Reporter.from_scenario(scen)
    rep.set_filters(t=t_foo)
    assert_t_indices(t_foo)

    # Clear filters using the convenience method
    rep.set_filters(t=None)
    assert_t_indices(t)

    # 3. Set filters via configuration keys
    # NB passes through from_scenario() -> __init__() -> configure()
    rep = Reporter.from_scenario(scen, filters={'t': t_foo})
    assert_t_indices(t_foo)

    # Configuration key can also be read from file
    rep = Reporter.from_scenario(scen)

    # Write a temporary file containing the desired labels
    config_file = tmp_path / 'config.yaml'
    config_file.write_text('\n'.join([
        'filters:',
        ' t: {!r}'.format(t_bar),
    ]))

    rep.configure(config_file)
    assert_t_indices(t_bar)
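# Illustration only: the temporary config file written in the test above
# contains YAML of the following form. The labels in `example_t_bar` are
# made up; the real ones come from add_test_data().
example_t_bar = ['bar4', 'bar5', 'bar6']
example_text = '\n'.join(['filters:', ' t: {!r}'.format(example_t_bar)])
assert example_text == "filters:\n t: ['bar4', 'bar5', 'bar6']"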
def test_reporting_aggregate(test_mp):
    scen = ixmp.Scenario(test_mp, 'Group reporting', 'group reporting', 'new')
    t, t_foo, t_bar, x = add_test_data(scen)

    # Reporter
    rep = Reporter.from_scenario(scen)

    # Define some groups
    t_groups = {'foo': t_foo, 'bar': t_bar, 'baz': ['foo1', 'bar5', 'bar6']}

    # Add aggregates
    key1 = rep.aggregate('x:t-y', 'agg1', {'t': t_groups}, keep=True)

    # Group has expected key and contents
    assert key1 == 'x:t-y:agg1'

    # Aggregate is computed without error
    agg1 = rep.get(key1)

    # Expected set of keys along the aggregated dimension
    assert set(agg1.coords['t'].values) == set(t) | set(t_groups.keys())

    # Sums are as expected
    # TODO: the check_dtype arg assumes the Quantity backend is an AttrSeries;
    #       should that be made the default in assert_qty_allclose?
    assert_qty_allclose(agg1.sel(t='foo', drop=True),
                        x.sel(t=t_foo).sum('t'), check_dtype=False)
    assert_qty_allclose(agg1.sel(t='bar', drop=True),
                        x.sel(t=t_bar).sum('t'), check_dtype=False)
    assert_qty_allclose(agg1.sel(t='baz', drop=True),
                        x.sel(t=['foo1', 'bar5', 'bar6']).sum('t'),
                        check_dtype=False)

    # Add aggregates, without keeping originals
    key2 = rep.aggregate('x:t-y', 'agg2', {'t': t_groups}, keep=False)

    # Distinct keys
    assert key2 != key1

    # Only the aggregated and no original keys along the aggregated dimension
    agg2 = rep.get(key2)
    assert set(agg2.coords['t'].values) == set(t_groups.keys())

    with pytest.raises(NotImplementedError):
        # Not yet supported; requires two separate operations
        rep.aggregate('x:t-y', 'agg3', {'t': t_groups, 'y': [2000, 2010]})
def test_reporter_add_product(test_mp):
    scen = ixmp.Scenario(test_mp, 'reporter_add_product',
                         'reporter_add_product', 'new')
    *_, x = add_test_data(scen)
    rep = Reporter.from_scenario(scen)

    # add_product() works
    key = rep.add_product('x squared', 'x', 'x', sums=True)

    # Product has the expected dimensions
    assert key == 'x squared:t-y'

    # Product has the expected value
    exp = as_quantity(x * x)
    exp.attrs['_unit'] = UNITS('kilogram ** 2').units
    assert_qty_equal(exp, rep.get(key))
def test_init_set(test_mp):
    """Test ixmp.Scenario.init_set()."""
    scen = ixmp.Scenario(test_mp, *can_args)

    # Add set on a locked scenario
    with pytest.raises(RuntimeError,
                       match="This Scenario cannot be edited, do a checkout "
                             "first!"):
        scen.init_set('foo')

    scen = scen.clone(keep_solution=False)
    scen.check_out()
    scen.init_set('foo')

    # Initialize an already-existing set
    with pytest.raises(ValueError, match="'foo' already exists"):
        scen.init_set('foo')
def test_initialize(test_mp):
    # Expected names of items by type
    exp = defaultdict(list)
    for name, spec in MESSAGE_ITEMS.items():
        exp[spec["ix_type"]].append(name)

    # Use ixmp.Scenario to avoid invoking ixmp_source/Java code that
    # automatically populates empty scenarios
    s = ixmp.Scenario(test_mp, "test_initialize", "test_initialize",
                      version="new")

    # Initialization succeeds on a totally empty scenario
    MESSAGE.initialize(s)

    # The expected items exist
    for ix_type, exp_names in exp.items():
        obs_names = getattr(s, f"{ix_type}_list")()
        assert sorted(obs_names) == sorted(exp_names)
def test_aggregate(test_mp):
    scen = ixmp.Scenario(test_mp, 'Group reporting', 'group reporting', 'new')
    t, t_foo, t_bar, x = add_test_data(scen)

    # Reporter
    rep = Reporter.from_scenario(scen)

    # Define some groups
    t_groups = {'foo': t_foo, 'bar': t_bar, 'baz': ['foo1', 'bar5', 'bar6']}

    # Use the computation directly
    agg1 = computations.aggregate(as_quantity(x), {'t': t_groups}, True)

    # Expected set of keys along the aggregated dimension
    assert set(agg1.coords['t'].values) == set(t) | set(t_groups.keys())

    # Sums are as expected
    assert_qty_allclose(agg1.sel(t='foo', drop=True), x.sel(t=t_foo).sum('t'))
    assert_qty_allclose(agg1.sel(t='bar', drop=True), x.sel(t=t_bar).sum('t'))
    assert_qty_allclose(agg1.sel(t='baz', drop=True),
                        x.sel(t=['foo1', 'bar5', 'bar6']).sum('t'))

    # Use Reporter convenience method
    key2 = rep.aggregate('x:t-y', 'agg2', {'t': t_groups}, keep=True)

    # Group has expected key and contents
    assert key2 == 'x:t-y:agg2'

    # Aggregate is computed without error
    agg2 = rep.get(key2)

    assert_qty_equal(agg1, agg2)

    # Add aggregates, without keeping originals
    key3 = rep.aggregate('x:t-y', 'agg3', {'t': t_groups}, keep=False)

    # Distinct keys
    assert key3 != key2

    # Only the aggregated and no original keys along the aggregated dimension
    agg3 = rep.get(key3)
    assert set(agg3.coords['t'].values) == set(t_groups.keys())

    with pytest.raises(NotImplementedError):
        # Not yet supported; requires two separate operations
        rep.aggregate('x:t-y', 'agg3', {'t': t_groups, 'y': [2000, 2010]})
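# Toy illustration (made-up values, not ixmp data): each group produced by
# aggregate() above is simply the sum of x over the group's members along
# the 't' dimension.
import pandas as pd

toy = pd.Series({'foo1': 1.0, 'foo2': 2.0, 'bar4': 5.0}, name='x')
# the value stored under a new 'foo' label would be 1.0 + 2.0
assert toy[['foo1', 'foo2']].sum() == 3.0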
def test_scenario_delete_meta_warning(mp):
    """
    Scenario.delete_meta works but raises a deprecation warning.

    This test can be removed once Scenario.delete_meta is removed.
    """
    scen = ixmp.Scenario(mp, **DANTZIG)
    meta = {"sample_int": 3, "sample_string": "string_value"}
    remove_key = "sample_string"

    scen.set_meta(meta)
    with pytest.warns(DeprecationWarning):
        scen.delete_meta(remove_key)

    expected = copy.copy(meta)
    del expected[remove_key]
    obs = scen.get_meta()
    assert obs == expected
def test_meta_partial_overwrite(mp):
    meta1 = {
        'sample_string': 3.0,
        'another_string': 'string_value',
        'sample_bool': False,
    }
    meta2 = {
        'sample_string': 5.0,
        'yet_another_string': 'hello',
        'sample_bool': True,
    }
    scen = ixmp.Scenario(mp, **DANTZIG)
    scen.set_meta(meta1)
    scen.set_meta(meta2)

    expected = copy.copy(meta1)
    expected.update(meta2)
    obs = scen.get_meta()
    assert obs == expected
def test_init_set(test_mp):
    """Test ixmp.Scenario.init_set()."""
    scen = ixmp.Scenario(test_mp, *can_args)

    # Add set on a locked scenario
    with pytest.raises(jpype.JException,
                       match="This Scenario cannot be edited, do a checkout "
                             "first!"):
        scen.init_set('foo')

    scen = scen.clone(keep_solution=False)
    scen.check_out()
    scen.init_set('foo')

    # Initialize an already-existing set
    with pytest.raises(jpype.JException,
                       match="An Item with the name 'foo' already exists!"):
        scen.init_set('foo')
def test_import_timeseries(test_mp_props, test_data_path):
    fname = test_data_path / 'timeseries_canning.csv'

    cmd = ('import-timeseries --dbprops="{}" --data="{}" --model="{}" '
           '--scenario="{}" --version="{}" --firstyear="{}"').format(
        test_mp_props, fname, 'canning problem', 'standard', 1, 2020)

    win = os.name == 'nt'
    subprocess.check_call(cmd, shell=not win)

    mp = ix.Platform(test_mp_props)
    scen = ix.Scenario(mp, 'canning problem', 'standard', 1)
    obs = scen.timeseries()

    df = {'region': ['World'], 'variable': ['Testing'], 'unit': ['???'],
          'year': [2020], 'value': [28.3]}
    exp = pd.DataFrame.from_dict(df)

    cols_str = ['region', 'variable', 'unit', 'year']
    npt.assert_array_equal(exp[cols_str], obs[cols_str])
    npt.assert_array_almost_equal(exp['value'], obs['value'])
def test_load_scenario_data(self, mp):
    """load_scenario_data() caches all data."""
    scen = ixmp.Scenario(mp, **models["dantzig"])
    scen.load_scenario_data()

    cache_key = scen.platform._backend._cache_key(scen, "par", "d")

    # Item exists in cache
    assert cache_key in scen.platform._backend._cache

    # Cache has not been used
    hits_before = scen.platform._backend._cache_hit.get(cache_key, 0)
    assert hits_before == 0

    # Retrieving the expected value
    assert "km" == scen.par("d", filters={"i": ["seattle"]}).loc[0, "unit"]

    # Cache was used to return the value
    hits_after = scen.platform._backend._cache_hit[cache_key]
    assert hits_after == hits_before + 1
def test_load_scenario_data(test_mp):
    """load_scenario_data() caches all data."""
    scen = ixmp.Scenario(test_mp, *can_args)
    scen.load_scenario_data()

    cache_key = scen.platform._backend._cache_key(scen, 'par', 'd')

    # Item exists in cache
    assert cache_key in scen.platform._backend._cache

    # Cache has not been used
    hits_before = scen.platform._backend._cache_hit.get(cache_key, 0)
    assert hits_before == 0

    # Retrieving the expected value
    assert 'km' == scen.par('d', filters={'i': ['seattle']}).loc[0, 'unit']

    # Cache was used to return the value
    hits_after = scen.platform._backend._cache_hit[cache_key]
    assert hits_after == hits_before + 1
def test_set_and_remove_meta_scenario(mp):
    """
    Test partial overwriting and meta deletion on the scenario level.
    """
    meta1 = {"sample_string": 3.0, "another_string": "string_value"}
    meta2 = {"sample_string": 5.0, "yet_another_string": "hello"}
    remove_key = "another_string"

    scen = ixmp.Scenario(mp, **DANTZIG)
    scen.set_meta(meta1)
    scen.set_meta(meta2)
    expected = copy.copy(meta1)
    expected.update(meta2)
    obs = scen.get_meta()
    assert expected == obs

    scen.remove_meta(remove_key)
    del expected[remove_key]
    obs = scen.get_meta()
    assert obs == expected
def test_excel_io_filters(ixmp_cli, test_mp, tmp_path):
    populate_test_platform(test_mp)
    tmp_path /= "dantzig.xlsx"

    url = (
        f"ixmp://{test_mp.name}/{models['dantzig']['model']}/"
        f"{models['dantzig']['scenario']}"
    )

    # Invoke the CLI to export data to Excel, with filters
    cmd = [
        "--url", url,
        "export", str(tmp_path),
        "--", "i=seattle",
    ]
    result = ixmp_cli.invoke(cmd)
    assert result.exit_code == 0, result.output

    # Import into a new model name
    url = f"ixmp://{test_mp.name}/foo model/bar scenario#new"
    cmd = [
        "--url", url,
        "import", "scenario", "--init-items", str(tmp_path),
    ]
    result = ixmp_cli.invoke(cmd)
    assert result.exit_code == 0, result.output

    # Load one of the imported parameters
    scen = ixmp.Scenario(test_mp, "foo model", "bar scenario")
    d = scen.par("d")

    # Data in (imported from) file has only filtered elements
    assert set(d["i"].unique()) == {"seattle"}
    assert len(d) == 3
def test_weakref():
    """Weak references allow Platforms to be del'd while Scenarios live."""
    mp = ixmp.Platform(
        backend='jdbc',
        driver='hsqldb',
        url='jdbc:hsqldb:mem:test_weakref',
    )

    # There is one reference to the Platform, and zero weak references
    assert getrefcount(mp) - 1 == 1
    assert getweakrefcount(mp) == 0

    # Create a single Scenario
    s = ixmp.Scenario(mp, 'foo', 'bar', version='new')

    # Still one reference to the Platform
    assert getrefcount(mp) - 1 == 1
    # …but additionally one weak reference
    assert getweakrefcount(mp) == 1

    # Make a local reference to the backend
    backend = mp._backend

    # Delete the Platform. Note that this only has an effect if there are no
    # existing references to it
    del mp

    # s.platform is a dead weak reference, so it can't be accessed
    with pytest.raises(ReferenceError):
        s.platform._backend

    # There is only one remaining reference to the backend: the *backend*
    # name in the local scope
    assert getrefcount(backend) - 1 == 1

    # The backend is garbage-collected at this point

    # The Scenario object still lives, but can't be used for anything
    assert s.model == 'foo'
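# ixmp-independent sketch: the ReferenceError asserted above comes from the
# standard-library weak-reference mechanism. `Obj` is a hypothetical
# stand-in, not an ixmp class.
import weakref


class Obj:
    """Hypothetical stand-in for an object held via a weak proxy."""


o = Obj()
p = weakref.proxy(o)
del o  # the only strong reference is gone, so the object is collected

# attribute access through the dead proxy raises ReferenceError, just like
# s.platform._backend in the test above
try:
    p.anything
except ReferenceError:
    pass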
def test_multi_db_run(tmpdir, test_data_path):
    # create a new instance of the transport problem and solve it
    mp1 = ixmp.Platform(backend='jdbc', driver='hsqldb', path=tmpdir / 'mp1')
    scen1 = make_dantzig(mp1, solve=test_data_path)

    mp2 = ixmp.Platform(backend='jdbc', driver='hsqldb', path=tmpdir / 'mp2')
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit('wrong_unit')
    mp2.add_region('wrong_region', 'country')

    # check that cloning across platforms must copy the full solution
    pytest.raises(NotImplementedError, scen1.clone, platform=mp2,
                  keep_solution=False)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = ixmp.Platform(backend='jdbc', driver='hsqldb', path=tmpdir / 'mp2')
    assert_multi_db(mp1, _mp2)
    scen2 = ixmp.Scenario(_mp2, **models['dantzig'])

    # check that sets, variables and parameter were copied correctly
    assert_array_equal(scen1.set('i'), scen2.set('i'))
    assert_frame_equal(scen1.par('d'), scen2.par('d'))
    assert np.isclose(scen2.var('z')['lvl'], 153.675)
    assert_frame_equal(scen1.var('x'), scen2.var('x'))

    # check that custom unit, region and timeseries are migrated correctly
    assert scen2.par('f')['value'] == 90.0
    assert scen2.par('f')['unit'] == 'USD_per_km'
    assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)