def test_add(data, operands, size):
    scen, rep, t, t_foo, t_bar, x = data
    y = scen.set("y").tolist()
    x = rep.get("x:t-y")

    a = Quantity(
        xr.DataArray(
            np.random.rand(len(t_foo), len(y)), coords=[t_foo, y], dims=['t', 'y']
        ),
        units=x.attrs['_unit'],
    )
    b = Quantity(
        xr.DataArray(
            np.random.rand(len(t_bar), len(y)), coords=[t_bar, y], dims=['t', 'y']
        ),
        units=x.attrs['_unit'],
    )

    rep.add("a:t-y", a)
    rep.add("b:t-y", b)

    key = rep.add(
        "result", tuple([computations.add] + [f"{name}:t-y" for name in operands])
    )

    result = rep.get(key)
    assert size == result.size, result.to_series()
def add_test_data(scen):
    # New sets
    t_foo = ['foo{}'.format(i) for i in (1, 2, 3)]
    t_bar = ['bar{}'.format(i) for i in (4, 5, 6)]
    t = t_foo + t_bar
    y = list(map(str, range(2000, 2051, 10)))

    # Add to scenario
    scen.init_set('t')
    scen.add_set('t', t)
    scen.init_set('y')
    scen.add_set('y', y)

    # Data
    ureg = pint.get_application_registry()
    x = xr.DataArray(np.random.rand(len(t), len(y)),
                     coords=[t, y], dims=['t', 'y'],
                     attrs={'_unit': ureg.Unit('kg')})
    x = Quantity(x)

    # As a pd.DataFrame with units
    x_df = x.to_series().rename('value').reset_index()
    x_df['unit'] = 'kg'

    scen.init_par('x', ['t', 'y'])
    scen.add_par('x', x_df)

    return t, t_foo, t_bar, x
def test_plot_cumulative(tmp_path):
    x = pd.Series(
        {
            ("region", "a"): 500,
            ("region", "b"): 1000,
        }
    )
    x.index.names = ["n", "g"]

    y = pd.Series(
        {
            ("region", "a", 2020): 1.1,
            ("region", "b", 2020): 2.2,
            ("region", "a", 2021): 3.3,
            ("region", "b", 2021): 4.4,
        }
    )
    y.index.names = ["n", "g", "y"]

    result = computations.plot_cumulative(
        Quantity(x, units="GW a"),
        Quantity(y, units="mole / kW a"),
        labels=("Fossil supply", "Resource volume", "Cost"),
    )
    assert isinstance(result, matplotlib.axes.Axes)

    matplotlib.pyplot.savefig(tmp_path / "plot_cumulative.svg")
def add_test_data(scen: Scenario):
    # New sets
    t_foo = ["foo{}".format(i) for i in (1, 2, 3)]
    t_bar = ["bar{}".format(i) for i in (4, 5, 6)]
    t = t_foo + t_bar
    y = list(map(str, range(2000, 2051, 10)))

    # Add to scenario
    scen.init_set("t")
    scen.add_set("t", t)
    scen.init_set("y")
    scen.add_set("y", y)

    # Data
    ureg = pint.get_application_registry()
    x = Quantity(
        xr.DataArray(np.random.rand(len(t), len(y)), coords=[("t", t), ("y", y)]),
        units=ureg.kg,
    )

    # As a pd.DataFrame with units
    x_df = x.to_series().rename("value").reset_index()
    x_df["unit"] = "kg"

    scen.init_par("x", ["t", "y"])
    scen.add_par("x", x_df)

    return t, t_foo, t_bar, x
def test_product0():
    A = Quantity(xr.DataArray([1, 2], coords=[["a0", "a1"]], dims=["a"]))
    B = Quantity(xr.DataArray([3, 4], coords=[["b0", "b1"]], dims=["b"]))
    exp = Quantity(
        xr.DataArray(
            [[3, 4], [6, 8]],
            coords=[["a0", "a1"], ["b0", "b1"]],
            dims=["a", "b"],
        ),
        units="1",
    )

    assert_qty_equal(exp, computations.product(A, B))

    # A further product of the 2-D result with another Quantity also computes
    # without error
    computations.product(exp, B)
def test_reporter_add_product(test_mp, ureg):
    scen = ixmp.Scenario(test_mp, 'reporter_add_product',
                         'reporter_add_product', 'new')
    *_, x = add_test_data(scen)
    rep = Reporter.from_scenario(scen)

    # add_product() works
    key = rep.add_product('x squared', 'x', 'x', sums=True)

    # Product has the expected dimensions
    assert key == 'x squared:t-y'

    # Product has the expected value
    exp = Quantity(x * x, name='x')
    exp.attrs['_unit'] = ureg('kilogram ** 2').units
    assert_qty_equal(exp, rep.get(key))

    # add('product', ...) works
    key = rep.add('product', 'x_squared', 'x', 'x', sums=True)
def test_stacked_bar():
    data = pd.Series(
        {
            ("region", "foo", 2020): 1.0,
            ("region", "bar", 2020): 2.0,
            ("region", "foo", 2021): 3.0,
            ("region", "bar", 2021): 4.0,
        }
    )
    data.index.names = ["r", "t", "year"]

    result = computations.stacked_bar(Quantity(data), dims=["r", "t", "year"])
    assert isinstance(result, matplotlib.axes.Axes)
def test_units(ureg):
    """Test handling of units within Reporter computations."""
    r = Reporter()

    # Create some dummy data
    dims = dict(coords=['a b c'.split()], dims=['x'])
    r.add('energy:x', Quantity(xr.DataArray([1., 3, 8], **dims), units='MJ'))
    r.add('time', Quantity(xr.DataArray([5., 6, 8], **dims), units='hour'))
    r.add('efficiency', Quantity(xr.DataArray([0.9, 0.8, 0.95], **dims)))

    # Aggregation preserves units
    r.add('energy', (computations.sum, 'energy:x', None, ['x']))
    assert r.get('energy').attrs['_unit'] == ureg.parse_units('MJ')

    # Units are derived for a ratio of two quantities
    r.add('power', (computations.ratio, 'energy:x', 'time'))
    assert r.get('power').attrs['_unit'] == ureg.parse_units('MJ/hour')

    # Product of dimensioned and dimensionless quantities keeps the former
    r.add('energy2', (computations.product, 'energy:x', 'efficiency'))
    assert r.get('energy2').attrs['_unit'] == ureg.parse_units('MJ')
def test_reporter(message_test_mp):
    scen = Scenario(message_test_mp, **SCENARIO["dantzig"])

    # Varies between local & CI contexts
    # DEBUG may be due to reuse of test_mp in a non-deterministic order
    if not scen.has_solution():
        scen.solve()

    # IXMPReporter can be initialized on a MESSAGE Scenario
    rep_ix = ixmp_Reporter.from_scenario(scen)

    # message_ix.Reporter can also be initialized
    rep = Reporter.from_scenario(scen)

    # Number of quantities available in a rudimentary MESSAGEix Scenario
    assert len(rep.graph["all"]) == 123

    # Quantities have short dimension names
    assert "demand:n-c-l-y-h" in rep.graph

    # Aggregates are available
    assert "demand:n-l-h" in rep.graph

    # Quantities contain expected data
    dims = dict(coords=["chicago new-york topeka".split()], dims=["n"])
    demand = Quantity(xr.DataArray([300, 325, 275], **dims), name="demand")

    # NB the call to squeeze() drops the length-1 dimensions c-l-y-h
    obs = rep.get("demand:n-c-l-y-h").squeeze(drop=True)
    # check_attrs False because we don't get the unit addition in bare xarray
    assert_qty_equal(obs, demand, check_attrs=False)

    # ixmp.Reporter pre-populated with only model quantities and aggregates
    assert len(rep_ix.graph) == 5223

    # message_ix.Reporter pre-populated with additional, derived quantities
    # This is the same value as in test_tutorials.py
    assert len(rep.graph) == 12688

    # Derived quantities have expected dimensions
    vom_key = rep.full_key("vom")
    assert vom_key not in rep_ix
    assert vom_key == "vom:nl-t-yv-ya-m-h"

    # …and expected values
    var_cost = rep.get(rep.full_key("var_cost"))
    ACT = rep.get(rep.full_key("ACT"))
    vom = computations.product(var_cost, ACT)
    # check_attrs false because `vom` multiply above does not add units
    assert_qty_equal(vom, rep.get(vom_key))
def test_aggregate(test_mp):
    scen = ixmp.Scenario(test_mp, 'Group reporting', 'group reporting', 'new')
    t, t_foo, t_bar, x = add_test_data(scen)

    # Reporter
    rep = Reporter.from_scenario(scen)

    # Define some groups
    t_groups = {'foo': t_foo, 'bar': t_bar, 'baz': ['foo1', 'bar5', 'bar6']}

    # Use the computation directly
    agg1 = computations.aggregate(Quantity(x), {'t': t_groups}, True)

    # Expected set of keys along the aggregated dimension
    assert set(agg1.coords['t'].values) == set(t) | set(t_groups.keys())

    # Sums are as expected
    assert_qty_allclose(agg1.sel(t='foo', drop=True), x.sel(t=t_foo).sum('t'))
    assert_qty_allclose(agg1.sel(t='bar', drop=True), x.sel(t=t_bar).sum('t'))
    assert_qty_allclose(agg1.sel(t='baz', drop=True),
                        x.sel(t=['foo1', 'bar5', 'bar6']).sum('t'))

    # Use Reporter convenience method
    key2 = rep.aggregate('x:t-y', 'agg2', {'t': t_groups}, keep=True)

    # Group has expected key and contents
    assert key2 == 'x:t-y:agg2'

    # Aggregate is computed without error
    agg2 = rep.get(key2)

    assert_qty_equal(agg1, agg2)

    # Add aggregates, without keeping originals
    key3 = rep.aggregate('x:t-y', 'agg3', {'t': t_groups}, keep=False)

    # Distinct keys
    assert key3 != key2

    # Only the aggregated and no original keys along the aggregated dimension
    agg3 = rep.get(key3)
    assert set(agg3.coords['t'].values) == set(t_groups.keys())

    with pytest.raises(NotImplementedError):
        # Not yet supported; requires two separate operations
        rep.aggregate('x:t-y', 'agg3', {'t': t_groups, 'y': [2000, 2010]})
def test_assert(self, a):
    """Test assertions about Quantity.

    These are tests without the `attrs` property, in which case direct
    pd.Series and xr.DataArray comparisons are possible.
    """
    # Convert to pd.Series
    b = a.to_series()

    assert_qty_equal(a, b, check_type=False)
    assert_qty_equal(b, a, check_type=False)
    assert_qty_allclose(a, b, check_type=False)
    assert_qty_allclose(b, a, check_type=False)

    c = Quantity(a)

    assert_qty_equal(a, c, check_type=True)
    assert_qty_equal(c, a, check_type=True)
    assert_qty_allclose(a, c, check_type=True)
    assert_qty_allclose(c, a, check_type=True)
def test_select(data):
    # Unpack
    *_, t_foo, t_bar, x = data

    x = Quantity(x)
    assert x.size == 6 * 6

    # Selection with inverse=False
    indexers = {'t': t_foo[0:1] + t_bar[0:1]}
    result_0 = computations.select(x, indexers=indexers)
    assert result_0.size == 2 * 6

    # Single indexer along one dimension results in 1D data
    indexers['y'] = '2010'
    result_1 = computations.select(x, indexers=indexers)
    assert result_1.size == 2 * 1

    # Selection with inverse=True
    result_2 = computations.select(x, indexers=indexers, inverse=True)
    assert result_2.size == 4 * 5
def test_assert_with_attrs(self, a):
    """Test assertions about Quantity with attrs.

    Here direct pd.Series and xr.DataArray comparisons are *not* possible.
    """
    attrs = {'foo': 'bar'}

    a.attrs = attrs

    b = Quantity(a)

    # make sure it has the correct property
    assert a.attrs == attrs
    assert b.attrs == attrs

    assert_qty_equal(a, b)
    assert_qty_equal(b, a)
    assert_qty_allclose(a, b)
    assert_qty_allclose(b, a)

    # check_attrs=False allows a successful equals assertion even when the
    # attrs are different
    a.attrs = {'bar': 'foo'}
    assert_qty_equal(a, b, check_attrs=False)
def test_file_formats(test_data_path, tmp_path):
    r = Reporter()

    expected = Quantity(
        pd.read_csv(test_data_path / 'report-input0.csv',
                    index_col=['i', 'j'])['value'],
        units='km')

    # CSV file is automatically parsed to xr.DataArray
    p1 = test_data_path / 'report-input0.csv'
    k = r.add_file(p1, units=pint.Unit('km'))
    assert_qty_equal(r.get(k), expected)

    # Dimensions can be specified
    p2 = test_data_path / 'report-input1.csv'
    k2 = r.add_file(p2, dims=dict(i='i', j_dim='j'))
    assert_qty_equal(r.get(k), r.get(k2))

    # Units are loaded from a column
    assert r.get(k2).attrs['_unit'] == pint.Unit('km')

    # Specifying units that do not match file contents → ComputationError
    r.add_file(p2, key='bad', dims=dict(i='i', j_dim='j'), units='kg')
    with pytest.raises(ComputationError):
        r.get('bad')

    # Write to CSV
    p3 = tmp_path / 'report-output.csv'
    r.write(k, p3)

    # Output is identical to input file, except for order
    assert (sorted(p1.read_text().split('\n'))
            == sorted(p3.read_text().split('\n')))

    # Write to Excel
    p4 = tmp_path / 'report-output.xlsx'
    r.write(k, p4)
@pytest.fixture
def a(self):
    da = xr.DataArray([0.8, 0.2], coords=[['oil', 'water']], dims=['p'])
    yield Quantity(da)
def test_reporter_from_dantzig(test_mp, ureg):
    scen = make_dantzig(test_mp, solve=True)

    # Reporter.from_scenario can handle the Dantzig problem
    rep = Reporter.from_scenario(scen)

    # Partial sums are available automatically (d is defined over i and j)
    d_i = rep.get('d:i')

    # Units pass through summation
    assert d_i.attrs['_unit'] == ureg.parse_units('km')

    # Summation across all dimensions results in a 1-element Quantity
    d = rep.get('d:')
    assert d.shape == ((1,) if Quantity.CLASS == 'AttrSeries' else tuple())
    assert d.size == 1
    assert np.isclose(d.values, 11.7)

    # Weighted sum
    weights = Quantity(
        xr.DataArray([1, 2, 3],
                     coords=['chicago new-york topeka'.split()],
                     dims=['j']))
    new_key = rep.aggregate('d:i-j', 'weighted', 'j', weights)

    # ...produces the expected new key with the summed dimension removed and
    # tag added
    assert new_key == 'd:i:weighted'

    # ...produces the expected new value
    obs = rep.get(new_key)
    d_ij = rep.get('d:i-j')
    exp = Quantity(
        (d_ij * weights).sum(dim=['j']) / weights.sum(dim=['j']),
        attrs=d_ij.attrs,
    )
    assert_qty_equal(exp, obs)

    # Disaggregation with explicit data
    # (cases of canned food 'p'acked in oil or water)
    shares = xr.DataArray([0.8, 0.2], coords=[['oil', 'water']], dims=['p'])
    new_key = rep.disaggregate('b:j', 'p', args=[Quantity(shares)])

    # ...produces the expected key with new dimension added
    assert new_key == 'b:j-p'
    b_jp = rep.get('b:j-p')

    # Units pass through disaggregation
    assert b_jp.attrs['_unit'] == 'cases'

    # Set elements are available
    assert rep.get('j') == ['new-york', 'chicago', 'topeka']

    # 'all' key retrieves all quantities
    obs = {da.name for da in rep.get('all')}
    exp = set(('a b d f x z cost cost-margin demand demand-margin supply '
               'supply-margin').split())
    assert obs == exp

    # Shorthand for retrieving a full key name
    assert rep.full_key('d') == 'd:i-j' and isinstance(rep.full_key('d'), Key)