# Example 1
def test_cli(ixmp_cli, test_mp, test_data_path):
    """'report' CLI command runs and prints the configured quantity.

    The platform is populated, closed and deleted so the CLI must reopen it
    by name; the output is compared against the xarray repr of 'd_check'.
    """
    # Put something in the database
    make_dantzig(test_mp)
    test_mp.close_db()

    platform_name = test_mp.name

    # Delete the platform/close the database connection
    del test_mp

    cmd = ['--platform', platform_name,
           '--model', 'canning problem',
           '--scenario', 'standard',
           'report',
           '--config', str(test_data_path / 'report-config-0.yaml'),
           'd_check',
           ]

    # 'report' command runs
    result = ixmp_cli.invoke(cmd)
    assert result.exit_code == 0

    # TODO warning should be logged

    # Reporting produces the expected command-line output
    assert result.output.endswith("""<xarray.DataArray 'value' (i: 2, j: 3)>
array([[1.8, 2.5, 1.4],
       [1.7, 2.5, 1.8]])
Coordinates:
  * i        (i) object 'san-diego' 'seattle'
  * j        (j) object 'chicago' 'new-york' 'topeka'
""")
# Example 2
def test_format_scenario_list(test_mp):
    """utils.format_scenario_list() produces text and URL listings.

    Checks both the default human-readable output (models, scenarios,
    version ranges, summary counts) and the as_url=True form.
    """
    make_dantzig(test_mp)

    exp = [
        '',
        'Douglas Adams/',
        '  Hitchhiker#1',
        '',
        'canning problem/',
        '  standard#3  1–3',
        '',
        '2 model name(s)',
        '2 scenario name(s)',
        '2 (model, scenario) combination(s)',
        '4 total scenarios',
    ]

    # Expected results
    assert exp == utils.format_scenario_list(test_mp)

    # With as_url=True
    exp = list(
        map(lambda s: s.format(test_mp.name), [
            'ixmp://{}/Douglas Adams/Hitchhiker#1',
            'ixmp://{}/canning problem/standard#3',
        ]))
    assert exp == utils.format_scenario_list(test_mp, as_url=True)
# Example 3
def test_reporting_cli(test_mp_props, test_data_path):
    """'ixmp report' invoked via subprocess using a --dbprops platform.

    Unlike the Click-runner tests, this drives the installed console script
    end-to-end and compares the printed xarray repr of 'd_check'.
    """
    # Put something in the database
    mp = ixmp.Platform(dbprops=test_mp_props)
    make_dantzig(mp)
    mp.close_db()
    del mp

    cmd = [
        'ixmp',
        '--dbprops',
        str(test_mp_props),
        '--model',
        'canning problem',
        '--scenario',
        'standard',
        'report',
        '--config',
        str(test_data_path / 'report-config-0.yaml'),
        '--default',
        'd_check',
    ]
    out = subprocess.check_output(cmd, encoding='utf-8')

    # Reporting produces the expected command-line output
    assert out.endswith("""
<xarray.DataArray 'value' (i: 2, j: 3)>
array([[1.8, 2.5, 1.4],
       [1.7, 2.5, 1.8]])
Coordinates:
  * i        (i) object 'san-diego' 'seattle'
  * j        (j) object 'chicago' 'new-york' 'topeka'
""")
# Example 4
def test_diff_data(test_mp):
    """diff() when Scenarios contain the same items, but different data.

    Builds two Dantzig scenarios, removes/modifies rows of parameters "b"
    and "d" in each, then checks that utils.diff() reports the expected
    left_only/right_only/both rows, with and without filters.
    """
    scen_a = make_dantzig(test_mp)
    scen_b = make_dantzig(test_mp)

    # Modify `scen_a` and `scen_b`
    scen_a.check_out()
    scen_b.check_out()

    # Remove elements from "b"
    drop_args = dict(labels=["value", "unit"], axis=1)
    scen_a.remove_par("b", scen_a.par("b").iloc[0:1, :].drop(**drop_args))
    scen_b.remove_par("b", scen_b.par("b").iloc[1:2, :].drop(**drop_args))
    # Remove elements from "d"
    scen_a.remove_par("d", scen_a.par("d").query("i == 'san-diego'").drop(**drop_args))
    # Modify values in "d"
    scen_b.add_par("d", scen_b.par("d").query("i == 'seattle'").assign(value=123.4))

    # Expected results. NB np.nan, not np.NaN: the latter alias was removed
    # in NumPy 2.0.
    exp_b = pd.DataFrame(
        [
            ["chicago", 300.0, "cases", np.nan, None, "left_only"],
            ["new-york", np.nan, None, 325.0, "cases", "right_only"],
            ["topeka", 275.0, "cases", 275.0, "cases", "both"],
        ],
        columns="j value_a unit_a value_b unit_b _merge".split(),
    )
    exp_d = pd.DataFrame(
        [
            ["san-diego", "chicago", np.nan, None, 1.8, "km", "right_only"],
            ["san-diego", "new-york", np.nan, None, 2.5, "km", "right_only"],
            ["san-diego", "topeka", np.nan, None, 1.4, "km", "right_only"],
            ["seattle", "chicago", 1.7, "km", 123.4, "km", "both"],
            ["seattle", "new-york", 2.5, "km", 123.4, "km", "both"],
            ["seattle", "topeka", 1.8, "km", 123.4, "km", "both"],
        ],
        columns="i j value_a unit_a value_b unit_b _merge".split(),
    )

    # Use the specific categorical dtype produced by pd.merge(indicator=True)
    merge_cat = pd.CategoricalDtype(["left_only", "right_only", "both"])
    exp_b = exp_b.astype(dict(_merge=merge_cat))
    exp_d = exp_d.astype(dict(_merge=merge_cat))

    # Compare different scenarios without filters
    for name, df in utils.diff(scen_a, scen_b):
        if name == "b":
            pdt.assert_frame_equal(exp_b, df)
        elif name == "d":
            pdt.assert_frame_equal(exp_d, df)

    # Compare different scenarios with filters
    iterator = utils.diff(scen_a, scen_b, filters=dict(j=["chicago"]))
    for name, df in iterator:
        # Same as above, except only the filtered rows should appear
        if name == "b":
            pdt.assert_frame_equal(exp_b.iloc[0:1, :], df)
        elif name == "d":
            pdt.assert_frame_equal(exp_d.iloc[[0, 3], :].reset_index(drop=True), df)
# Example 5
def test_diff_identical(test_mp):
    """diff() of identical Scenarios."""
    scen_a = make_dantzig(test_mp)
    scen_b = make_dantzig(test_mp)

    # Compare identical scenarios: produces data of same length
    for name, df in utils.diff(scen_a, scen_b):
        data_a = utils.maybe_convert_scalar(scen_a.par(name))
        assert len(data_a) == len(df)

    # Compare identical scenarios, with filters
    iterator = utils.diff(scen_a, scen_b, filters=dict(i=["seattle"]))
    # Expected (parameter name, filtered row count) pairs
    for (name, df), (exp_name, N) in zip(iterator, [("a", 1), ("d", 3)]):
        assert exp_name == name and len(df) == N
# Example 6
def test_reporter_describe(test_mp, test_data_path, capsys):
    """Reporter.describe() output matches expectation and echoes to stdout.

    The expected text contains the Scenario object's id(); on Windows the
    hex id is formatted differently, hence the os.name branch.
    """
    scen = make_dantzig(test_mp)
    r = Reporter.from_scenario(scen)

    # hexadecimal ID of *scen*
    id_ = hex(id(scen)) if os.name != 'nt' else \
        '{:#018X}'.format(id(scen)).replace('X', 'x')

    # Describe one key
    desc1 = """'d:i':
- sum(dimensions=['j'], weights=None, ...)
- 'd:i-j':
  - data_for_quantity('par', 'd', 'value', ...)
  - 'scenario':
    - <ixmp.core.Scenario object at {id}>
  - 'filters':
    - {{}}""".format(id=id_)
    assert desc1 == r.describe('d:i')

    # Description was also written to stdout
    out1, _ = capsys.readouterr()
    assert desc1 + '\n' == out1

    # Description of all keys is as expected
    desc2 = (test_data_path / 'report-describe.txt').read_text() \
                                                    .format(id=id_)
    assert desc2 == r.describe() + '\n'

    # Result was also written to stdout
    out2, _ = capsys.readouterr()
    assert desc2 == out2
# Example 7
def test_run_clone(tmpdir, test_data_path):
    """Clone a solved scenario with/without solution and first_model_year."""
    # this test is designed to cover the full functionality of the GAMS API
    # - initialize a new platform instance
    # - creates a new scenario and exports a gdx file
    # - runs the tutorial transport model
    # - reads back the solution from the output
    # - performs the test on the objective value and the timeseries data
    mp = ixmp.Platform(tmpdir, dbtype='HSQLDB')
    scen = make_dantzig(mp, solve=test_data_path)
    assert np.isclose(scen.var('z')['lvl'], 153.675)
    pdt.assert_frame_equal(scen.timeseries(iamc=True), TS_DF)

    # cloning with `keep_solution=True` keeps all timeseries and the solution
    scen2 = scen.clone(keep_solution=True)
    assert np.isclose(scen2.var('z')['lvl'], 153.675)
    pdt.assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)

    # cloning with `keep_solution=True` and `first_model_year` raises an error
    pytest.raises(ValueError, scen.clone, first_model_year=2005)

    # cloning with `keep_solution=False` drops the solution and only keeps
    # timeseries set as `meta=True`
    scen3 = scen.clone(keep_solution=False)
    assert np.isnan(scen3.var('z')['lvl'])
    pdt.assert_frame_equal(scen3.timeseries(iamc=True), HIST_DF)

    # cloning with `keep_solution=False` and `first_model_year`
    # drops the solution and removes all timeseries not marked `meta=True`
    # in the model horizon (i.e, `year >= first_model_year`)
    scen4 = scen.clone(keep_solution=False, first_model_year=2005)
    assert np.isnan(scen4.var('z')['lvl'])
    pdt.assert_frame_equal(scen4.timeseries(iamc=True), TS_DF_CLEARED)
# Example 8
def test_multi_db_run(tmpdir):
    """Clone a solved scenario across two JDBC/HSQLDB platforms.

    Verifies that sets, parameters, the solution, custom units/regions and
    timeseries survive the cross-platform clone and a close/reopen cycle.
    """
    # create a new instance of the transport problem and solve it
    mp1 = ixmp.Platform(backend="jdbc", driver="hsqldb", path=tmpdir / "mp1")
    scen1 = make_dantzig(mp1, solve=True, quiet=True)

    mp2 = ixmp.Platform(backend="jdbc", driver="hsqldb", path=tmpdir / "mp2")
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit("wrong_unit")
    mp2.add_region("wrong_region", "country")

    # check that cloning across platforms must copy the full solution
    pytest.raises(NotImplementedError, scen1.clone, platform=mp2, keep_solution=False)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = ixmp.Platform(backend="jdbc", driver="hsqldb", path=tmpdir / "mp2")
    assert_multi_db(mp1, _mp2)
    scen2 = ixmp.Scenario(_mp2, **models["dantzig"])

    # check that sets, variables and parameter were copied correctly
    assert_array_equal(scen1.set("i"), scen2.set("i"))
    assert_frame_equal(scen1.par("d"), scen2.par("d"))
    assert np.isclose(scen2.var("z")["lvl"], 153.675)
    assert_frame_equal(scen1.var("x"), scen2.var("x"))

    # check that custom unit, region and timeseries are migrated correctly
    assert scen2.par("f")["value"] == 90.0
    assert scen2.par("f")["unit"] == "USD/km"
    assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
# Example 9
def test_multi_db_run(tmpdir, test_data_path):
    """Clone a solved scenario across two HSQLDB platforms (dbtype API).

    Older-API variant of the multi-platform clone test: uses
    Platform(path, dbtype=...) and expects ValueError (not
    NotImplementedError) for keep_solution=False across platforms.
    """
    # create a new instance of the transport problem and solve it
    mp1 = ixmp.Platform(tmpdir / 'mp1', dbtype='HSQLDB')
    scen1 = make_dantzig(mp1, solve=test_data_path)

    mp2 = ixmp.Platform(tmpdir / 'mp2', dbtype='HSQLDB')
    # add other unit to make sure that the mapping is correct during clone
    mp2.add_unit('wrong_unit')
    mp2.add_region('wrong_region', 'country')

    # check that cloning across platforms must copy the full solution
    pytest.raises(ValueError, scen1.clone, platform=mp2, keep_solution=False)

    # clone solved model across platforms (with default settings)
    scen1.clone(platform=mp2, keep_solution=True)

    # close the db to ensure that data and solution of the clone are saved
    mp2.close_db()
    del mp2

    # reopen the connection to the second platform and reload scenario
    _mp2 = ixmp.Platform(tmpdir / 'mp2', dbtype='HSQLDB')
    assert_multi_db(mp1, _mp2)
    scen2 = ixmp.Scenario(_mp2, **models['dantzig'])

    # check that sets, variables and parameter were copied correctly
    npt.assert_array_equal(scen1.set('i'), scen2.set('i'))
    pdt.assert_frame_equal(scen1.par('d'), scen2.par('d'))
    assert np.isclose(scen2.var('z')['lvl'], 153.675)
    pdt.assert_frame_equal(scen1.var('x'), scen2.var('x'))

    # check that custom unit, region and timeseries are migrated correctly
    assert scen2.par('f')['value'] == 90.0
    assert scen2.par('f')['unit'] == 'USD_per_km'
    pdt.assert_frame_equal(scen2.timeseries(iamc=True), TS_DF)
# Example 10
def test_run_remove_solution(test_mp, test_data_path):
    """remove_solution() clears variables and non-meta timeseries."""
    # create a new instance of the transport problem and solve it
    mp = test_mp
    scen = make_dantzig(mp, solve=test_data_path)
    assert np.isclose(scen.var('z')['lvl'], 153.675)

    # check that re-solving the model will raise an error if a solution exists
    pytest.raises(ValueError,
                  scen.solve,
                  model=str(test_data_path / 'transport_ixmp'),
                  case='fail')

    # remove the solution, check that variables are empty
    # and timeseries not marked `meta=True` are removed
    scen2 = scen.clone()
    scen2.remove_solution()
    assert not scen2.has_solution()
    assert np.isnan(scen2.var('z')['lvl'])
    assert_frame_equal(scen2.timeseries(iamc=True), HIST_DF)

    # remove the solution with a specific year as first model year, check that
    # variables are empty and timeseries not marked `meta=True` are removed
    scen3 = scen.clone()
    scen3.remove_solution(first_model_year=2005)
    assert not scen3.has_solution()
    assert np.isnan(scen3.var('z')['lvl'])
    assert_frame_equal(scen3.timeseries(iamc=True), TS_DF_CLEARED)
# Example 11
def test_reporter_from_dantzig(test_mp, test_data_path):
    """Reporter features on the solved Dantzig problem.

    Covers automatic partial sums, unit propagation, weighted aggregation,
    disaggregation, set-element retrieval, the 'all' key and full_key().
    """
    scen = make_dantzig(test_mp, solve=test_data_path)

    # Reporter.from_scenario can handle the Dantzig problem
    rep = Reporter.from_scenario(scen)

    # Partial sums are available automatically (d is defined over i and j)
    d_i = rep.get('d:i')

    # Units pass through summation
    assert d_i.attrs['_unit'] == ureg.parse_units('km')

    # Summation across all dimensions results a 1-element Quantity
    d = rep.get('d:')
    assert len(d) == 1
    assert np.isclose(d.iloc[0], 11.7)

    # Weighted sum
    weights = Quantity(xr.DataArray(
        [1, 2, 3],
        coords=['chicago new-york topeka'.split()],
        dims=['j']))
    new_key = rep.aggregate('d:i-j', 'weighted', 'j', weights)

    # ...produces the expected new key with the summed dimension removed and
    # tag added
    assert new_key == 'd:i:weighted'

    # ...produces the expected new value
    obs = rep.get(new_key)
    exp = (rep.get('d:i-j') * weights).sum(dim=['j']) / weights.sum(dim=['j'])
    # TODO: attrs has to be explicitly copied here because math is done which
    # returns a pd.Series
    exp = Quantity(exp, attrs=rep.get('d:i-j').attrs)

    assert_series_equal(obs.sort_index(), exp.sort_index())

    # Disaggregation with explicit data
    # (cases of canned food 'p'acked in oil or water)
    shares = xr.DataArray([0.8, 0.2], coords=[['oil', 'water']], dims=['p'])
    new_key = rep.disaggregate('b:j', 'p', args=[Quantity(shares)])

    # ...produces the expected key with new dimension added
    assert new_key == 'b:j-p'

    b_jp = rep.get('b:j-p')

    # Units pass through disaggregation
    assert b_jp.attrs['_unit'] == 'cases'

    # Set elements are available
    assert rep.get('j') == ['new-york', 'chicago', 'topeka']

    # 'all' key retrieves all quantities
    obs = {da.name for da in rep.get('all')}
    exp = set('a b d f demand demand-margin z x'.split())
    assert obs == exp

    # Shorthand for retrieving a full key name
    assert rep.full_key('d') == 'd:i-j' and isinstance(rep.full_key('d'), Key)
# Example 12
def test_model_initialize(test_mp, caplog):
    """DantzigModel.initialize() on empty, modified and mismatched Scenarios."""
    # Model.initialize runs on an empty Scenario
    s = make_dantzig(test_mp)
    b1 = s.par('b')
    assert len(b1) == 3

    # Modify a value for 'b'
    s.check_out()
    new_value = 301
    s.add_par('b', 'chicago', new_value, 'cases')
    s.commit('Overwrite b(chicago)')

    # Model.initialize runs on an already-initialized Scenario, without error
    DantzigModel.initialize(s, with_data=True)

    # Data has the same length...
    b2 = s.par('b')
    assert len(b2) == 3
    # ...but modified value(s) are not overwritten
    assert (b2.query("j == 'chicago'")['value'] == new_value).all()

    # Unrecognized Scenario(scheme=...) is initialized using the base method, a
    # no-op
    messages = [
        "No scheme for new Scenario model-name/scenario-name",
        "No initialization for None-scheme Scenario",
    ]
    with assert_logs(caplog, messages, at_level=logging.DEBUG):
        Scenario(test_mp, model='model-name', scenario='scenario-name',
                 version='new')

    with assert_logs(caplog, "No initialization for 'foo'-scheme Scenario",
                     at_level=logging.DEBUG):
        Scenario(test_mp, model='model-name', scenario='scenario-name',
                 version='new', scheme='foo')

    # Keyword arguments to Scenario(...) that are not recognized by
    # Model.initialize() raise an intelligible exception
    with pytest.raises(TypeError,
                       match="unexpected keyword argument 'bad_arg1'"):
        Scenario(test_mp, model='model-name', scenario='scenario-name',
                 version='new', scheme='unknown', bad_arg1=111)

    with pytest.raises(TypeError,
                       match="unexpected keyword argument 'bad_arg2'"):
        Scenario(test_mp, model='model-name', scenario='scenario-name',
                 version='new', scheme='dantzig', with_data=True,
                 bad_arg2=222)

    # Replace b[j] with a parameter of the same name, but different indices
    s.check_out()
    s.remove_par('b')
    s.init_par('b', idx_sets=['i'], idx_names=['i_dim'])

    # Logs an error message
    with assert_logs(caplog,
                     "Existing index sets of 'b' ['i'] do not match ['j']"):
        DantzigModel.initialize(s)
# Example 13
def test_cli(ixmp_cli, test_mp, test_data_path):
    """'report' CLI command: output matched loosely via regex.

    The regex tolerates pandas version differences ("Name: value," may or
    may not appear before "dtype: float64").
    """
    # Put something in the database
    test_mp.open_db()
    make_dantzig(test_mp)
    test_mp.close_db()

    platform_name = test_mp.name

    # Delete the platform/close the database connection
    del test_mp

    cmd = [
        "--platform",
        platform_name,
        "--model",
        "canning problem",
        "--scenario",
        "standard",
        "report",
        "--config",
        str(test_data_path / "report-config-0.yaml"),
        "d_check",
    ]

    # 'report' command runs
    result = ixmp_cli.invoke(cmd)
    assert result.exit_code == 0

    # TODO warning should be logged

    # Reporting produces the expected command-line output

    assert re.match(
        "i          j       "  # Trailing whitespace
        r"""
san-diego  chicago     1\.8
           new-york    2\.5
           topeka      1\.4
seattle    chicago     1\.7
           new-york    2\.5
           topeka      1\.8
(Name: value, )?dtype: float64
""",
        result.output,
    )
# Example 14
def test_reporter_read_config(test_mp, test_data_path):
    """Reporter.configure() accepts a YAML configuration file path."""
    reporter = Reporter.from_scenario(make_dantzig(test_mp))

    # Read configuration from the file
    reporter.configure(test_data_path / 'report-config-0.yaml')

    # The key defined in the configuration file can be computed
    assert reporter.get('d_check').loc['seattle', 'chicago'] == 1.7
# Example 15
def test_gh_216(test_mp):
    """Filtering on a label absent from the set must not raise.

    The Java code in ixmp_source would raise because 'beijing' is not in
    set i; JDBCBackend strips it from the filters before calling the
    underlying method (https://github.com/iiasa/ixmp/issues/216).
    """
    scenario = make_dantzig(test_mp)

    scenario.par('a', filters=dict(i=['seattle', 'beijing']))
# Example 16
def test_reporter_read_config(test_mp, test_data_path):
    """read_config() warns about unrecognized sections; data still loads."""
    scen = make_dantzig(test_mp)

    rep = Reporter.from_scenario(scen)
    # The config file contains a section named 'notarealsection'
    with pytest.warns(UserWarning,
                      match=r"Unrecognized sections {'notarealsection'}"):
        rep.read_config(test_data_path / 'report-config-0.yaml')

    # Data from configured file is available
    assert rep.get('d_check').loc['seattle', 'chicago'] == 1.7
# Example 17
def test_cache_arg(arg):
    """Test 'cache' argument, passed to CachingBackend."""
    # NOTE(review): `arg` is presumably parametrized (True/False) by a
    # decorator or fixture outside this chunk — confirm.
    mp = ixmp.Platform(backend='jdbc', driver='hsqldb',
                       url='jdbc:hsqldb:mem://test_cache_false',
                       cache=arg)
    scen = make_dantzig(mp)

    # Maybe put something in the cache
    scen.par('a')

    # With cache=True one item is cached; with cache=False, nothing
    assert len(mp._backend._cache) == (1 if arg else 0)
# Example 18
def test_diff_items(test_mp):
    """diff() when Scenarios contain the different items."""
    scen_a = make_dantzig(test_mp)
    scen_b = make_dantzig(test_mp)

    # Put both scenarios in a modifiable state
    for scenario in (scen_a, scen_b):
        scenario.check_out()

    # Remove a different parameter from each scenario
    scen_a.remove_par("b")
    scen_b.remove_par("d")

    # diff() runs without filters; contents are not checked, only that the
    # iterator can be fully consumed
    list(utils.diff(scen_a, scen_b))

    # diff() also runs with filters
    list(utils.diff(scen_a, scen_b, filters=dict(j=["chicago"])))
# Example 19
def test_cli(ixmp_cli, test_mp, test_data_path):
    """'report' CLI command: output compared as a pandas Series repr."""
    # Put something in the database
    make_dantzig(test_mp)
    test_mp.close_db()

    platform_name = test_mp.name

    # Delete the platform/close the database connection
    del test_mp

    cmd = [
        '--platform',
        platform_name,
        '--model',
        'canning problem',
        '--scenario',
        'standard',
        'report',
        '--config',
        str(test_data_path / 'report-config-0.yaml'),
        'd_check',
    ]

    # 'report' command runs
    result = ixmp_cli.invoke(cmd)
    assert result.exit_code == 0

    # TODO warning should be logged

    # Reporting produces the expected command-line output
    assert result.output.endswith("i          j       "  # Trailing whitespace
                                  """
seattle    new-york    2.5
           chicago     1.7
           topeka      1.8
san-diego  new-york    2.5
           chicago     1.8
           topeka      1.4
Name: value, dtype: float64
""")
# Example 20
def test_update_scenario(caplog, test_mp):
    """update_scenario() adds and updates parameter data via a Computer.

    First adds new rows for j='toronto' from a DataFrame, then updates all
    of 'd' from a Quantity; logging confirms the number of rows written.
    """
    scen = make_dantzig(test_mp)
    scen.check_out()
    scen.add_set("j", "toronto")
    scen.commit("Add j=toronto")

    # Number of rows in the 'd' parameter
    N_before = len(scen.par("d"))
    assert 6 == N_before

    # A Computer used as calculation engine
    c = Computer()

    # Target Scenario for updating data
    c.add("target", scen)

    # Create a pd.DataFrame suitable for Scenario.add_par()
    data = dantzig_data["d"].query("j == 'chicago'").assign(j="toronto")
    data["value"] += 1.0

    # Add to the Reporter
    c.add("input", data)

    # Task to update the scenario with the data
    c.add("test 1",
          (partial(update_scenario, params=["d"]), "target", "input"))

    # Trigger the computation that results in data being added
    with assert_logs(caplog, f"'d' ← {len(data)} rows", at_level=logging.INFO):
        # Returns nothing
        assert c.get("test 1") is None

    # Rows were added to the parameter
    assert len(scen.par("d")) == N_before + len(data)

    # Modify the data
    data = pd.concat([dantzig_data["d"], data]).reset_index(drop=True)
    data["value"] *= 2.0

    # Convert to a Quantity object and re-add
    q = Quantity(data.set_index(["i", "j"])["value"], name="d", units="km")
    c.add("input", q)

    # Revise the task; the parameter name ('demand') is read from the Quantity
    c.add("test 2", (update_scenario, "target", "input"))

    # Trigger the computation
    with assert_logs(caplog, f"'d' ← {len(data)} rows", at_level=logging.INFO):
        c.get("test 2")

    # All the rows have been updated
    assert_frame_equal(scen.par("d"), data)
# Example 21
def test_run_gams_api(tmpdir, test_data_path):
    """Full GAMS API round trip on the tutorial transport model.

    Creates a platform and scenario, exports a gdx file, solves the model,
    and reads back the objective value from the output.
    """
    platform = ixmp.Platform(tmpdir, dbtype='HSQLDB')
    scenario = make_dantzig(platform, solve=test_data_path)

    # Objective value of the solved transport problem
    assert np.isclose(scenario.var('z')['lvl'], 153.675)
# Example 22
def test_model_initialize(test_mp, caplog):
    """DantzigModel.initialize() preserves data and rejects bad kwargs."""
    # Model.initialize runs on an empty Scenario
    s = make_dantzig(test_mp)
    b1 = s.par('b')
    assert len(b1) == 3

    # Modify a value for 'b'
    s.check_out()
    s.add_par('b', 'chicago', 600, 'cases')
    s.commit('Overwrite b(chicago)')

    # Model.initialize runs on an already-initialized Scenario, without error
    DantzigModel.initialize(s, with_data=True)

    # Data has the same length...
    b2 = s.par('b')
    assert len(b2) == 3
    # ...but modified value(s) are not overwritten
    assert (b2.query("j == 'chicago'")['value'] == 600).all()

    # Unrecognized Scenario(scheme=...) is initialized using the base method, a
    # no-op
    caplog.set_level(logging.DEBUG)
    Scenario(test_mp,
             model='model name',
             scenario='scenario name',
             version='new',
             scheme='unknown')
    assert caplog.records[-1].message == \
        "No initialization for 'unknown'-scheme Scenario"

    # Keyword arguments to Scenario(...) that are not recognized by
    # Model.initialize() raise an intelligible exception
    with pytest.raises(TypeError,
                       match="unexpected keyword argument 'bad_arg1'"):
        Scenario(test_mp,
                 model='model name',
                 scenario='scenario name',
                 version='new',
                 scheme='unknown',
                 bad_arg1=111)

    with pytest.raises(TypeError,
                       match="unexpected keyword argument 'bad_arg2'"):
        Scenario(test_mp,
                 model='model name',
                 scenario='scenario name',
                 version='new',
                 scheme='dantzig',
                 with_data=True,
                 bad_arg2=222)
# Example 23
def test_cache_clear(test_mp):
    """Removing set elements causes the cache to be cleared entirely."""
    scen = make_dantzig(test_mp)

    # Retrieve an item once so that it enters the cache
    rows_before = scen.par('d').shape[0]

    # Remove a set element
    scen.check_out()
    scen.remove_set('j', 'topeka')

    # A fresh retrieval reflects the removal of 'topeka' — the stale cached
    # value is not returned
    assert scen.par('d').shape[0] < rows_before
# Example 24
def test_reporter_read_config(test_mp, test_data_path, caplog):
    """read_config() logs a warning for unrecognized sections."""
    scen = make_dantzig(test_mp)

    rep = Reporter.from_scenario(scen)

    caplog.clear()

    # Warning is raised when reading configuration with unrecognized section(s)
    rep.read_config(test_data_path / 'report-config-0.yaml')

    assert ("Unrecognized sections ['notarealsection'] in reporting "
            "configuration will have no effect") == caplog.records[0].message

    # Data from configured file is available
    assert rep.get('d_check').loc['seattle', 'chicago'] == 1.7
# Example 25
def test_configure(test_mp, test_data_path):
    """configure(rename_dims=...) renames dimensions in new Reporters."""
    # Configure globally; reads 'rename_dims' section
    configure(rename_dims={"i": "i_renamed"})

    # Reporting uses the RENAME_DIMS mapping of 'i' to 'i_renamed'
    scen = make_dantzig(test_mp)
    rep = Reporter.from_scenario(scen)
    assert "d:i_renamed-j" in rep, rep.graph.keys()
    assert ["seattle", "san-diego"] == rep.get("i_renamed")

    # Original name 'i' are not found in the reporter
    assert "d:i-j" not in rep, rep.graph.keys()
    pytest.raises(KeyError, rep.get, "i")

    # Remove the configuration for renaming 'i', so that other tests work
    RENAME_DIMS.pop("i")
# Example 26
def test_del_ts():
    """JDBCBackend frees its Java objects when Scenario objects are deleted.

    NOTE(review): the refcount assertions presume CPython's immediate
    reference counting — confirm if other interpreters are supported.
    """
    mp = ixmp.Platform(
        backend='jdbc',
        driver='hsqldb',
        url='jdbc:hsqldb:mem:test_del_ts',
    )

    # Number of Java objects referenced by the JDBCBackend
    N_obj = len(mp._backend.jindex)

    # Create a list of some Scenario objects
    N = 8
    scenarios = [make_dantzig(mp)]
    for i in range(1, N):
        scenarios.append(scenarios[0].clone(scenario=f'clone {i}'))

    # Number of referenced objects has increased by 8
    assert len(mp._backend.jindex) == N_obj + N

    # Pop and free the objects
    for i in range(N):
        s = scenarios.pop(0)

        # The variable 's' is the only reference to this Scenario object
        assert getrefcount(s) - 1 == 1

        # ID of the Scenario object
        s_id = id(s)

        # Underlying Java object
        s_jobj = mp._backend.jindex[s]

        # Now delete the Scenario object
        del s

        # Number of referenced objects decreases by 1
        assert len(mp._backend.jindex) == N_obj + N - (i + 1)
        # ID is no longer in JDBCBackend.jindex
        assert s_id not in mp._backend.jindex

        # s_jobj is the only remaining reference to the Java object
        assert getrefcount(s_jobj) - 1 == 1
        del s_jobj

    # Backend is again empty
    assert len(mp._backend.jindex) == N_obj
# Example 27
def test_cache_del_ts(test_mp):
    """Test CachingBackend.del_ts()."""
    # Since CachingBackend is an abstract class, test it via JDBCBackend
    backend = test_mp._backend
    cache_size_pre = len(backend._cache)

    # Load data, thereby adding to the cache
    s = make_dantzig(test_mp)
    s.par("d")

    # Cache size has increased
    assert len(backend._cache) == cache_size_pre + 1

    # Delete the object; associated cache is freed
    # NOTE(review): immediate invalidation after `del` presumes CPython
    # reference counting — confirm.
    del s

    # Objects were invalidated/removed from cache
    assert len(backend._cache) == cache_size_pre
# Example 28
def test_docs(test_mp):
    """Platform.set_doc()/get_doc() round-trip; bad domains raise."""
    scen = make_dantzig(test_mp)

    # Model documentation round-trips
    test_mp.set_doc('model', {scen.model: 'Dantzig model'})
    assert test_mp.get_doc('model') == {'canning problem': 'Dantzig model'}

    # Time series variable documentation round-trips
    gdp = ('Gross Domestic Product (GDP) is the monetary value of all '
           'finished goods and services made within a country during '
           'a specific period.')
    test_mp.set_doc('timeseries', dict(GDP=gdp))
    assert test_mp.get_doc('timeseries', 'GDP') == gdp

    # An unknown domain raises ValueError with an informative message
    ex = raises(ValueError, test_mp.set_doc, 'baddomain', {})
    assert ex.value.args[0] == ('No such domain: baddomain, existing domains: '
                                'scenario, model, region, metadata, timeseries')
# Example 29
def test_docs(test_mp):
    """Platform.set_doc()/get_doc() for models, timeseries and bad domains."""
    scen = make_dantzig(test_mp)
    # test model docs
    test_mp.set_doc("model", {scen.model: "Dantzig model"})
    assert test_mp.get_doc("model") == {"canning problem": "Dantzig model"}

    # test timeseries variables docs
    gdp = ("Gross Domestic Product (GDP) is the monetary value of all "
           "finished goods and services made within a country during "
           "a specific period.")
    test_mp.set_doc("timeseries", dict(GDP=gdp))
    assert test_mp.get_doc("timeseries", "GDP") == gdp

    # test bad domain
    ex = raises(ValueError, test_mp.set_doc, "baddomain", {})
    exp = ("No such domain: baddomain, existing domains: "
           "scenario, model, region, metadata, timeseries")
    assert ex.value.args[0] == exp
# Example 30
def test_configure(test_mp, test_data_path):
    """configure(rename_dims=...) renames dimensions in new Reporters."""
    # TODO test: configuration keys 'units', 'replace_units'

    # Configure globally; reads 'rename_dims' section
    configure(rename_dims={'i': 'i_renamed'})

    # Reporting uses the RENAME_DIMS mapping of 'i' to 'i_renamed'
    scen = make_dantzig(test_mp)
    rep = Reporter.from_scenario(scen)
    assert 'd:i_renamed-j' in rep, rep.graph.keys()
    assert ['seattle', 'san-diego'] == rep.get('i_renamed')

    # Original name 'i' are not found in the reporter
    assert 'd:i-j' not in rep, rep.graph.keys()
    pytest.raises(KeyError, rep.get, 'i')

    # Remove the configuration for renaming 'i', so that other tests work
    RENAME_DIMS.pop('i')