Example #1
    def test_empty(self):
        """ScenarioInfo created from scratch."""
        info = ScenarioInfo()

        # Set values directly
        info.set["node"] = [Code(id="AT", name="Austria")]
        info.set["year"] = [1000, 1010, 1020, 1030]
        info.y0 = 1010

        # Shorthand properties

        # `yv_ya` is generated
        assert_frame_equal(
            pd.DataFrame(
                [
                    [1010, 1010],
                    [1010, 1020],
                    [1010, 1030],
                    [1020, 1020],
                    [1020, 1030],
                    [1030, 1030],
                ],
                columns=["year_vtg", "year_act"],
            ),
            info.yv_ya,
        )

        # List of Codes is converted to list of strings
        assert ["AT"] == info.N

        # Only years >= y0
        assert [1010, 1020, 1030] == info.Y
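
The snippets in this section assume imports along the following lines; this is a sketch, since exact module paths vary between versions of message_ix_models and sdmx:

# Sketch of the imports assumed by the snippets in this section; module paths
# are assumptions and may differ between library versions.
import pandas as pd
from pandas.testing import assert_frame_equal
from sdmx.model import Code  # in some sdmx1 versions: sdmx.model.v21

from message_ix_models import ScenarioInfo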
Example #2
def test_make_source_tech():
    info = ScenarioInfo()
    info.set["node"] = ["World", "node0", "node1"]
    info.set["year"] = [1, 2, 3]

    values = dict(
        capacity_factor=1.0,
        output=2.0,
        var_cost=3.0,
        technical_lifetime=4.0,
    )
    common = dict(
        commodity="commodity",
        level="level",
        mode="mode",
        technology="technology",
        time="time",
        time_dest="time",
        unit="unit",
    )
    # Code runs
    result = make_source_tech(info, common, **values)
    # Result is dictionary with the expected keys
    assert isinstance(result, dict)
    assert set(result.keys()) == set(values.keys())

    # "World" node does not appear in results
    assert set(result["output"]["node_loc"].unique()) == set(info.N[1:])

    for df in result.values():
        # Results have 2 nodes × 3 years
        assert len(df) == 2 * 3
        # No empty values
        assert not df.isna().any(axis=None)

    del values["var_cost"]
    with pytest.raises(
        ValueError, match=re.escape("needs values for {'var_cost'}")
    ):
        make_source_tech(info, common, **values)
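
For context, a hedged sketch of how the data returned by make_source_tech() might be applied to a Scenario, assuming the add_par_data() helper from message_ix_models.util and ixmp's Scenario.transact(); not part of the test above:

# Hypothetical usage sketch, under the assumptions named above.
from message_ix_models.util import add_par_data, make_source_tech

def add_source(scenario, info, common, values):
    # Generate the parameter data, then add it in a single transaction
    data = make_source_tech(info, common, **values)
    with scenario.transact("Add source technology data"):
        add_par_data(scenario, data)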
Example #3
def get_data(scenario, spec, **kwargs) -> Mapping[str, pd.DataFrame]:
    """Get data for disutility formulation.

    Calls :meth:`data_conversion` and :meth:`data_source`.

    Parameters
    ----------
    scenario : .Scenario
        Scenario for which data is generated; only structural information is
        read, via :class:`.ScenarioInfo`.
    spec : dict
        The output of :meth:`get_spec`.
    """
    if len(kwargs):
        log.warning(f"Ignore {repr(kwargs)}")

    info = ScenarioInfo(scenario)

    # Get conversion technology data
    data = data_conversion(info, spec)

    # Get and append source data
    merge_data(data, data_source(info, spec))

    return data
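
merge_data() combines mappings of parameter name → pd.DataFrame. A rough illustration of the behaviour relied on here, not the library implementation:

import pandas as pd

def merge_data_sketch(base, *others):
    """Concatenate the frames in `others` into `base`, in place."""
    for other in others:
        for par, df in other.items():
            base[par] = pd.concat([base[par], df]) if par in base else df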
Example #4
    def test_from_scenario(self, test_context):
        """ScenarioInfo initialized from an existing Scenario."""
        mp = test_context.get_platform()
        scenario = make_dantzig(mp, multi_year=True)

        # ScenarioInfo can be initialized from the scenario
        info = ScenarioInfo(scenario)

        # Shorthand properties
        assert_frame_equal(
            pd.DataFrame(
                [
                    [1962, 1963],
                    [1962, 1964],
                    [1962, 1965],
                    [1963, 1963],
                    [1963, 1964],
                    [1963, 1965],
                    [1964, 1964],
                    [1964, 1965],
                    [1965, 1965],
                ],
                columns=["year_vtg", "year_act"],
            ),
            info.yv_ya,
        )
        assert [
            "World",
            "seattle",
            "san-diego",
            "new-york",
            "chicago",
            "topeka",
        ] == info.N
        assert 1963 == info.y0
        assert [1963, 1964, 1965] == info.Y
Example #5
    def test_year_from_codes(
        self, caplog, codelist, y0, N_all, N_Y, y_m1, dp_checks
    ):
        caplog.set_level(logging.DEBUG, logger="message_ix_models")

        info = ScenarioInfo()
        codes = get_codes(f"year/{codelist}")
        info.year_from_codes(codes)

        # First model period
        assert y0 == info.y0
        assert ("firstmodelyear", y0) in info.set["cat_year"]

        # Total number of periods
        assert N_all == len(info.set["year"])

        # Number of model periods
        assert N_Y == len(info.Y)

        # Final period
        assert y_m1 == info.Y[-1]

        # Convert the data frame to a series
        dp = info.par["duration_period"].set_index("year")["value"]

        # duration_period entries are as expected
        for key, expected in dp_checks:
            assert expected == dp[key]

        # Test logging
        assert 0 == len(caplog.messages)

        info.year_from_codes(codes)

        assert 3 == len(caplog.messages)
        assert all(msg.startswith("Discard existing") for msg in caplog.messages)
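
year_from_codes() consumes period codes whose annotations carry the values asserted above. A minimal sketch of such codes; the annotation IDs are inferred from the assertions ("duration_period", "firstmodelyear") and the values are illustrative only:

from sdmx.model import Annotation, Code

codes = [
    Code(id="2020", annotations=[Annotation(id="duration_period", text="10")]),
    Code(
        id="2030",
        annotations=[
            Annotation(id="duration_period", text="10"),
            Annotation(id="firstmodelyear", text="True"),
        ],
    ),
]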
Example #6
def minimal_test_data(scenario):
    """Generate data for :func:`test_minimal`."""
    common = COMMON.copy()
    common.pop("node_loc")
    common.update(dict(mode="all"))

    data = dict()

    info = ScenarioInfo(scenario)
    y0 = info.Y[0]
    y1 = info.Y[1]

    # Output from t0 and t1
    for t in ("t0", "t1"):
        common.update(dict(technology=t, commodity=f"output of {t}"))
        merge_data(data, make_source_tech(info, common, output=1.0, var_cost=1.0))

    # Disutility input for each combination of (tech) × (group) × (2 years)
    input_data = pd.DataFrame(
        [
            ["usage of t0 by g0", y0, 0.1],
            ["usage of t0 by g0", y1, 0.1],
            ["usage of t1 by g0", y0, 0.1],
            ["usage of t1 by g0", y1, 0.1],
            ["usage of t0 by g1", y0, 0.1],
            ["usage of t0 by g1", y1, 0.1],
            ["usage of t1 by g1", y0, 0.1],
            ["usage of t1 by g1", y1, 0.1],
        ],
        columns=["technology", "year_vtg", "value"],
    )
    data["input"] = make_df("input",
                            **input_data,
                            commodity="disutility",
                            **COMMON).assign(
                                node_origin=copy_column("node_loc"),
                                year_act=copy_column("year_vtg"))

    # Demand
    c, y = zip(*product(["demand of group g0", "demand of group g1"], [y0, y1]))
    data["demand"] = make_df("demand", commodity=c, year=y, value=1.0, **COMMON)

    # Constraint on activity in the first period
    t = sorted(input_data["technology"].unique())
    for bound in ("lo", "up"):
        par = f"bound_activity_{bound}"
        data[par] = make_df(par, value=0.5, technology=t, year_act=y0, **COMMON)

    # Constraint on activity growth
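    # 10% total growth per 5-year period, converted to an equivalent annual rate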
    annual = (1.1**(1.0 / 5.0)) - 1.0
    for bound, factor in (("lo", -1.0), ("up", 1.0)):
        par = f"growth_activity_{bound}"
        data[par] = make_df(
            par, value=factor * annual, technology=t, year_act=y1, **COMMON
        )

    return data, y0, y1
Example #7
def test_data_source(scenario, spec):
    """:func:`~.disutility.data_source` runs."""
    info = ScenarioInfo(scenario)
    disutility.data_source(info, spec)
Example #8
def test_data_conversion(scenario, spec):
    """:func:`~.disutility.data_conversion` runs."""
    info = ScenarioInfo(scenario)
    disutility.data_conversion(info, spec)
Example #9
def get_spec(context) -> Mapping[str, ScenarioInfo]:
    """Return the spec for the MESSAGE-GLOBIOM global model RES.

    Returns
    -------
    :class:`dict` of :class:`.ScenarioInfo` objects
    """
    context.use_defaults(SETTINGS)

    add = ScenarioInfo()

    # Add technologies
    add.set["technology"] = copy(get_codes("technology"))

    # Add regions

    # Load configuration for the specified region mapping
    nodes = get_codes(f"node/{context.regions}")

    # Top-level "World" node
    # FIXME typing ignored temporarily for PR#9
    world = nodes[nodes.index("World")]  # type: ignore [arg-type]

    # Set elements: World, followed by the direct children of World
    add.set["node"] = [world] + world.child

    # Initialize time periods
    add.year_from_codes(get_codes(f"year/{context.years}"))

    # Add levels
    add.set["level"] = get_codes("level")

    # Add commodities
    add.set["commodity"] = get_codes("commodity")

    # Add units, associated with commodities
    # Deduplicate by converting to a set and then back; not strictly necessary,
    # but reduces duplicate log entries
    units = set(eval_anno(commodity, "unit") for commodity in add.set["commodity"])
    add.set["unit"] = sorted(filter(None, units))

    if context.res_with_dummies:
        # Add dummy technologies
        add.set["technology"].extend(
            [Code(id="dummy"), Code(id="dummy source")])
        # Add a dummy commodity
        add.set["commodity"].append(Code(id="dummy"))

    # The RES is the base, so does not require/remove any elements
    return dict(add=add, remove=ScenarioInfo(), require=ScenarioInfo())
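
A spec like this one is typically consumed by a build step; a sketch assuming apply_spec() from message_ix_models.model.build (check the installed version):

# Hypothetical follow-on; not part of get_spec() itself.
from message_ix_models.model.build import apply_spec

def build(context, scenario):
    """Apply the RES spec to `scenario` (sketch)."""
    apply_spec(scenario, get_spec(context))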
Example #10
@pytest.fixture
def spec():
    """Empty spec for the disutility tests."""
    yield dict(add=ScenarioInfo(), require=ScenarioInfo(), remove=ScenarioInfo())
Example #11
def get_spec(
    groups: Sequence[Code],
    technologies: Sequence[Code],
    template: Code,
) -> Dict[str, ScenarioInfo]:
    """Get a spec for a disutility formulation.

    Parameters
    ----------
    groups : list of Code
        Identities of the consumer groups with distinct disutilities.
    technologies : list of Code
        The technologies to which the disutilities are applied.
    template : .Code
        Code with ``input`` and ``output`` annotations, used as a template for
        the conversion technologies generated per (technology, group) pair.
    """
    require = ScenarioInfo()
    remove = ScenarioInfo()
    add = ScenarioInfo()

    require.set["technology"].extend(technologies)

    # Disutility commodity and source
    add.set["commodity"] = [Code(id="disutility")]
    add.set["technology"] = [Code(id="disutility source")]

    # Disutility is unitless
    add.set["unit"].append("")

    # Add conversion technologies
    for t, g in product(technologies, groups):
        # String formatting arguments
        fmt = dict(technology=t, group=g)

        # Format each field in the "input" and "output" annotations
        input = {
            k: v.format(**fmt)
            for k, v in eval_anno(template, id="input").items()
        }
        output = {
            k: v.format(**fmt)
            for k, v in eval_anno(template, id="output").items()
        }

        # - Format the ID string from the template
        # - Store the formatted "input" and "output" annotations
        t_code = Code(
            id=template.id.format(**fmt),
            annotations=[
                Annotation(id="input", text=repr(input)),
                Annotation(id="output", text=repr(output)),
            ],
        )

        # "commodity" set elements to add
        add.set["commodity"].extend([input["commodity"], output["commodity"]])

        # "technology" set elements to add
        t_code.annotations.append(Annotation(id="input", text=repr(input)))
        add.set["technology"].append(t_code)

    # Deduplicate "commodity" set elements
    add.set["commodity"] = sorted(map(str, set(add.set["commodity"])))

    return dict(require=require, remove=remove, add=add)
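
eval_anno(), used above, retrieves an annotation by ID and evaluates its text as a Python literal. A rough sketch of that behaviour, not the message_ix_models.util implementation:

from ast import literal_eval

def eval_anno_sketch(code, id):
    """Return annotation `id` of `code` parsed as a Python literal, or None."""
    try:
        text = str(code.get_annotation(id=id).text)
    except KeyError:
        return None  # `code` has no such annotation
    return literal_eval(text)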
Example #12
def test_cached(caplog, test_context, tmp_path):
    """:func:`.cached` works as expected.

    .. todo:: test behaviour when :data:`.SKIP_CACHE` is :obj:`True`
    """
    # Clear seen paths, so that log message below is guaranteed to occur
    message_ix_models.util.cache.PATHS_SEEN.clear()

    # Store in the temporary directory for this session, to avoid collisions across
    # sessions
    test_context.cache_path = tmp_path.joinpath("cache")

    # A dummy path to be hashed as an argument
    path_foo = tmp_path.joinpath("foo", "bar")

    with caplog.at_level(logging.DEBUG, logger="message_ix_models"):

        @cached
        def func0(ctx, a, path, b=3):
            """A test function."""
            log.info("func0 runs")
            return f"{id(ctx)}, {a + b}"

    # Docstring is modified
    assert "Data returned by this function is cached" in func0.__doc__
    # Message is logged
    assert f"func0() will cache in {tmp_path.joinpath('cache')}" in caplog.messages

    @cached
    def func1(x=1, y=2, **kwargs):
        # Function with defaults for all arguments
        log.info("func1 runs")
        return x + y

    caplog.clear()

    # pathlib.Path argument is serialized to JSON as part of the argument hash;
    # function runs, messages logged
    with assert_logs(caplog, "func0 runs"):
        result0 = func0(test_context, 1, path_foo)

    caplog.clear()
    result1 = func0(test_context, 1, path_foo)
    # Function does not run
    assert "func0 runs" not in caplog.messages
    assert caplog.messages[0].startswith("Cache hit for func0")
    # Results identical
    assert result0 == result1

    # Different context object with identical contents hashes equal
    ctx2 = deepcopy(test_context)
    assert id(test_context) != id(ctx2)

    result2 = func0(ctx2, 1, path_foo)
    # Function does not run
    assert "func0 runs" not in caplog.messages
    # Results are identical, i.e. including the old ID
    assert result0 == result2

    ctx2.delete()
    caplog.clear()

    # Hash of no arguments is the same, function only runs once
    assert 3 == func1() == func1()
    assert 1 == sum(m == "func1 runs" for m in caplog.messages)

    # Warnings logged for unhashables; ScenarioInfo is hashed as dict
    caplog.clear()
    with assert_logs(
        caplog,
        [
            "ignores <class 'xarray.core.dataset.Dataset'>",
            "ignores <class 'ixmp.core.platform.Platform'>",
        ],
    ):
        func1(ds=xr.Dataset(), mp=test_context.get_platform(), si=ScenarioInfo())

    # Unserializable type raises an exception
    with pytest.raises(
        TypeError, match="Object of type slice is not JSON serializable"
    ):
        func1(arg=slice(None))
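
The pattern under test: serialize the function arguments into a key, and skip re-running the function on a cache hit. A toy illustration of that idea, not the message_ix_models.util.cache implementation:

import json
from functools import wraps

_MEMORY = {}

def cached_sketch(func):
    """Toy argument-hashing cache in the spirit of @cached."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        # JSON-serialize the arguments to form the key. As in the test above,
        # non-serializable keyword arguments raise TypeError.
        key = json.dumps([func.__name__, [str(a) for a in args], kwargs])
        if key not in _MEMORY:
            _MEMORY[key] = func(*args, **kwargs)
        return _MEMORY[key]

    return wrapper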