Example 1
def data_source(info, spec) -> Mapping[str, pd.DataFrame]:
    """Generate data for a technology that emits the “disutility” commodity."""
    # Collect the levels at which the "disutility" commodity must exist, taken from
    # the "input" annotation of each technology
    levels = set()
    for t in spec["add"].set["technology"]:
        input = eval_anno(t, "input")
        if input:
            levels.add(input["level"])

    log.info(f"Generate disutility on level(s): {repr(levels)}")

    # Use default capacity_factor = 1.0
    result = make_source_tech(
        info,
        common=dict(
            commodity="disutility",
            mode="all",
            technology="disutility source",
            time="year",
            time_dest="year",
            unit="-",
        ),
        output=1.0,
        var_cost=1.0,
    )
    result["output"] = result["output"].pipe(broadcast, level=sorted(levels))

    return result
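The last line relies on the broadcast helper: rows carrying a placeholder dimension are repeated once per label. Below is a minimal, self-contained sketch of that behaviour (assuming pandas >= 1.2 for how="cross"); it is a stand-in for illustration, not the actual message_ix_models.util.broadcast implementation.

import pandas as pd

def _broadcast(df: pd.DataFrame, **labels) -> pd.DataFrame:
    """Repeat the rows of `df` once per value of each dimension in `labels` (sketch)."""
    for dim, values in labels.items():
        # Drop any placeholder column, then take the cartesian product with the labels
        df = df.drop(columns=[dim], errors="ignore").merge(
            pd.DataFrame({dim: list(values)}), how="cross"
        )
    return df

base = pd.DataFrame(dict(technology=["disutility source"], value=[1.0]))
print(_broadcast(base, level=["final", "useful"]))  # two rows, one per level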
Example 2
def get_spec(context) -> Mapping[str, ScenarioInfo]:
    """Return the spec for the MESSAGE-GLOBIOM global model RES.

    Returns
    -------
    :class:`dict` of :class:`.ScenarioInfo` objects
    """
    context.use_defaults(SETTINGS)

    add = ScenarioInfo()

    # Add technologies
    add.set["technology"] = copy(get_codes("technology"))

    # Add regions

    # Load configuration for the specified region mapping
    nodes = get_codes(f"node/{context.regions}")

    # Top-level "World" node
    # FIXME typing ignored temporarily for PR#9
    world = nodes[nodes.index("World")]  # type: ignore [arg-type]

    # Set elements: World, followed by the direct children of World
    add.set["node"] = [world] + world.child

    # Initialize time periods
    add.year_from_codes(get_codes(f"year/{context.years}"))

    # Add levels
    add.set["level"] = get_codes("level")

    # Add commodities
    add.set["commodity"] = get_codes("commodity")

    # Add units, associated with commodities
    # Convert to a set to deduplicate; not strictly necessary, but it reduces
    # duplicate log entries
    units = set(eval_anno(commodity, "unit") for commodity in add.set["commodity"])
    add.set["unit"] = sorted(filter(None, units))

    if context.res_with_dummies:
        # Add dummy technologies
        add.set["technology"].extend(
            [Code(id="dummy"), Code(id="dummy source")])
        # Add a dummy commodity
        add.set["commodity"].append(Code(id="dummy"))

    # The RES is the base, so does not require/remove any elements
    return dict(add=add, remove=ScenarioInfo(), require=ScenarioInfo())
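A hedged sketch of the "World followed by its direct children" node handling above, built from a hand-made code hierarchy rather than get_codes(). Item.child and append_child() are sdmx features; the region IDs are only illustrative.

from sdmx.model import Code

# Build a toy node hierarchy in place of get_codes(f"node/{context.regions}")
world = Code(id="World")
for region_id in ("R11_AFR", "R11_CPA"):
    world.append_child(Code(id=region_id))

# Same construction as above: World, followed by its direct children
nodes = [world] + world.child
print([str(n) for n in nodes])  # ['World', 'R11_AFR', 'R11_CPA']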
Example 3
def process_commodity_codes(codes):
    """Process a list of codes for ``commodity``.

    The function warns for commodities missing units or with non-:mod:`pint`-compatible
    units.
    """
    for code in codes:
        unit = eval_anno(code, "unit")
        if unit is None:
            log.warning(f"Commodity {code} lacks defined units")
            continue

        try:
            # Check that the unit can be parsed by the pint.UnitRegistry
            registry(unit)
        except Exception:  # pragma: no cover
            # No coverage: code that triggers this exception should never be committed
            log.warning(f"Unit {unit} for commodity {code} not pint compatible")
Example 4
    def year_from_codes(self, codes: List[sdmx.model.Code]):
        """Update using a list of `codes`.

        The following are updated:

        - :attr:`.set` ``year``
        - :attr:`.set` ``cat_year``, with the first model year.
        - :attr:`.par` ``duration_period``

        Any existing values are discarded.

        After this, the attributes :attr:`.y0` and :attr:`.Y` give the first model year
        and model years, respectively.

        Examples
        --------
        Get a particular code list, create a ScenarioInfo instance, and update using
        the codes:

        >>> years = get_codes("year/A")
        >>> info = ScenarioInfo()
        >>> info.year_from_codes(years)

        Use populated values:

        >>> info.y0
        2020
        >>> info.Y[:3]
        [2020, 2030, 2040]
        >>> info.Y[-3:]
        [2090, 2100, 2110]

        """
        from message_ix_models.util import eval_anno

        # Clear existing values
        if len(self.set["year"]):
            log.debug(f"Discard existing 'year' elements: {repr(self.set['year'])}")
            self.set["year"] = []
        if len(self.set["cat_year"]):
            log.debug(
                f"Discard existing 'cat_year' elements: {repr(self.set['cat_year'])}"
            )
            self.set["cat_year"] = []
        if "duration_period" in self.par:
            log.debug("Discard existing 'duration_period' elements")

        fmy_set = False
        duration_period: List[Dict] = []

        # TODO use sorted() here once supported by sdmx
        for code in codes:
            year = int(code.id)
            # Store the year
            self.set["year"].append(year)

            # Check for an annotation 'firstmodelyear: true'
            if eval_anno(code, "firstmodelyear"):
                if fmy_set:
                    # No coverage: data that triggers this should not be committed
                    raise ValueError(  # pragma: no cover
                        "≥2 periods are annotated firstmodelyear: true"
                    )

                self.y0 = year
                self.set["cat_year"].append(("firstmodelyear", year))
                fmy_set = True

            # Store the duration_period: either from an annotation, or computed vs. the
            # prior period
            duration_period.append(
                dict(
                    year=year,
                    value=eval_anno(code, "duration_period")
                    or (year - duration_period[-1]["year"]),
                    unit="y",
                )
            )

        # Store
        self.par["duration_period"] = pd.DataFrame(duration_period)
Example 5
def get_spec(
    groups: Sequence[Code],
    technologies: Sequence[Code],
    template: Code,
) -> Dict[str, ScenarioInfo]:
    """Get a spec for a disutility formulation.

    Parameters
    ----------
    groups : list of Code
        Identities of the consumer groups with distinct disutilities.
    technologies : list of Code
        The technologies to which the disutilities are applied.
    template : .Code
        Code whose ``id`` and "input"/"output" annotations are :meth:`str.format`
        templates, filled for each (`technology`, `group`) pair.

    """
    require = ScenarioInfo()
    remove = ScenarioInfo()
    add = ScenarioInfo()

    require.set["technology"].extend(technologies)

    # Disutility commodity and source
    add.set["commodity"] = [Code(id="disutility")]
    add.set["technology"] = [Code(id="disutility source")]

    # Disutility is unitless
    add.set["unit"].append("")

    # Add conversion technologies
    for t, g in product(technologies, groups):
        # String formatting arguments
        fmt = dict(technology=t, group=g)

        # Format each field in the "input" and "output" annotations
        input = {
            k: v.format(**fmt)
            for k, v in eval_anno(template, id="input").items()
        }
        output = {
            k: v.format(**fmt)
            for k, v in eval_anno(template, id="output").items()
        }

        # - Format the ID string from the template
        # - Store the formatted "input" and "output" annotations
        t_code = Code(
            id=template.id.format(**fmt),
            annotations=[
                Annotation(id="input", text=repr(input)),
                Annotation(id="output", text=repr(output)),
            ],
        )

        # "commodity" set elements to add
        add.set["commodity"].extend([input["commodity"], output["commodity"]])

        # "technology" set elements to add
        t_code.annotations.append(Annotation(id="input", text=repr(input)))
        add.set["technology"].append(t_code)

    # Deduplicate "commodity" set elements
    add.set["commodity"] = sorted(map(str, set(add.set["commodity"])))

    return dict(require=require, remove=remove, add=add)
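A hedged usage sketch for get_spec() above, assuming the function and its imports are in scope. The template is a Code whose id and whose "input"/"output" annotation values are str.format() templates over {technology} and {group}; every ID below is invented for illustration.

from sdmx.model import Annotation, Code

template = Code(
    id="{technology} usage by {group}",
    annotations=[
        Annotation(
            id="input",
            text=repr(dict(commodity="vehicle {technology}", level="useful", unit="km")),
        ),
        Annotation(
            id="output",
            text=repr(dict(commodity="travel by {group}", level="useful", unit="km")),
        ),
    ],
)

spec = get_spec(
    groups=[Code(id="urban")], technologies=[Code(id="car")], template=template
)
print(spec["add"].set["commodity"])
# ['disutility', 'travel by urban', 'vehicle car']: sorted, deduplicated strings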
Example 6
def data_conversion(info, spec) -> Mapping[str, pd.DataFrame]:
    """Generate input and output data for disutility conversion technologies."""
    common = dict(
        mode="all",
        year_vtg=info.Y,
        year_act=info.Y,
        # No subannual detail
        time="year",
        time_origin="year",
        time_dest="year",
    )

    # Use the spec to retrieve information
    technology = spec["add"].set["technology"]

    # Data to return
    data0: Mapping[str, List[pd.DataFrame]] = defaultdict(list)

    # Loop over conversion technologies
    for t in technology:
        # Use the annotations on the technology Code to get information about the
        # commodity, level, and unit
        input = eval_anno(t, "input")
        output = eval_anno(t, "output")
        if None in (input, output):
            if t.id == "disutility source":
                continue  # Data for this tech is from data_source()
            else:  # pragma: no cover
                raise ValueError(t)  # Error in user input

        # Make input and output data frames
        i_o = make_io(
            (input["commodity"], input["level"], input["unit"]),
            (output["commodity"], output["level"], output["unit"]),
            1.0,
            on="output",
            technology=t.id,
            **common,
        )
        for par, df in i_o.items():
            # Broadcast across nodes
            df = df.pipe(broadcast, node_loc=info.N[1:]).pipe(same_node)

            if par == "input":
                # Add input of disutility
                df = pd.concat(
                    [df, df.assign(commodity="disutility", unit="")],
                    ignore_index=True)

            data0[par].append(df)

    # Concatenate to a single data frame per parameter
    data = {
        par: pd.concat(dfs, ignore_index=True)
        for par, dfs in data0.items()
    }

    # Create data for capacity_factor
    data.update(make_matched_dfs(base=data["input"], capacity_factor=1.0))

    return data
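A self-contained sketch of the collect-then-concatenate pattern used above: one list of data frames per parameter name, merged into a single frame per parameter at the end. The technology and commodity labels are illustrative only.

from collections import defaultdict
import pandas as pd

data0 = defaultdict(list)
for tech in ("car usage by urban", "car usage by rural"):
    data0["input"].append(
        pd.DataFrame(dict(technology=[tech], commodity=["disutility"], value=[1.0]))
    )
    data0["output"].append(
        pd.DataFrame(dict(technology=[tech], commodity=["travel"], value=[1.0]))
    )

# One data frame per parameter, as in the `data` dict returned above
data = {par: pd.concat(dfs, ignore_index=True) for par, dfs in data0.items()}
print({par: len(df) for par, df in data.items()})  # {'input': 2, 'output': 2}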