Example #1
def systemwide_capacity_factor(results, model_data):
    """
    Returns a DataArray with systemwide capacity factors over the entire
    model duration, for the given results, indexed by techs and carriers.

    The weight of timesteps is considered when computing capacity factors,
    such that higher-weighted timesteps have a stronger influence
    on the resulting system-wide time-averaged capacity factor.

    """
    # In operate mode, energy_cap is an input parameter
    if 'energy_cap' not in results.keys():
        energy_cap = model_data.energy_cap
    else:
        energy_cap = results.energy_cap

    prod_sum = (
        # Aggregated/clustered days are represented `timestep_weights` times
        split_loc_techs(results['carrier_prod']) * model_data.timestep_weights
    ).sum(dim='timesteps').sum(dim='locs')
    cap_sum = split_loc_techs(energy_cap).sum(dim='locs')
    time_sum = (model_data.timestep_resolution * model_data.timestep_weights).sum()

    capacity_factors = prod_sum / (cap_sum * time_sum)

    return capacity_factors
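A minimal usage sketch for the helper above (hedged: `model`, its private `_model_data` Dataset and the `is_result` attribute are assumptions borrowed from the `save_csv` and `get_formatted_array` examples further down, not confirmed here):

# Hedged sketch: assumes a solved calliope Model whose Dataset tags result
# variables with the `is_result` attribute.
model_data = model._model_data
results = model_data.filter_by_attrs(is_result=1)  # keep only solved decision variables

cf = systemwide_capacity_factor(results, model_data)
# cf is indexed by techs and carriers; 'ccgt' and 'power' are illustrative labels
cf.loc[dict(techs='ccgt', carriers='power')]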
Example #2
    def test_split_loc_tech_too_many_loc_tech_dims(self, example_dataarray):
        _array = example_dataarray.rename({"costs": "loc_techs_2"})
        with pytest.raises(exceptions.ModelError) as excinfo:
            dataset.split_loc_techs(_array)
        assert check_error_or_warning(
            excinfo, "Cannot split loc_techs or loc_tech_carriers dimension"
        )
Example #3
def systemwide_capacity_factor(results, model_data):
    """
    Returns a DataArray with systemwide capacity factors over the entire
    model duration, for the given results, indexed by techs and carriers.

    The weight of timesteps is considered when computing capacity factors,
    such that higher-weighted timesteps have a stronger influence
    on the resulting system-wide time-averaged capacity factor.

    """
    # In operate mode, energy_cap is an input parameter
    if 'energy_cap' not in results.keys():
        energy_cap = model_data.energy_cap
    else:
        energy_cap = results.energy_cap

    prod_sum = (
        # Aggregated/clustered days are represented `timestep_weights` times
        split_loc_techs(results['carrier_prod']) *
        model_data.timestep_weights).sum(dim='timesteps').sum(dim='locs')
    cap_sum = split_loc_techs(energy_cap).sum(dim='locs')
    time_sum = (model_data.timestep_resolution *
                model_data.timestep_weights).sum()

    capacity_factors = prod_sum / (cap_sum * time_sum)

    return capacity_factors
Example #4
    def test_split_loc_tech_unknown_output(
        self, example_dataarray, example_one_dim_dataarray
    ):
        for array in [example_dataarray, example_one_dim_dataarray]:
            with pytest.raises(ValueError) as excinfo:
                dataset.split_loc_techs(array, return_as='foo')
            assert check_error_or_warning(
                excinfo,
                '`return_as` must be `DataArray`, `Series`, or `MultiIndex DataArray`'
            )
Example #5
def systemwide_levelised_cost(results, model_data, total=False):
    """
    Returns a DataArray with systemwide levelised costs for the given
    results, indexed by techs, carriers and costs if total is False,
    or by carriers and costs if total is True.

    The weight of timesteps is considered when computing levelised costs:

    * costs are already multiplied by weight in the constraints, and not
      further adjusted here.

    * production is not multiplied by weight in the constraints, so it is
      scaled by weight here to be consistent with costs. CAUTION: this
      scaling is temporary, applied only during levelised cost computation -
      the values stored in the results remain untouched.

    Parameters
    ----------
    results : xarray.Dataset
        Model results
    model_data : xarray.Dataset
        Model input data
    total : bool, optional
        If False (default) returns per-technology levelised cost, if True,
        returns overall system-wide levelised cost.

    """
    cost = results["cost"]
    # Here we scale production by timestep weight
    carrier_prod = results["carrier_prod"] * model_data.timestep_weights

    if total:
        cost = split_loc_techs(cost).sum(dim=["locs", "techs"])
        supply_only_carrier_prod = carrier_prod.sel(
            loc_tech_carriers_prod=list(
                model_data.loc_tech_carriers_supply_conversion_all.values
            )
        )
        carrier_prod = split_loc_techs(supply_only_carrier_prod).sum(
            dim=["timesteps", "locs", "techs"]
        )
    else:
        cost = split_loc_techs(cost).sum(dim=["locs"])
        carrier_prod = split_loc_techs(carrier_prod).sum(["timesteps", "locs"])

    levelised_cost = []

    for carrier in carrier_prod["carriers"].values:
        levelised_cost.append(cost / carrier_prod.loc[dict(carriers=carrier)])

    return xr.concat(levelised_cost, dim="carriers")
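A short, hedged usage sketch for the `total` flag described in the docstring above ('power' and 'monetary' are illustrative carrier and cost-class labels, not taken from this listing):

lcoe_per_tech = systemwide_levelised_cost(results, model_data)           # indexed by techs, carriers, costs
lcoe_total = systemwide_levelised_cost(results, model_data, total=True)  # indexed by carriers, costs
lcoe_per_tech.loc[dict(carriers='power', costs='monetary')]              # illustrative labels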
Example #6
    def test_split_loc_tech_to_series(self, example_dataarray):
        formatted_series = dataset.split_loc_techs(
            example_dataarray, return_as='Series'
        )
        assert isinstance(formatted_series, pd.Series)
        assert formatted_series.index.names == [
            'costs', 'locs', 'techs', 'timesteps'
        ]
Example #7
    def test_split_loc_tech_to_multiindex_dataarray(self, example_dataarray):
        formatted_array = dataset.split_loc_techs(
            example_dataarray, return_as='MultiIndex DataArray'
        )
        assert isinstance(formatted_array, xr.DataArray)
        assert formatted_array.dims == ('timesteps', 'loc_techs_bar', 'costs')
        assert isinstance(formatted_array.loc_techs_bar.to_index(), pd.MultiIndex)
Example #8
File: model.py Project: awelsz/calliope
    def get_formatted_array(self, var, index_format='index'):
        """
        Return an xr.DataArray with locs, techs, and carriers as
        separate dimensions.

        Parameters
        ----------
        var : str
            Decision variable for which to return a DataArray.
        index_format : str, default = 'index'
            'index' to return the `loc_tech(_carrier)` dimensions as individual
            indexes, 'multiindex' to return them as a MultiIndex. The latter
            has the benefit of a smaller memory footprint, but you cannot
            undertake dimension-specific operations (e.g. formatted_array.sum('locs')).
        """
        if var not in self._model_data.data_vars:
            raise KeyError("Variable {} not in Model data".format(var))

        if index_format not in ['index', 'multiindex']:
            raise ValueError(
                "Argument 'index_format' must be one of 'index' or 'multiindex'"
            )
        elif index_format == 'index':
            return_as = 'DataArray'
        elif index_format == 'multiindex':
            return_as = 'MultiIndex DataArray'

        return split_loc_techs(self._model_data[var], return_as=return_as)
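A hedged usage sketch for the two `index_format` options (assuming `model` is a built calliope Model and 'energy_cap' is present in its model data):

cap = model.get_formatted_array('energy_cap')            # locs and techs as separate dimensions
per_tech_cap = cap.sum('locs')                           # dimension-specific operations work on this format
cap_mi = model.get_formatted_array('energy_cap', index_format='multiindex')  # smaller memory footprint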
Example #9
File: io.py Project: awelsz/calliope
def save_csv(model_data, path, dropna=True):
    """
    If the termination condition was not optimal, only the inputs are saved,
    and a warning is given that results will not be saved.

    """
    os.makedirs(path, exist_ok=False)

    # a MILP model which optimises to within the MIP gap, but does not fully
    # converge on the LP relaxation, may return as 'feasible', not 'optimal'
    if ('termination_condition' not in model_data.attrs or
            model_data.attrs['termination_condition'] in ['optimal', 'feasible']):
        data_vars = model_data.data_vars
    else:
        data_vars = model_data.filter_by_attrs(is_result=0).data_vars
        exceptions.warn(
            'Model termination condition was not optimal, saving inputs only.'
        )

    for var in data_vars:
        in_out = 'results' if model_data[var].attrs['is_result'] else 'inputs'
        out_path = os.path.join(path, '{}_{}.csv'.format(in_out, var))
        series = split_loc_techs(model_data[var], return_as='Series')
        if dropna:
            series = series.dropna()
        series.to_csv(out_path, header=True)
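A hedged usage sketch; note that `os.makedirs(path, exist_ok=False)` means the target directory must not already exist, and 'model_outputs_csv' is just an example path:

save_csv(model._model_data, 'model_outputs_csv', dropna=True)
# One CSV per variable is written, e.g. model_outputs_csv/results_energy_cap.csv;
# only inputs are written if the termination condition was neither optimal nor feasible.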
Example #10
def capacity_factor(results, model_data, systemwide=False):
    """
    Returns a DataArray with capacity factor for the given results.
    The results are either indexed by loc_tech_carriers_prod and timesteps,
    or by techs and carriers if systemwide results are being calculated.

    The weight of timesteps is considered when computing systemwide capacity factors,
    such that higher-weighted timesteps have a stronger influence
    on the resulting system-wide time-averaged capacity factor.

    """
    # In operate mode, energy_cap is an input parameter
    if "energy_cap" not in results.keys():
        energy_cap = model_data.energy_cap
    else:
        energy_cap = results.energy_cap

    _prod = split_loc_techs(results["carrier_prod"])
    _cap = split_loc_techs(energy_cap)
    if systemwide:
        # Aggregated/clustered days are represented `timestep_weights` times
        prod_sum = (_prod * model_data.timestep_weights).sum(
            ["timesteps", "locs"])
        cap_sum = _cap.sum(dim="locs")
        time_sum = (model_data.timestep_resolution *
                    model_data.timestep_weights).sum()
        capacity_factors = prod_sum / (cap_sum * time_sum)

    else:
        extra_dims = {
            i: model_data[i].to_index()
            for i in _prod.dims if i not in _cap.dims
        }
        capacity_factors = (
            (_prod / _cap.expand_dims(extra_dims))
            .fillna(0)
            .stack({"loc_tech_carriers_prod": ["locs", "techs", "carriers"]})
        )
        new_idx = concat_iterable(
            capacity_factors.loc_tech_carriers_prod.values, ["::", "::"]
        )
        capacity_factors = capacity_factors.assign_coords(
            {"loc_tech_carriers_prod": new_idx}
        ).reindex({"loc_tech_carriers_prod": results.loc_tech_carriers_prod})

    return capacity_factors
Example #11
    def test_split_loc_tech_one_dim_to_multiindex_dataarray(
        self, example_one_dim_dataarray
    ):
        formatted_array = dataset.split_loc_techs(
            example_one_dim_dataarray, return_as="MultiIndex DataArray"
        )
        assert isinstance(formatted_array, xr.DataArray)
        assert formatted_array.dims == ("timesteps",)
Example #12
    def _cap_loc_score_default(results, subset=None):
        if subset is None:
            subset = {}
        cap_loc_score = split_loc_techs(results["energy_cap"]).loc[subset]
        cap_loc_score = cap_loc_score.where(cap_loc_score > 1e-3, other=0)
        cap_loc_score = cap_loc_score.where(cap_loc_score == 0, other=100)

        return cap_loc_score.to_pandas()
Example #13
File: io.py Project: cuixueqin/calliope
def save_csv(model_data, path):
    os.makedirs(path, exist_ok=False)

    for var in model_data.data_vars:
        in_out = 'results' if model_data[var].attrs['is_result'] else 'inputs'
        out_path = os.path.join(path, '{}_{}.csv'.format(in_out, var))
        series = split_loc_techs(model_data[var], as_='Series')
        series.to_csv(out_path)
Example #14
def systemwide_capacity_factor(results, model_data):
    """
    Returns a DataArray with systemwide capacity factors for the given
    results, indexed by techs and carriers.

    """
    # In operate mode, energy_cap is an input parameter
    if 'energy_cap' not in results.keys():
        energy_cap = model_data.energy_cap
    else:
        energy_cap = results.energy_cap

    capacity_factors = (
        split_loc_techs(results['carrier_prod']).sum(dim='timesteps').sum(dim='locs')
        / (split_loc_techs(energy_cap).sum(dim='locs')
           * model_data.timestep_resolution.sum())
    )

    return capacity_factors
Example #15
def systemwide_levelised_cost(results, model_data, total=False):
    """
    Returns a DataArray with systemwide levelised costs for the given
    results, indexed by techs, carriers and costs if total is False,
    or by carriers and costs if total is True.

    The weight of timesteps is considered when computing levelised costs:

    * costs are already multiplied by weight in the constraints, and not
      further adjusted here.

    * production is not multiplied by weight in the constraints, so it is
      scaled by weight here to be consistent with costs. CAUTION: this
      scaling is temporary, applied only during levelised cost computation -
      the values stored in the results remain untouched.

    Parameters
    ----------
    results : xarray.Dataset
        Model results
    model_data : xarray.Dataset
        Model input data
    total : bool, optional
        If False (default) returns per-technology levelised cost, if True,
        returns overall system-wide levelised cost.

    """
    cost = split_loc_techs(results['cost']).sum(dim='locs')
    carrier_prod = (
        # Here we scale production by timestep weight
        split_loc_techs(results['carrier_prod']) * model_data.timestep_weights
    ).sum(dim='timesteps').sum(dim='locs')

    if total:
        cost = cost.sum(dim='techs')
        carrier_prod = carrier_prod.sum(dim='techs')

    levelised_cost = []

    for carrier in carrier_prod['carriers'].values:
        levelised_cost.append(cost / carrier_prod.loc[dict(carriers=carrier)])

    return xr.concat(levelised_cost, dim='carriers')
Example #16
def systemwide_levelised_cost(results, total=False):
    """
    Returns a DataArray with systemwide levelised costs for the given
    results, indexed by techs, carriers and costs if total is False,
    or by carriers and costs if total is True.

    """
    cost = split_loc_techs(results['cost']).sum(dim='locs')
    carrier_prod = split_loc_techs(
        results['carrier_prod'].sum(dim='timesteps')).sum(dim='locs')

    if total:
        cost = cost.sum(dim='techs')
        carrier_prod = carrier_prod.sum(dim='techs')

    levelised_cost = []

    for carrier in carrier_prod['carriers'].values:
        levelised_cost.append(cost / carrier_prod.loc[dict(carriers=carrier)])

    return xr.concat(levelised_cost, dim='carriers')
Example #17
    def get_formatted_array(self, var):
        """
        Return an xr.DataArray with locs, techs, and carriers as
        separate dimensions.

        Parameters
        ----------
        var : str
            Decision variable for which to return a DataArray.

        """
        if var not in self._model_data.data_vars:
            raise KeyError("Variable {} not in Model data".format(var))

        return split_loc_techs(self._model_data[var])
Example #18
    def get_formatted_array(self, var):
        """
        Return an xr.DataArray with locs, techs, and carriers as
        separate dimensions.

        Parameters
        ----------
        var : str
            Decision variable for which to return a DataArray.

        """
        if var not in self._model_data.data_vars:
            raise KeyError("Variable {} not in Model data".format(var))

        return split_loc_techs(self._model_data[var])
Example #19
def _get_array(data, var, tech, **kwargs):
    subset = {'techs': tech}
    if kwargs is not None:
        subset.update({k: v for k, v in kwargs.items()})
    unusable_dims = (set(subset.keys())
                        .difference(["techs", "locs"])
                        .difference(data[var].dims)
                    )
    if unusable_dims:
        raise exceptions.ModelError("attempting to mask time based on "
                                    "technology {}, but dimension(s) "
                                    "{} don't exist for parameter {}".format(
                                        tech, unusable_dims, var))
    arr = split_loc_techs(data[var].copy()).loc[subset]
    arr = arr.mean(dim=[i for i in arr.dims if i != 'timesteps']).to_pandas()
    return arr
Example #20
def _get_array(data, var, tech, **kwargs):
    subset = {'techs': tech}
    if kwargs is not None:
        subset.update({k: v for k, v in kwargs.items()})

    unusable_dims = (
        set(subset.keys())
        .difference(["techs", "locs"])
        .difference(data[var].dims)
    )
    if unusable_dims:
        raise exceptions.ModelError(
            'Attempting to mask time based on technology {}, '
            'but dimension(s) {} do not exist for parameter {}'.format(
                tech, unusable_dims, var)
        )

    arr = split_loc_techs(data[var].copy()).loc[subset]
    arr = arr.mean(dim=[i for i in arr.dims if i != 'timesteps']).to_pandas()
    return arr
Example #21
def save_csv(model_data, path, dropna=True):
    """
    If the termination condition was not optimal, only the inputs are saved,
    and a warning is given that results will not be saved.

    """
    os.makedirs(path, exist_ok=False)

    if ('termination_condition' not in model_data.attrs
            or model_data.attrs['termination_condition'] == 'optimal'):
        data_vars = model_data.data_vars
    else:
        data_vars = model_data.filter_by_attrs(is_result=0).data_vars
        exceptions.warn(
            'Model termination condition was not optimal, saving inputs only.')

    for var in data_vars:
        in_out = 'results' if model_data[var].attrs['is_result'] else 'inputs'
        out_path = os.path.join(path, '{}_{}.csv'.format(in_out, var))
        series = split_loc_techs(model_data[var], as_='Series')
        if dropna:
            series = series.dropna()
        series.to_csv(out_path)
Example #22
def save_csv(model_data, path, dropna=True):
    """
    If the termination condition was not optimal, only the inputs are saved,
    and a warning is given that results will not be saved.

    """
    os.makedirs(path, exist_ok=False)

    if ('termination_condition' not in model_data.attrs or
            model_data.attrs['termination_condition'] == 'optimal'):
        data_vars = model_data.data_vars
    else:
        data_vars = model_data.filter_by_attrs(is_result=0).data_vars
        exceptions.warn(
            'Model termination condition was not optimal, saving inputs only.'
        )

    for var in data_vars:
        in_out = 'results' if model_data[var].attrs['is_result'] else 'inputs'
        out_path = os.path.join(path, '{}_{}.csv'.format(in_out, var))
        series = split_loc_techs(model_data[var], as_='Series')
        if dropna:
            series = series.dropna()
        series.to_csv(out_path)
Example #23
    def test_split_loc_tech_to_dataarray(self, example_dataarray):
        formatted_array = dataset.split_loc_techs(example_dataarray)
        assert isinstance(formatted_array, xr.DataArray)
        assert formatted_array.dims == ('costs', 'locs', 'techs', 'timesteps')
Example #24
    def test_split_loc_tech_one_dim_to_dataarray(self, example_one_dim_dataarray):
        formatted_array = dataset.split_loc_techs(example_one_dim_dataarray)
        assert isinstance(formatted_array, xr.DataArray)
        assert formatted_array.dims == ("timesteps",)
Example #25
    def test_split_loc_tech_to_series(self, example_dataarray):
        formatted_series = dataset.split_loc_techs(
            example_dataarray, return_as="Series"
        )
        assert isinstance(formatted_series, pd.Series)
        assert formatted_series.index.names == ["costs", "locs", "techs", "timesteps"]