Code example #1
File: model.py  Project: suvayu/calliope
    def _init_from_model_data(self, model_data):
        if "_model_run" in model_data.attrs:
            self._model_run = AttrDict.from_yaml_string(
                model_data.attrs["_model_run"])
            del model_data.attrs["_model_run"]

        if "_debug_data" in model_data.attrs:
            self._debug_data = AttrDict.from_yaml_string(
                model_data.attrs["_debug_data"])
            del model_data.attrs["_debug_data"]

        self._model_data = model_data
        self.inputs = self._model_data.filter_by_attrs(is_result=0)
        self.model_config = UpdateObserverDict(
            initial_yaml_string=model_data.attrs.get("model_config", "{}"),
            name="model_config",
            observer=self._model_data,
        )
        self.run_config = UpdateObserverDict(
            initial_yaml_string=model_data.attrs.get("run_config", "{}"),
            name="run_config",
            observer=self._model_data,
        )

        results = self._model_data.filter_by_attrs(is_result=1)
        if len(results.data_vars) > 0:
            self.results = results
        log_time(
            logger,
            self._timings,
            "model_data_loaded",
            comment="Model: loaded model_data",
        )
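Note: UpdateObserverDict is calliope-internal and its implementation is not
shown in these examples. As a rough mental model only, here is a minimal sketch
of the observer idea (hypothetical internals; the real class serialises via
calliope's AttrDict rather than plain PyYAML, and also supports a `flat` mode):

import yaml
import xarray as xr

class UpdateObserverDict(dict):
    """Sketch: a dict that writes itself back into observer.attrs[name]
    whenever a key is set, keeping model_data.attrs in sync."""

    def __init__(self, initial_yaml_string="{}", name=None, observer=None):
        super().__init__(yaml.safe_load(initial_yaml_string) or {})
        self._name = name
        self._observer = observer
        self._notify()

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self._notify()

    def _notify(self):
        # Re-serialise the current state into the observed Dataset's attrs
        self._observer.attrs[self._name] = yaml.safe_dump(dict(self))

# Usage: changes to the dict immediately show up in the Dataset's attrs
ds = xr.Dataset()
cfg = UpdateObserverDict(initial_yaml_string="{solver: cbc}", name="run_config", observer=ds)
cfg["solver"] = "glpk"
assert "glpk" in ds.attrs["run_config"]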
Code example #2
File: model.py  Project: awelsz/calliope
    def _init_from_model_data(self, model_data):
        if '_model_run' in model_data.attrs:
            self._model_run = AttrDict.from_yaml_string(
                model_data.attrs['_model_run'])
            del model_data.attrs['_model_run']

        if '_debug_data' in model_data.attrs:
            self._debug_data = AttrDict.from_yaml_string(
                model_data.attrs['_debug_data'])
            del model_data.attrs['_debug_data']

        self._model_data = model_data
        self.inputs = self._model_data.filter_by_attrs(is_result=0)
        self.model_config = UpdateObserverDict(
            initial_yaml_string=model_data.attrs.get('model_config', '{}'),
            name='model_config',
            observer=self._model_data)
        self.run_config = UpdateObserverDict(
            initial_yaml_string=model_data.attrs.get('run_config', '{}'),
            name='run_config',
            observer=self._model_data)

        results = self._model_data.filter_by_attrs(is_result=1)
        if len(results.data_vars) > 0:
            self.results = results
        log_time(logger,
                 self._timings,
                 'model_data_loaded',
                 comment='Model: loaded model_data')
Code example #3
File: model.py  Project: FraSanvit/calliope
    def _add_model_data_methods(self):
        self.inputs = self._model_data.filter_by_attrs(is_result=0)
        self.results = self._model_data.filter_by_attrs(is_result=1)
        self.model_config = UpdateObserverDict(
            initial_yaml_string=self._model_data.attrs.get("model_config", "{}"),
            name="model_config",
            observer=self._model_data,
        )
        self.run_config = UpdateObserverDict(
            initial_yaml_string=self._model_data.attrs.get("run_config", "{}"),
            name="run_config",
            observer=self._model_data,
        )
        self.subsets = UpdateObserverDict(
            initial_yaml_string=self._model_data.attrs.get("subsets", "{}"),
            name="subsets",
            observer=self._model_data,
            flat=True,
        )

        results = self._model_data.filter_by_attrs(is_result=1)
        if len(results.data_vars) > 0:
            self.results = results
        log_time(
            logger,
            self._timings,
            "model_data_loaded",
            comment="Model: loaded model_data",
        )
Code example #4
File: results.py  Project: brmanuel/calliope
def postprocess_model_results(results, model_data, timings):
    """
    Adds additional post-processed result variables to
    the given model results in-place. Model must have solved successfully.

    Parameters
    ----------
    results : xarray Dataset
        Output from the solver backend
    model_data : xarray Dataset
        Calliope model data, stored as calliope.Model()._model_data
    timings : dict
        Calliope timing dictionary, stored as calliope.Model()._timings

    Returns
    -------
    results : xarray Dataset
        Input results Dataset, with additional DataArray variables, and with
        all instances of unreasonably low numbers (below zero_threshold) removed

    """
    log_time(logger,
             timings,
             "post_process_start",
             comment="Postprocessing: started")

    if model_data.attrs['scale']:
        scale(model_data, lambda x: 1 / x)
        results['scale'] = model_data['scale']
        scale(results, lambda x: 1 / x)

    run_config = AttrDict.from_yaml_string(model_data.attrs["run_config"])
    results["capacity_factor"] = capacity_factor(results, model_data)
    results["systemwide_capacity_factor"] = systemwide_capacity_factor(
        results, model_data)
    results["systemwide_levelised_cost"] = systemwide_levelised_cost(
        results, model_data)
    results["total_levelised_cost"] = systemwide_levelised_cost(results,
                                                                model_data,
                                                                total=True)
    results = clean_results(results, run_config.get("zero_threshold", 0),
                            timings)

    log_time(
        logger,
        timings,
        "post_process_end",
        time_since_run_start=True,
        comment="Postprocessing: ended",
    )

    if "run_solution_returned" in timings.keys():
        results.attrs["solution_time"] = (
            timings["run_solution_returned"] -
            timings["run_start"]).total_seconds()
        results.attrs["time_finished"] = timings[
            "run_solution_returned"].strftime("%Y-%m-%d %H:%M:%S")

    return results
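A hypothetical call site, following the docstring above (the `model` variable
and its private attributes are assumptions based on the documented storage
locations):

# results comes from the solver backend; model_data and timings live on
# the Model instance, per the docstring.
results = postprocess_model_results(results, model._model_data, model._timings)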
Code example #5
def postprocess_model_results(results, model_data, timings):
    """
    Adds additional post-processed result variables to
    the given model results in-place. Model must have solved successfully.

    Parameters
    ----------
    results : xarray Dataset
        Output from the solver backend
    model_data : xarray Dataset
        Calliope model data, stored as calliope.Model()._model_data
    timings : dict
        Calliope timing dictionary, stored as calliope.Model()._timings

    Returns
    -------
    results : xarray Dataset
        Input results Dataset, with additional DataArray variables, and with
        all instances of unreasonably low numbers (below zero_threshold) removed

    """
    log_time(logger,
             timings,
             'post_process_start',
             comment='Postprocessing: started')

    run_config = AttrDict.from_yaml_string(model_data.attrs['run_config'])
    results['capacity_factor'] = capacity_factor(results, model_data)
    results['systemwide_capacity_factor'] = systemwide_capacity_factor(
        results, model_data)
    results['systemwide_levelised_cost'] = systemwide_levelised_cost(
        results, model_data)
    results['total_levelised_cost'] = systemwide_levelised_cost(results,
                                                                model_data,
                                                                total=True)
    results = clean_results(results, run_config.get('zero_threshold', 0),
                            timings)

    log_time(logger,
             timings,
             'post_process_end',
             time_since_run_start=True,
             comment='Postprocessing: ended')

    if 'run_solution_returned' in timings.keys():
        results.attrs['solution_time'] = (
            timings['run_solution_returned'] -
            timings['run_start']).total_seconds()
        results.attrs['time_finished'] = (
            timings['run_solution_returned'].strftime('%Y-%m-%d %H:%M:%S'))

    return results
Code example #6
    def _init_from_model_data(self, model_data):
        self._model_run = None
        self._debug_data = None
        self._model_data = model_data
        self.inputs = self._model_data.filter_by_attrs(is_result=0)

        results = self._model_data.filter_by_attrs(is_result=1)
        if len(results.data_vars) > 0:
            self.results = results
        log_time(self._timings,
                 'model_data_loaded',
                 comment='Model: loaded model_data',
                 time_since_start=True)
Code example #7
File: model.py  Project: brynpickering/calliope
    def _init_from_model_data(self, model_data):
        self._model_run = None
        self._debug_data = None
        self._model_data = model_data
        self.inputs = self._model_data.filter_by_attrs(is_result=0)

        results = self._model_data.filter_by_attrs(is_result=1)
        if len(results.data_vars) > 0:
            self.results = results
        log_time(
            self._timings, 'model_data_loaded',
            comment='Model: loaded model_data',
            time_since_start=True
        )
Code example #8
File: results.py  Project: wroldwiedbwe/calliope
def clean_results(results, zero_threshold, timings):
    """
    Remove unreasonably small values (solver output can lead to floating point
    errors) and remove unmet_demand if it was never used (i.e. sum = zero)

    zero_threshold is a value set in model configuration. If not set, defaults
    to zero (i.e. doesn't do anything). Reasonable value = 1e-12
    """
    threshold_applied = []
    for k, v in results.data_vars.items():
        # If there are any values in the data variable which fall below the
        # threshold, note the data variable name and set those values to zero
        if v.where(abs(v) < zero_threshold, drop=True).sum():
            threshold_applied.append(k)
            with np.errstate(invalid="ignore"):
                v.values[abs(v.values) < zero_threshold] = 0
            v.loc[{}] = v.values

    if threshold_applied:
        comment = "All values < {} set to 0 in {}".format(
            zero_threshold, ", ".join(threshold_applied))
    else:
        comment = "zero threshold of {} not required".format(zero_threshold)

    log_time(logger,
             timings,
             "threshold_applied",
             comment="Postprocessing: " + comment)

    # Combine unused_supply and unmet_demand into one variable
    if ("unmet_demand" in results.data_vars.keys()
            or "unused_supply" in results.data_vars.keys()):
        results["unmet_demand"] = results.get("unmet_demand", 0) + results.get(
            "unused_supply", 0)

        results = results.drop_vars("unused_supply")

        if not results.unmet_demand.sum():

            log_time(
                logger,
                timings,
                "delete_unmet_demand",
                comment="Postprocessing: Model was feasible, deleting unmet_demand variable",
            )
            results = results.drop_vars("unmet_demand")

    return results
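The core thresholding step in clean_results can be demonstrated in isolation.
This standalone sketch (toy data, no calliope logging machinery) shows values
below zero_threshold in magnitude being snapped to zero:

import numpy as np
import xarray as xr

ds = xr.Dataset({"carrier_prod": ("t", np.array([1.0, 3e-13, -5e-14, 2.0]))})
zero_threshold = 1e-12

for name, var in ds.data_vars.items():
    # Same test as clean_results: are any values below the threshold?
    if var.where(abs(var) < zero_threshold, drop=True).sum():
        with np.errstate(invalid="ignore"):
            var.values[np.abs(var.values) < zero_threshold] = 0

print(ds.carrier_prod.values)  # [1. 0. 0. 2.]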
Code example #9
File: interface.py  Project: awelsz/calliope
def rerun_pyomo_model(model_data, backend_model):
    """
    Rerun the Pyomo backend, perhaps after updating a parameter value,
    (de)activating a constraint/objective, or updating run options in the
    model_data object (e.g. `run.solver`).

    Returns
    -------
    run_data : xarray.Dataset
        Raw data from this rerun, including both inputs and results.
        To filter inputs/results, use `run_data.filter_by_attrs(is_result=...)`
        with 0 for inputs and 1 for results.
    """
    backend_model.__calliope_run_config = AttrDict.from_yaml_string(model_data.attrs['run_config'])

    if backend_model.__calliope_run_config['mode'] != 'plan':
        raise exceptions.ModelError(
            'Cannot rerun the backend in {} run mode. Only `plan` mode is '
            'possible.'.format(backend_model.__calliope_run_config['mode'])
        )

    timings = {}
    log_time(logger, timings, 'model_creation')

    results, backend_model = backend_run.run_plan(
        model_data, timings, run_pyomo,
        build_only=False, backend_rerun=backend_model
    )
    for k, v in timings.items():
        results.attrs['timings.' + k] = v

    # Constructing the warning alone is a no-op; emit it via exceptions.warn
    exceptions.warn(
        'model.results will only be updated on running the model from '
        '`model.run()`. We provide results of this rerun as a standalone xarray '
        'Dataset'
    )

    results.attrs.update(model_data.attrs)
    for key, var in results.data_vars.items():
        var.attrs['is_result'] = 1

    inputs = access_pyomo_model_inputs(backend_model)
    for key, var in inputs.data_vars.items():
        var.attrs['is_result'] = 0

    results.update(inputs)
    run_data = results

    return run_data
Code example #10
File: interface.py  Project: brynpickering/calliope
def rerun_pyomo_model(model_data, backend_model):
    """
    Rerun the Pyomo backend, perhaps after updating a parameter value,
    (de)activating a constraint/objective, or updating run options in the
    model_data object (e.g. `run.solver`).

    Returns
    -------
    run_data : xarray.Dataset
        Raw data from this rerun, including both inputs and results.
        To filter inputs/results, use `run_data.filter_by_attrs(is_result=...)`
        with 0 for inputs and 1 for results.
    """

    if model_data.attrs['run.mode'] != 'plan':
        raise exceptions.ModelError(
            'Cannot rerun the backend in {} run mode. Only `plan` mode is '
            'possible.'.format(model_data.attrs['run.mode'])
        )

    timings = {}
    log_time(timings, 'model_creation')

    results, backend_model = backend_run.run_plan(
        model_data, timings, run_pyomo,
        build_only=False, backend_rerun=backend_model
    )
    for k, v in timings.items():
        results.attrs['timings.' + k] = v

    # Constructing the warning alone is a no-op; emit it via exceptions.warn
    exceptions.warn(
        'model.results will only be updated on running the model from '
        '`model.run()`. We provide results of this rerun as a standalone xarray '
        'Dataset'
    )

    results.attrs.update(model_data.attrs)
    for key, var in results.data_vars.items():
        var.attrs['is_result'] = 1

    inputs = access_pyomo_model_inputs(backend_model)
    for key, var in inputs.data_vars.items():
        var.attrs['is_result'] = 0

    results.update(inputs)
    run_data = results

    return run_data
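As both docstrings note, the returned run_data mixes inputs and results,
distinguished by the is_result attribute; splitting it back apart follows the
docstring's own suggestion:

inputs = run_data.filter_by_attrs(is_result=0)
results = run_data.filter_by_attrs(is_result=1)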
Code example #11
File: postprocess.py  Project: brynpickering/calliope
def clean_results(results, zero_threshold, timings):
    """
    Remove unreasonably small values (solver output can lead to floating point
    errors) and remove unmet_demand if it was never used (i.e. sum = zero)

    zero_threshold is a value set in model configuration. If not set, defaults
    to zero (i.e. doesn't do anything). Reasonable value = 1e-12
    """
    threshold_applied = []
    for k, v in results.data_vars.items():
        # If there are any values in the data variable which fall below the
        # threshold, note the data variable name and set those values to zero
        if v.where(abs(v) < zero_threshold, drop=True).sum():
            threshold_applied.append(k)
            with np.errstate(invalid='ignore'):
                v.values[abs(v.values) < zero_threshold] = 0
            v.loc[{}] = v.values

    if threshold_applied:
        comment = 'All values < {} set to 0 in {}'.format(zero_threshold, ', '.join(threshold_applied))
    else:
        comment = 'zero threshold of {} not required'.format(zero_threshold)

    log_time(
        timings, 'threshold_applied',
        comment='Postprocessing: ' + comment
    )

    # Combine unused_supply and unmet_demand into one variable
    if ('unmet_demand' in results.data_vars.keys() or
            'unused_supply' in results.data_vars.keys()):
        results['unmet_demand'] = (
            results.get('unmet_demand', 0) + results.get('unused_supply', 0)
        )

        results = results.drop_vars('unused_supply')  # Dataset.drop is deprecated in xarray

        if not results.unmet_demand.sum():

            log_time(
                timings, 'delete_unmet_demand',
                comment='Postprocessing: Model was feasible, deleting unmet_demand variable'
            )
            results = results.drop_vars('unmet_demand')

    return results
Code example #12
File: model.py  Project: wroldwiedbwe/calliope
    def __init__(self, config, model_data=None, debug=False, *args, **kwargs):
        """
        Returns a new Model from either the path to a YAML model
        configuration file or a dict fully specifying the model.

        Parameters
        ----------
        config : str or dict or AttrDict
            If str, must be the path to a model configuration file.
            If dict or AttrDict, must fully specify the model.
        model_data : Dataset, optional
            Create a Model instance from a fully built model_data Dataset.
            This is only used if `config` is explicitly set to None
            and is primarily used to re-create a Model instance from
            a model previously saved to a NetCDF file.

        """
        self._timings = {}
        # Try to set the logging output format assuming an interactive Python
        # session; the CLI logging format is used if the model is called from the CLI
        log_time(logger,
                 self._timings,
                 "model_creation",
                 comment="Model: initialising")
        if isinstance(config, str):
            model_run, debug_data = model_run_from_yaml(
                config, *args, **kwargs)
            self._init_from_model_run(model_run, debug_data, debug)
        elif isinstance(config, dict):
            model_run, debug_data = model_run_from_dict(
                config, *args, **kwargs)
            self._init_from_model_run(model_run, debug_data, debug)
        elif model_data is not None and config is None:
            self._init_from_model_data(model_data)
        else:
            # expected input is a string pointing to a YAML file of the run
            # configuration or a dict/AttrDict in which the run and model
            # configurations are defined
            raise ValueError(
                "Input configuration must either be a string or a dictionary.")
        self._check_future_deprecation_warnings()

        self.plot = plotting.ModelPlotMethods(self)
Code example #13
File: model.py  Project: FraSanvit/calliope
    def _init_from_model_data(self, model_data):
        if "_model_run" in model_data.attrs:
            self._model_run = AttrDict.from_yaml_string(model_data.attrs["_model_run"])
            del model_data.attrs["_model_run"]

        if "_debug_data" in model_data.attrs:
            self._debug_data = AttrDict.from_yaml_string(
                model_data.attrs["_debug_data"]
            )
            del model_data.attrs["_debug_data"]

        self._model_data = model_data
        self._add_model_data_methods()

        log_time(
            logger,
            self._timings,
            "model_data_loaded",
            comment="Model: loaded model_data",
        )
Code example #14
File: postprocess.py  Project: knut0815/calliope
def postprocess_model_results(results, model_data, timings):
    """
    Adds additional post-processed result variables to
    the given model results in-place. Model must have solved successfully.

    Parameters
    ----------
    results : xarray Dataset
        Output from the solver backend
    model_data : xarray Dataset
        Calliope model data, stored as calliope.Model()._model_data
    timings : dict
        Calliope timing dictionary, stored as calliope.Model()._timings

    Returns
    -------
    results : xarray Dataset
        Input results Dataset, with additional DataArray variables, and with
        all instances of unreasonably low numbers (below zero_threshold) removed

    """
    log_time(timings, 'post_process_start', comment='Postprocessing: started')

    results['capacity_factor'] = capacity_factor(results, model_data)
    results['systemwide_capacity_factor'] = systemwide_capacity_factor(
        results, model_data)
    results['systemwide_levelised_cost'] = systemwide_levelised_cost(
        results, model_data)
    results['total_levelised_cost'] = systemwide_levelised_cost(results,
                                                                model_data,
                                                                total=True)
    results = clean_results(results,
                            model_data.attrs.get('run.zero_threshold', 0),
                            timings)

    log_time(timings,
             'post_process_end',
             time_since_start=True,
             comment='Postprocessing: ended')

    return results
Code example #15
File: postprocess.py  Project: mlgarchery/calliope
def clean_results(results, zero_threshold, timings):
    """
    Remove unreasonably small values (solver output can lead to floating point
    errors) and remove unmet_demand if it was never used (i.e. sum = zero)

    zero_threshold is a value set in model configuration. If not set, defaults
    to zero (i.e. doesn't do anything). Reasonable value = 1e-12
    """
    threshold_applied = []
    for k, v in results.data_vars.items():
        # If there are any values in the data variable which fall below the
        # threshold, note the data variable name and set those values to zero
        if v.where(abs(v) < zero_threshold, drop=True).sum():
            threshold_applied.append(k)
            with np.errstate(invalid='ignore'):
                v.values[abs(v.values) < zero_threshold] = 0
            v.loc[{}] = v.values

    if threshold_applied:
        comment = 'All values < {} set to 0 in {}'.format(
            zero_threshold, ', '.join(threshold_applied))
    else:
        comment = 'zero threshold of {} not required'.format(zero_threshold)

    log_time(timings,
             'threshold_applied',
             comment='Postprocessing: ' + comment)

    if ('unmet_demand' in results.data_vars.keys()
            and not results.unmet_demand.sum()):

        log_time(
            timings,
            'delete_unmet_demand',
            comment='Postprocessing: Model was feasible, deleting unmet_demand variable'
        )
        results = results.drop_vars('unmet_demand')  # Dataset.drop is deprecated in xarray

    return results
Code example #16
    def _init_from_model_run(self, model_run, debug_data):
        self._model_run = model_run
        self._debug_data = debug_data
        log_time(self._timings,
                 'model_run_creation',
                 comment='Model: preprocessing stage 1 (model_run)')

        self._model_data_original = build_model_data(model_run)
        log_time(self._timings,
                 'model_data_original_creation',
                 comment='Model: preprocessing stage 2 (model_data)')

        random_seed = self._model_run.get_key('model.random_seed', None)
        if random_seed:
            np.random.seed(seed=random_seed)

        # After setting the random seed, time clustering can take place
        time_config = model_run.model.get('time', None)
        if not time_config:
            _model_data = self._model_data_original
        else:
            _model_data = apply_time_clustering(self._model_data_original,
                                                model_run)
        self._model_data = final_timedimension_processing(_model_data)
        log_time(self._timings,
                 'model_data_creation',
                 comment='Model: preprocessing complete',
                 time_since_start=True)

        for var in self._model_data.data_vars:
            self._model_data[var].attrs['is_result'] = 0
        self.inputs = self._model_data.filter_by_attrs(is_result=0)
Code example #17
File: model.py  Project: brynpickering/calliope
    def _init_from_model_run(self, model_run, debug_data):
        self._model_run = model_run
        self._debug_data = debug_data
        log_time(self._timings, 'model_run_creation', comment='Model: preprocessing stage 1 (model_run)')

        self._model_data_original = build_model_data(model_run)
        log_time(self._timings, 'model_data_original_creation', comment='Model: preprocessing stage 2 (model_data)')

        random_seed = self._model_run.get_key('model.random_seed', None)
        if random_seed:
            np.random.seed(seed=random_seed)

        # After setting the random seed, time clustering can take place
        time_config = model_run.model.get('time', None)
        if not time_config:
            _model_data = self._model_data_original
        else:
            _model_data = apply_time_clustering(
                self._model_data_original, model_run
            )
        self._model_data = final_timedimension_processing(_model_data)
        log_time(
            self._timings, 'model_data_creation',
            comment='Model: preprocessing complete',
            time_since_start=True
        )

        for var in self._model_data.data_vars:
            self._model_data[var].attrs['is_result'] = 0
        self.inputs = self._model_data.filter_by_attrs(is_result=0)
Code example #18
File: model.py  Project: brynpickering/calliope
    def __init__(self, config, model_data=None, *args, **kwargs):
        """
        Returns a new Model from either the path to a YAML model
        configuration file or a dict fully specifying the model.

        Parameters
        ----------
        config : str or dict or AttrDict
            If str, must be the path to a model configuration file.
            If dict or AttrDict, must fully specify the model.
        model_data : Dataset, optional
            Create a Model instance from a fully built model_data Dataset.
            This is only used if `config` is explicitly set to None
            and is primarily used to re-create a Model instance from
            a model previously saved to a NetCDF file.

        """
        self._timings = {}
        # Try to set the logging output format assuming an interactive Python
        # session; the CLI logging format is used if the model is called from the CLI
        log_time(self._timings, 'model_creation', comment='Model: initialising')
        if isinstance(config, str):
            model_run, debug_data = model_run_from_yaml(config, *args, **kwargs)
            self._init_from_model_run(model_run, debug_data)
        elif isinstance(config, dict):
            model_run, debug_data = model_run_from_dict(config, *args, **kwargs)
            self._init_from_model_run(model_run, debug_data)
        elif model_data is not None and config is None:
            self._init_from_model_data(model_data)
        else:
            # expected input is a string pointing to a YAML file of the run
            # configuration or a dict/AttrDict in which the run and model
            # configurations are defined
            raise ValueError(
                'Input configuration must either be a string or a dictionary.'
            )
        check_future_deprecation_warnings(self._model_run, self._model_data)

        self.plot = plotting.ModelPlotMethods(self)
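A hypothetical usage sketch of the two construction paths handled above (file
names are placeholders; re-loading from NetCDF assumes the model_data Dataset
was previously saved in an xarray-compatible form):

import calliope
import xarray as xr

# Path 1: build from a YAML model configuration file
model = calliope.Model('model.yaml')

# Path 2: re-create from a previously saved model_data Dataset,
# with config explicitly set to None (see the elif branch above)
model_data = xr.open_dataset('saved_model.nc')
model = calliope.Model(config=None, model_data=model_data)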
Code example #19
File: postprocess.py  Project: brynpickering/calliope
def postprocess_model_results(results, model_data, timings):
    """
    Adds additional post-processed result variables to
    the given model results in-place. Model must have solved successfully.

    Parameters
    ----------
    results : xarray Dataset
        Output from the solver backend
    model_data : xarray Dataset
        Calliope model data, stored as calliope.Model()._model_data
    timings : dict
        Calliope timing dictionary, stored as calliope.Model()._timings

    Returns
    -------
    results : xarray Dataset
        Input results Dataset, with additional DataArray variables, and with
        all instances of unreasonably low numbers (below zero_threshold) removed

    """
    log_time(
        timings, 'post_process_start',
        comment='Postprocessing: started'
    )

    results['capacity_factor'] = capacity_factor(results, model_data)
    results['systemwide_capacity_factor'] = systemwide_capacity_factor(results, model_data)
    results['systemwide_levelised_cost'] = systemwide_levelised_cost(results, model_data)
    results['total_levelised_cost'] = systemwide_levelised_cost(results, model_data, total=True)
    results = clean_results(results, model_data.attrs.get('run.zero_threshold', 0), timings)

    log_time(
        timings, 'post_process_end', time_since_start=True,
        comment='Postprocessing: ended'
    )

    return results
Code example #20
File: model.py  Project: wroldwiedbwe/calliope
    def _init_from_model_run(self, model_run, debug_data, debug):
        self._model_run = model_run
        log_time(
            logger,
            self._timings,
            "model_run_creation",
            comment="Model: preprocessing stage 1 (model_run)",
        )

        model_data_factory = ModelDataFactory(model_run)
        (
            model_data_pre_clustering,
            model_data,
            data_pre_time,
            stripped_keys,
        ) = model_data_factory()

        self._model_data_pre_clustering = model_data_pre_clustering
        self._model_data = model_data
        if debug:
            self._debug_data = debug_data
            self._model_data_pre_time = data_pre_time
            self._model_data_stripped_keys = stripped_keys
        self.inputs = self._model_data.filter_by_attrs(is_result=0)
        log_time(
            logger,
            self._timings,
            "model_data_original_creation",
            comment="Model: preprocessing stage 2 (model_data)",
        )

        # Ensure model and run attributes of _model_data update themselves
        model_config = {
            k: v
            for k, v in model_run.get("model", {}).items()
            if k != "file_allowed"
        }
        self.model_config = UpdateObserverDict(initial_dict=model_config,
                                               name="model_config",
                                               observer=self._model_data)
        self.run_config = UpdateObserverDict(
            initial_dict=model_run.get("run", {}),
            name="run_config",
            observer=self._model_data,
        )
        self.subsets = UpdateObserverDict(
            initial_dict=model_run.get("subsets").as_dict_flat(),
            name="subsets",
            observer=self._model_data,
        )

        log_time(
            logger,
            self._timings,
            "model_data_creation",
            comment="Model: preprocessing complete",
        )
Code example #21
File: model.py  Project: suvayu/calliope
    def _init_from_model_run(self, model_run, debug_data):
        self._model_run = model_run
        self._debug_data = debug_data
        log_time(
            logger,
            self._timings,
            "model_run_creation",
            comment="Model: preprocessing stage 1 (model_run)",
        )

        self._model_data_original = build_model_data(model_run)
        log_time(
            logger,
            self._timings,
            "model_data_original_creation",
            comment="Model: preprocessing stage 2 (model_data)",
        )

        random_seed = self._model_run.get_key("model.random_seed", None)
        if random_seed:
            np.random.seed(seed=random_seed)

        # After setting the random seed, time clustering can take place
        time_config = model_run.model.get("time", None)
        if not time_config:
            _model_data = self._model_data_original
        else:
            _model_data = apply_time_clustering(self._model_data_original,
                                                model_run)
        self._model_data = final_timedimension_processing(_model_data)
        log_time(
            logger,
            self._timings,
            "model_data_creation",
            comment="Model: preprocessing complete",
        )

        # Ensure model and run attributes of _model_data update themselves
        for var in self._model_data.data_vars:
            self._model_data[var].attrs["is_result"] = 0
        self.inputs = self._model_data.filter_by_attrs(is_result=0)

        model_config = {
            k: v
            for k, v in model_run.get("model", {}).items()
            if k != "file_allowed"
        }
        self.model_config = UpdateObserverDict(initial_dict=model_config,
                                               name="model_config",
                                               observer=self._model_data)
        self.run_config = UpdateObserverDict(
            initial_dict=model_run.get("run", {}),
            name="run_config",
            observer=self._model_data,
        )
Code example #22
    def test_timing_log(self):
        timings = {'model_creation': datetime.datetime.now()}

        # TODO: capture logging output and check that comment is in string
        log_time(timings, 'test', comment='test_comment', level='info')
        assert isinstance(timings['test'], datetime.datetime)

        log_time(timings, 'test2', comment=None, level='info')
        assert isinstance(timings['test2'], datetime.datetime)

        # TODO: capture logging output and check that time_since_start is in the string
        log_time(timings,
                 'test',
                 comment=None,
                 level='info',
                 time_since_start=True)
Code example #23
File: test_core_util.py  Project: FraSanvit/calliope
    def test_timing_log(self):
        timings = {"model_creation": datetime.datetime.now()}
        logger = logging.getLogger("calliope.testlogger")

        # TODO: capture logging output and check that comment is in string
        log_time(logger, timings, "test", comment="test_comment", level="info")
        assert isinstance(timings["test"], datetime.datetime)

        log_time(logger, timings, "test2", comment=None, level="info")
        assert isinstance(timings["test2"], datetime.datetime)

        # TODO: capture logging output and check that time_since_run_start is in the string
        log_time(
            logger,
            timings,
            "test",
            comment=None,
            level="info",
            time_since_run_start=True,
        )
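These two tests pin down the log_time contract across API versions: it stamps
timings[identifier] with the current datetime and emits a log message. A
minimal sketch consistent with the newer (logger-first) signature, assuming
nothing beyond what the tests exercise:

import datetime
import logging

def log_time(logger, timings, identifier, comment=None, level="info",
             time_since_run_start=False):
    # Record a timestamp under the given identifier
    timings[identifier] = now = datetime.datetime.now()
    message = comment or identifier
    if time_since_run_start and "run_start" in timings:
        message += " (time since run start: {})".format(now - timings["run_start"])
    # Dispatch to logger.info / logger.debug / ... based on `level`
    getattr(logger, level)(message)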
Code example #24
File: run.py  Project: FraSanvit/calliope
def run_spores(
    model_data,
    run_config,
    timings,
    interface,
    backend,
    build_only,
    backend_rerun=False,
    opt=None,
):
    """
    For use when mode is 'spores', to allow the model to be built, edited, and
    iteratively run within Pyomo, modifying, at each iteration, the score of
    each location-technology combination in such a way as to generate
    Spatially explicit Practically Optimal RESults (SPORES).

    """
    log_time(
        logger,
        timings,
        "run_start",
        comment="Backend: starting model run in SPORES mode",
    )

    def _cap_loc_score_default(results, subset=None):
        if subset is None:
            subset = {}
        # Define default scoring function, based on integer scoring method
        # TODO: make it possible to point to a custom function instead of this one
        cap_loc_score = xr.where(results.energy_cap > 1e-3, 100, 0)
        return cap_loc_score.to_series()[subset]

    # Define function to update "spores_score" after each iteration of the method
    def _update_spores_score(backend_model, cap_loc_score):
        print("Updating node-technology spores scores")
        cap_loc_score_dict = cap_loc_score.to_dict()

        interface.update_pyomo_param(backend_model, opt, "cost_energy_cap",
                                     cap_loc_score_dict)

    def _warn_on_infeasibility():
        return exceptions.warn(
            "Infeasible SPORE detected. Please check your model configuration. "
            "No more SPORES will be generated.")

    def _get_updated_spores_inputs(backend_model):
        inputs = interface.access_pyomo_model_inputs(backend_model)
        inputs_to_keep = ["cost_energy_cap"]
        return inputs[inputs_to_keep]

    def _combine_spores_results_and_inputs(backend_model,
                                           results,
                                           spore_num,
                                           model_data=None):
        inputs_to_keep = _get_updated_spores_inputs(backend_model)
        for var_data in results.data_vars.values():
            if "is_result" not in var_data.attrs.keys():
                var_data.attrs["is_result"] = 1
        if model_data is not None:
            datasets = [inputs_to_keep, model_data, results]
        else:
            datasets = [inputs_to_keep, results]
        new_ds = xr.combine_by_coords(datasets,
                                      compat="override",
                                      combine_attrs="no_conflicts")
        return new_ds.assign_coords(spores=("spores", [spore_num]))

    def _add_results_to_list(backend_model, spores_results, results,
                             spore_num):
        results_to_add = _combine_spores_results_and_inputs(
            backend_model, results, spore_num)
        spores_results[spore_num] = results_to_add

    def _save_spore(backend_model, results, spore_num, model_data=None):
        _path = spores_config["save_per_spore_path"].format(spore_num)
        new_ds = _combine_spores_results_and_inputs(backend_model, results,
                                                    spore_num, model_data)
        print(f"Saving SPORE {spore_num} to {_path}")
        io.save_netcdf(new_ds, _path)

    def _update_slack_cost_constraint(backend_model):
        slack_costs = model_data.group_cost_max.loc[
            {"group_names_cost_max": spores_config["slack_cost_group"]}
        ].dropna("costs")
        interface.update_pyomo_param(
            backend_model,
            opt,
            "group_cost_max",
            {(_cost_class, spores_config["slack_cost_group"]):
             results.cost.loc[{
                 "costs": _cost_class
             }].sum().item() * (1 + spores_config["slack"])
             for _cost_class in slack_costs.costs.values},
        )

    def _update_to_spores_objective(backend_model):
        interface.update_pyomo_param(
            backend_model,
            opt,
            "objective_cost_class",
            spores_config["objective_cost_class"],
        )

    def _initialise_backend_model():
        if backend_rerun:
            kwargs = {"backend_rerun": backend_rerun, "opt": opt}
        else:
            kwargs = {}
        if spores_config["skip_cost_op"]:
            print(
                "Skipping cost optimal run and using model_data to initialise SPORES directly"
            )
            return run_plan(model_data,
                            run_config,
                            timings,
                            backend,
                            build_only=True,
                            **kwargs)
        else:
            # Run once for the 'cost-optimal' solution
            return run_plan(
                model_data,
                run_config,
                timings,
                backend,
                build_only,
                persistent=False,
                **kwargs,
            )

    def _initialise_spores_number():
        if "spores" in model_data.coords and spores_config["skip_cost_op"]:
            return model_data.spores.max().item()
        else:
            return 0

    def _error_on_malformed_input():
        if backend_rerun:
            try:
                backend_rerun.obj()
            except ValueError:  # model has not yet been run
                pass
            else:
                raise exceptions.ModelError(
                    "Cannot run SPORES if the backend model already has a solution. "
                    "Consider using the `build_only` optional `run()` argument to avoid this."
                )
        if "spores" in model_data.filter_by_attrs(is_result=0).squeeze().dims:
            raise exceptions.ModelError(
                "Cannot run SPORES with a SPORES dimension in any input (e.g. `cost_energy_cap`)."
            )

    _error_on_malformed_input()

    if backend_rerun:
        model_data = _combine_spores_results_and_inputs(
            backend_rerun, xr.Dataset(), 0, model_data=model_data)

    spores_config = run_config["spores_options"]
    if "spores" in model_data.dims and model_data.spores.size == 1:
        model_data = model_data.squeeze("spores")

    init_spores_scores = (
        model_data.cost_energy_cap
        .loc[{"costs": [spores_config["score_cost_class"]]}]
        .to_series().dropna()
    )
    spores_results = {}

    results, backend_model, opt = _initialise_backend_model()
    if build_only:
        return results, backend_model, opt

    init_spore = _initialise_spores_number()

    if spores_config["skip_cost_op"]:
        cumulative_spores_scores = init_spores_scores.copy()
        _update_to_spores_objective(backend_model)
    elif results.attrs["termination_condition"] in ["optimal", "feasible"]:
        results.attrs["objective_function_value"] = backend_model.obj()
        if spores_config["save_per_spore"] is True:
            _save_spore(backend_model,
                        results,
                        init_spore,
                        model_data=model_data)
        # Storing results and scores in the specific dictionaries
        _add_results_to_list(backend_model, spores_results, results, 0)
        cumulative_spores_scores = init_spores_scores + _cap_loc_score_default(
            results, init_spores_scores.index.droplevel("costs"))
        # Set group constraint "cost_max" equal to slacked cost
        _update_slack_cost_constraint(backend_model)
        _update_to_spores_objective(backend_model)
        # Update "spores_score" based on previous iteration
        _update_spores_score(backend_model, cumulative_spores_scores)

        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment="Backend: generated solution array for the cost-optimal case",
        )
    else:
        _warn_on_infeasibility()
        return results, backend_model, opt

    # Iterate over the number of SPORES requested by the user
    for _spore in range(init_spore + 1, spores_config["spores_number"] + 1):
        print(f"Running SPORES {_spore}")
        if opt is not None and "persistent" in opt.type:
            opt = interface.regenerate_persistent_pyomo_solver(
                backend_model,
                opt,
                obj=True,
                constraints={
                    "cost_investment_constraint":
                    cumulative_spores_scores.index,
                    "cost_constraint": cumulative_spores_scores.index,
                },
            )
        else:
            opt = None
        results, backend_model, opt = run_plan(
            model_data,
            run_config,
            timings,
            backend,
            build_only=False,
            backend_rerun=backend_model,
            allow_warmstart=False,
            persistent=True,
            opt=opt,
        )

        if results.attrs["termination_condition"] in ["optimal", "feasible"]:
            results.attrs["objective_function_value"] = backend_model.obj()
            if spores_config["save_per_spore"] is True:
                _save_spore(backend_model, results, _spore)
            # Storing results and scores in the specific dictionaries
            _add_results_to_list(backend_model, spores_results, results,
                                 _spore)
            print(
                f"Updating capacity scores from {cumulative_spores_scores.sum()}..."
            )
            cumulative_spores_scores += _cap_loc_score_default(
                results, init_spores_scores.index.droplevel("costs"))
            print(f"... to {cumulative_spores_scores.sum()}")
            # Update "spores_score" based on previous iteration
            _update_spores_score(backend_model, cumulative_spores_scores)
        else:
            _warn_on_infeasibility()
            break
        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment=f"Backend: generated solution array for the SPORE {_spore}",
        )

    results = xr.concat(spores_results.values(),
                        dim=pd.Index(spores_results.keys(), name="spores"))

    return results, backend_model, opt
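The default scoring rule in _cap_loc_score_default is simple enough to show in
isolation: any location-technology combination whose optimised capacity exceeds
1e-3 scores 100, otherwise 0 (the toy coordinates below are illustrative only):

import xarray as xr

energy_cap = xr.DataArray(
    [0.0, 0.5, 2.0],
    dims="loc_techs",
    coords={"loc_techs": ["region1::pv", "region1::wind", "region2::pv"]},
)
score = xr.where(energy_cap > 1e-3, 100, 0)
print(score.values)  # [  0 100 100]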
Code example #25
def run_plan(model_data, timings, backend, build_only, backend_rerun=False):

    log_time(logger,
             timings,
             'run_start',
             comment='Backend: starting model run')

    if not backend_rerun:
        backend_model = backend.generate_model(model_data)

        log_time(logger,
                 timings,
                 'run_backend_model_generated',
                 time_since_run_start=True,
                 comment='Backend: model generated')

    else:
        backend_model = backend_rerun

    run_config = backend_model.__calliope_run_config
    solver = run_config['solver']
    solver_io = run_config.get('solver_io', None)
    solver_options = run_config.get('solver_options', None)
    save_logs = run_config.get('save_logs', None)

    if build_only:
        results = xr.Dataset()

    else:
        log_time(logger,
                 timings,
                 'run_solver_start',
                 comment='Backend: sending model to solver')

        results = backend.solve_model(backend_model,
                                      solver=solver,
                                      solver_io=solver_io,
                                      solver_options=solver_options,
                                      save_logs=save_logs)

        log_time(logger,
                 timings,
                 'run_solver_exit',
                 time_since_run_start=True,
                 comment='Backend: solver finished running')

        termination = backend.load_results(backend_model, results)

        log_time(logger,
                 timings,
                 'run_results_loaded',
                 comment='Backend: loaded results')

        results = backend.get_result_array(backend_model, model_data)
        results.attrs['termination_condition'] = termination

        if results.attrs['termination_condition'] in ['optimal', 'feasible']:
            results.attrs['objective_function_value'] = backend_model.obj()

        log_time(logger,
                 timings,
                 'run_solution_returned',
                 time_since_run_start=True,
                 comment='Backend: generated solution array')

    return results, backend_model
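Code example #9 shows how this function is invoked from the interface layer; a
build-only variant of that call (generating the Pyomo model without solving)
would look like:

# Mirrors the backend_run.run_plan(...) call in code example #9, but with
# build_only=True; per the branch above, results is then an empty xr.Dataset().
results, backend_model = backend_run.run_plan(
    model_data, timings, run_pyomo, build_only=True
)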
Code example #26
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    """
    log_time(logger,
             timings,
             'run_start',
             comment='Backend: starting model run in operational mode')

    defaults = AttrDict.from_yaml_string(model_data.attrs['defaults'])
    run_config = AttrDict.from_yaml_string(model_data.attrs['run_config'])

    operate_params = ['purchased'] + [
        i.replace('_max', '') for i in defaults if i[-4:] == '_max'
    ]

    # Capacity results (from plan mode) can be used as the input to operate mode
    if (any(model_data.filter_by_attrs(is_result=1).data_vars)
            and run_config.get('operation.use_cap_results', False)):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params:
                varvals.attrs['is_result'] = 1
                varvals.attrs['operate_param'] = 1

    else:
        cap_max = xr.merge([
            v.rename(k.replace('_max', ''))
            for k, v in model_data.data_vars.items() if '_max' in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace('_equals', ''))
            for k, v in model_data.data_vars.items() if '_equals' in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs['is_result'] = 1
            cap.attrs['operate_param'] = 1
        model_data.update(caps)

    # Storage initial is carried over between iterations, so must be defined along with storage
    if ('loc_techs_store' in model_data.dims.keys()
            and 'storage_initial' not in model_data.data_vars.keys()):
        model_data['storage_initial'] = (xr.DataArray(
            [0 for loc_tech in model_data.loc_techs_store.values],
            dims='loc_techs_store'))
        model_data['storage_initial'].attrs['is_result'] = 0
        exceptions.warn(
            'Initial stored energy not defined, set to zero for all '
            'loc::techs in loc_techs_store, for use in iterative optimisation')
    # Operated units are carried over between iterations, so must be defined in a MILP model
    if ('loc_techs_milp' in model_data.dims.keys()
            and 'operated_units' not in model_data.data_vars.keys()):
        model_data['operated_units'] = (xr.DataArray(
            [0 for loc_tech in model_data.loc_techs_milp.values],
            dims='loc_techs_milp'))
        model_data['operated_units'].attrs['is_result'] = 1
        model_data['operated_units'].attrs['operate_param'] = 1
        exceptions.warn(
            'daily operated units not defined, set to zero for all '
            'loc::techs in loc_techs_milp, for use in iterative optimisation')

    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings,
                                               errors=errors)

    # Initialize our variables
    solver = run_config['solver']
    solver_io = run_config.get('solver_io', None)
    solver_options = run_config.get('solver_options', None)
    save_logs = run_config.get('save_logs', None)
    window = run_config['operation']['window']
    horizon = run_config['operation']['horizon']
    window_to_horizon = horizon - window

    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum(
        'timesteps').to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where((timestep_cumsum % window == 0) | (
        timestep_cumsum == timestep_cumsum[-1]))
    window_starts = timestep_cumsum.where((~np.isnan(window_ends.shift(1))) | (
        timestep_cumsum == timestep_cumsum[0])).dropna()

    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values +
                                                        window_to_horizon)]

    if not any(window_starts):
        raise exceptions.ModelError(
            'Not enough timesteps or incorrect timestep resolution to run in '
            'operational mode with an optimisation window of {}'.format(
                window))

    # We will only update timeseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items()
        if 'timesteps' in v.dims and v.attrs['is_result'] == 0
    ]

    # Loop through each window, solve over the horizon length, and add the
    # result to result_array. We only go as far as the end of the last horizon,
    # which may clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []

    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))

    for i in iterations:
        start_timestep = window_starts.index[i]

        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(logger,
                     timings,
                     'model_gen_1',
                     comment='Backend: generating initial model')

            backend_model = backend.generate_model(window_model_data)

        # Build the full model in the last instance(s),
        # where number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                'model_gen_{}'.format(i + 1),
                comment=(
                    'Backend: iteration {}: generating new model for '
                    'end of timeseries, with horizon = {} timesteps'.format(
                        i + 1, window_ends[i] - window_starts[i])))

            backend_model = backend.generate_model(window_model_data)

        # Update relevant Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                'model_gen_{}'.format(i + 1),
                comment='Backend: iteration {}: updating model parameters'.format(i + 1))
            # Pyomo model sees the same timestamps each time, we just change the
            # values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = (window_model_data[var].to_series()
                              .dropna().replace('inf', np.inf))
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data['data'][
                    var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary

                getattr(backend_model, var).store_values(var_dict)

        if not build_only:
            log_time(logger,
                     timings,
                     'model_run_{}'.format(i + 1),
                     time_since_run_start=True,
                     comment='Backend: iteration {}: sending model to solver'.format(i + 1))
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model,
                solver=solver,
                solver_io=solver_io,
                solver_options=solver_options,
                save_logs=save_logs,
                warmstart=warmstart,
            )

            log_time(logger,
                     timings,
                     'run_solver_exit_{}'.format(i + 1),
                     time_since_run_start=True,
                     comment='Backend: iteration {}: solver finished running'.format(i + 1))
            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)

            _results = backend.get_result_array(backend_model, model_data)

            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results['timesteps'] = window_model_data.timesteps.copy()

            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimisation will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(
                timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)

            # Set up initial storage for the next iteration
            if 'loc_techs_store' in model_data.dims.keys():
                storage_initial = _results.storage.loc[{
                    'timesteps':
                    window_ends.index[i]
                }].drop('timesteps')
                model_data['storage_initial'].loc[
                    storage_initial.coords] = storage_initial.values
                backend_model.storage_initial.store_values(
                    storage_initial.to_series().dropna().to_dict())

            # Set up total operated units for the next iteration
            if 'loc_techs_milp' in model_data.dims.keys():
                operated_units = _results.operating_units.sum(
                    'timesteps').astype(int)  # np.int was removed in NumPy 1.24; use int
                model_data['operated_units'].loc[{}] += operated_units.values
                backend_model.operated_units.store_values(
                    operated_units.to_series().dropna().to_dict())

            log_time(logger,
                     timings,
                     'run_solver_exit_{}'.format(i + 1),
                     time_since_run_start=True,
                     comment='Backend: iteration {}: generated solution array'.format(i + 1))

    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim='timesteps')
        if all(i == 'optimal' for i in terminations):
            results.attrs['termination_condition'] = 'optimal'
        elif all(i in ['optimal', 'feasible'] for i in terminations):
            results.attrs['termination_condition'] = 'feasible'
        else:
            results.attrs['termination_condition'] = ','.join(terminations)

        log_time(logger,
                 timings,
                 'run_solution_returned',
                 time_since_run_start=True,
                 comment='Backend: generated full solution array')

    return results, backend_model
Code example #27
0
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    """
    log_time(
        logger,
        timings,
        "run_start",
        comment="Backend: starting model run in operational mode",
    )

    defaults = UpdateObserverDict(
        initial_yaml_string=model_data.attrs["defaults"],
        name="defaults",
        observer=model_data,
    )
    run_config = UpdateObserverDict(
        initial_yaml_string=model_data.attrs["run_config"],
        name="run_config",
        observer=model_data,
    )

    # New param defaults = old maximum param defaults (e.g. energy_cap gets default from energy_cap_max)
    operate_params = {
        k.replace("_max", ""): v
        for k, v in defaults.items() if k.endswith("_max")
    }
    operate_params["purchased"] = 0  # no _max to work from here, so we hardcode a default

    defaults.update(operate_params)
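    # Illustration (hypothetical values): if defaults contained
    # {"energy_cap_max": inf, "storage_cap_max": inf}, operate_params becomes
    # {"energy_cap": inf, "storage_cap": inf, "purchased": 0}, i.e. each
    # operate-mode parameter inherits the default of its "_max" counterpart.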

    # Capacity results (from plan mode) can be used as the input to operate mode
    if any(model_data.filter_by_attrs(is_result=1).data_vars) and run_config.get(
            "operation.use_cap_results", False):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params.keys():
                varvals.attrs["is_result"] = 1
                varvals.attrs["operate_param"] = 1

    else:
        cap_max = xr.merge([
            v.rename(k.replace("_max", ""))
            for k, v in model_data.data_vars.items() if "_max" in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace("_equals", ""))
            for k, v in model_data.data_vars.items() if "_equals" in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs["is_result"] = 1
            cap.attrs["operate_param"] = 1
        model_data.update(caps)

    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings,
                                               errors=errors)

    # Initialize our variables
    solver = run_config["solver"]
    solver_io = run_config.get("solver_io", None)
    solver_options = run_config.get("solver_options", None)
    save_logs = run_config.get("save_logs", None)
    window = run_config["operation"]["window"]
    horizon = run_config["operation"]["horizon"]
    window_to_horizon = horizon - window

    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum(
        "timesteps").to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where(
        (timestep_cumsum % window == 0) | (timestep_cumsum == timestep_cumsum[-1]))
    window_starts = timestep_cumsum.where(
        (~np.isnan(window_ends.shift(1))) | (timestep_cumsum == timestep_cumsum[0])).dropna()

    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values +
                                                        window_to_horizon)]
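    # Illustration (hypothetical numbers): with hourly resolution, window=24
    # and horizon=48, timestep_cumsum runs 1, 2, ..., N; window_ends then
    # fall at 24, 48, 72, ... (plus the final timestep), window_starts at
    # 1, 25, 49, ..., and horizon_ends at 48, 72, 96, ...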

    if not any(window_starts):
        raise exceptions.ModelError(
            "Not enough timesteps or incorrect timestep resolution to run in "
            "operational mode with an optimisation window of {}".format(
                window))

    # We will only update timeseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items()
        if "timesteps" in v.dims and v.attrs["is_result"] == 0
    ]

    # Loop through each window, solve over the horizon length, and add the
    # result to result_array. We only go as far as the end of the last
    # horizon, which may clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []

    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))

    for i in iterations:
        start_timestep = window_starts.index[i]

        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                "model_gen_1",
                comment="Backend: generating initial model",
            )

            backend_model = backend.generate_model(window_model_data)

        # Build the full model in the last instance(s),
        # where the number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                "model_gen_{}".format(i + 1),
                comment=(
                    "Backend: iteration {}: generating new model for "
                    "end of timeseries, with horizon = {} timesteps".format(
                        i + 1, window_ends[i] - window_starts[i])),
            )

            backend_model = backend.generate_model(window_model_data)

        # Update relevant Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                "model_gen_{}".format(i + 1),
                comment="Backend: iteration {}: updating model parameters".
                format(i + 1),
            )
            # The Pyomo model sees the same timestamps each time; we just
            # change the values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = (
                    window_model_data[var].to_series().dropna().replace(
                        "inf", np.inf))
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data["data"][
                    var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary
                getattr(backend_model, var).store_values(var_dict)
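                # Param.store_values applies the whole var_dict in one bulk
                # assignment; it is the batch equivalent of the per-item
                # set_value loop used in the older variant (code example #34).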

        if not build_only:
            log_time(
                logger,
                timings,
                "model_run_{}".format(i + 1),
                time_since_run_start=True,
                comment="Backend: iteration {}: sending model to solver".
                format(i + 1),
            )
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model,
                solver=solver,
                solver_io=solver_io,
                solver_options=solver_options,
                save_logs=save_logs,
                warmstart=warmstart,
            )

            log_time(
                logger,
                timings,
                "run_solver_exit_{}".format(i + 1),
                time_since_run_start=True,
                comment="Backend: iteration {}: solver finished running".
                format(i + 1),
            )
            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)

            _results = backend.get_result_array(backend_model, model_data)

            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results["timesteps"] = window_model_data.timesteps.copy()

            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimisation will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(
                timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)

            # Set up initial storage for the next iteration
            if "loc_techs_store" in model_data.dims.keys():
                storage_initial = _results.storage.loc[
                    {"timesteps": window_ends.index[i]}].drop("timesteps")
                model_data["storage_initial"].loc[
                    storage_initial.coords] = storage_initial.values
                backend_model.storage_initial.store_values(
                    storage_initial.to_series().dropna().to_dict())

            # Set up total operated units for the next iteration
            if "loc_techs_milp" in model_data.dims.keys():
                operated_units = _results.operating_units.sum(
                    "timesteps").astype(int)
                model_data["operated_units"].loc[{}] += operated_units.values
                backend_model.operated_units.store_values(
                    operated_units.to_series().dropna().to_dict())

            log_time(
                logger,
                timings,
                "run_solver_exit_{}".format(i + 1),
                time_since_run_start=True,
                comment="Backend: iteration {}: generated solution array".
                format(i + 1),
            )

    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim="timesteps")
        if all(i == "optimal" for i in terminations):
            results.attrs["termination_condition"] = "optimal"
        elif all(i in ["optimal", "feasible"] for i in terminations):
            results.attrs["termination_condition"] = "feasible"
        else:
            results.attrs["termination_condition"] = ",".join(terminations)

        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment="Backend: generated full solution array",
        )

    return results, backend_model
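
A minimal driver sketch for run_operate above. The import path and the example model are assumptions based on the calliope 0.6.x layout, not taken from this listing; any module exposing generate_model, solve_model, load_results and get_result_array would work as the backend.

import calliope
from calliope.backend.pyomo import model as run_pyomo  # assumed module path

model = calliope.examples.operate()  # built-in operate-mode example (assumed)
timings = {}
results, backend_model = run_operate(
    model._model_data, timings, backend=run_pyomo, build_only=False
)
print(results.attrs["termination_condition"])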
Code example #28
0
File: run.py Project: brynpickering/calliope
def run_plan(model_data, timings, backend, build_only, backend_rerun=False):

    log_time(timings, 'run_start', comment='Backend: starting model run')

    if not backend_rerun:
        backend_model = backend.generate_model(model_data)

        log_time(
            timings, 'run_backend_model_generated', time_since_start=True,
            comment='Backend: model generated'
        )

    else:
        backend_model = backend_rerun

    solver = model_data.attrs['run.solver']
    solver_io = model_data.attrs.get('run.solver_io', None)
    solver_options = {
        k.split('.')[-1]: v
        for k, v in model_data.attrs.items() if '.solver_options.' in k
    }
    save_logs = model_data.attrs.get('run.save_logs', None)

    if build_only:
        results = xr.Dataset()

    else:
        log_time(
            timings, 'run_solver_start',
            comment='Backend: sending model to solver'
        )

        results = backend.solve_model(
            backend_model, solver=solver,
            solver_io=solver_io, solver_options=solver_options, save_logs=save_logs
        )

        log_time(
            timings, 'run_solver_exit', time_since_start=True,
            comment='Backend: solver finished running'
        )

        termination = backend.load_results(backend_model, results)

        log_time(
            timings, 'run_results_loaded',
            comment='Backend: loaded results'
        )

        results = backend.get_result_array(backend_model, model_data)
        results.attrs['termination_condition'] = termination

        log_time(
            timings, 'run_solution_returned', time_since_start=True,
            comment='Backend: generated solution array'
        )

    return results, backend_model
Code example #29
0
File: run.py Project: mlgarchery/calliope
def run_plan(model_data, timings, backend, build_only, backend_rerun=False):

    log_time(timings, 'run_start', comment='Backend: starting model run')

    if not backend_rerun:
        backend_model = backend.generate_model(model_data)

        log_time(timings,
                 'run_backend_model_generated',
                 time_since_start=True,
                 comment='Backend: model generated')

    else:
        backend_model = backend_rerun

    solver = model_data.attrs['run.solver']
    solver_io = model_data.attrs.get('run.solver_io', None)
    solver_options = {
        k.split('.')[-1]: v
        for k, v in model_data.attrs.items() if '.solver_options.' in k
    }
    save_logs = model_data.attrs.get('run.save_logs', None)

    if build_only:
        results = xr.Dataset()

    else:
        log_time(timings,
                 'run_solver_start',
                 comment='Backend: sending model to solver')

        results = backend.solve_model(backend_model,
                                      solver=solver,
                                      solver_io=solver_io,
                                      solver_options=solver_options,
                                      save_logs=save_logs)

        log_time(timings,
                 'run_solver_exit',
                 time_since_start=True,
                 comment='Backend: solver finished running')

        termination = backend.load_results(backend_model, results)

        log_time(timings,
                 'run_results_loaded',
                 comment='Backend: loaded results')

        results = backend.get_result_array(backend_model, model_data)
        results.attrs['termination_condition'] = termination

        log_time(timings,
                 'run_solution_returned',
                 time_since_start=True,
                 comment='Backend: generated solution array')

    return results, backend_model
Code example #30
0
def rerun_pyomo_model(model_data, run_config, backend_model):
    """
    Rerun the Pyomo backend, perhaps after updating a parameter value,
    (de)activating a constraint/objective, or updating run options in the
    model's model_data object (e.g. `run.solver`).

    Returns
    -------
    new_model : calliope.Model
        New calliope model, including both inputs and results, but no backend interface.
    """
    backend_model.__calliope_run_config = run_config

    if run_config["mode"] != "plan":
        raise exceptions.ModelError(
            "Cannot rerun the backend in {} run mode. Only `plan` mode is "
            "possible.".format(run_config["mode"]))

    timings = {}
    log_time(logger, timings, "model_creation")

    results, backend_model = backend_run.run_plan(
        model_data,
        run_config,
        timings,
        run_pyomo,
        build_only=False,
        backend_rerun=backend_model,
    )

    inputs = access_pyomo_model_inputs(backend_model)

    # Add additional post-processed result variables to results
    if results.attrs.get("termination_condition",
                         None) in ["optimal", "feasible"]:
        results = postprocess_model_results(results,
                                            model_data.reindex(results.coords),
                                            timings)

    for key, var in results.data_vars.items():
        var.attrs["is_result"] = 1

    for key, var in inputs.data_vars.items():
        var.attrs["is_result"] = 0

    new_model_data = xr.merge((results, inputs))
    new_model_data.attrs.update(model_data.attrs)
    new_model_data.attrs.update(results.attrs)

    # Only add coordinates from the original model_data that don't already exist
    new_coords = [
        i for i in model_data.coords.keys()
        if i not in new_model_data.coords.keys()
    ]
    new_model_data = new_model_data.update(model_data[new_coords])

    # Reorganise the coordinates so that model data and new model data share
    # the same order of items in each dimension
    new_model_data = new_model_data.reindex(model_data.coords)

    exceptions.warn(
        "The results of rerunning the backend model are only available within "
        "the Calliope model returned by this function call.")

    new_calliope_model = calliope.Model(config=None, model_data=new_model_data)
    new_calliope_model._timings = timings

    return new_calliope_model
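
A hedged usage sketch: in practice rerun_pyomo_model is reached through the Pyomo backend interface rather than called directly. The method and attribute names below are assumptions based on the calliope 0.6.x API.

import calliope

model = calliope.examples.national_scale()  # built-in example (assumed)
model.run()

# Update a parameter in the live Pyomo model, then rerun without rebuilding it
model.backend.update_param("energy_cap_max", {"region1::ccgt": 1e5})  # assumed names
new_model = model.backend.rerun()
print(new_model.results.attrs["termination_condition"])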
Code example #31
0
def run_spores(model_data, timings, interface, backend, build_only):
    """
    For use when mode is 'spores', to allow the model to be built, edited, and
    iteratively run within Pyomo, modifying, at each iteration, the score of
    each loc::tech in such a way to generate Spatially explicit Practically Optimal
    RESults (SPORES).

    """
    log_time(
        logger,
        timings,
        "run_start",
        comment="Backend: starting model run in SPORES mode",
    )

    run_config = UpdateObserverDict(
        initial_yaml_string=model_data.attrs["run_config"],
        name="run_config",
        observer=model_data,
    )

    backend_model = backend.generate_model(model_data)

    log_time(
        logger,
        timings,
        "run_backend_model_generated",
        time_since_run_start=True,
        comment="Backend: model generated",
    )

    n_spores = run_config["spores_options"]["spores_number"]
    slack = run_config["spores_options"]["slack"]
    spores_score = run_config["spores_options"]["score_cost_class"]
    slack_group = run_config["spores_options"]["slack_cost_group"]

    # Define default scoring function, based on integer scoring method
    # TODO: make the function to run optional
    def _cap_loc_score_default(results, subset=None):
        if subset is None:
            subset = {}
        cap_loc_score = split_loc_techs(results["energy_cap"]).loc[subset]
        cap_loc_score = cap_loc_score.where(cap_loc_score > 1e-3, other=0)
        cap_loc_score = cap_loc_score.where(cap_loc_score == 0, other=100)

        return cap_loc_score.to_pandas()
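    # Illustration (hypothetical numbers): a loc::tech whose energy_cap is
    # 5.2 scores 100, one at 1e-6 (below the 1e-3 threshold) scores 0, so
    # the cumulative score penalises capacity that earlier SPORES deployed.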

    # Define function to update "spores_score" after each iteration of the method
    def _update_spores_score(backend_model, cap_loc_score):
        loc_tech_score_dict = {
            (spores_score, "{}::{}".format(i, j)): k
            for (i, j), k in cap_loc_score.stack().items()
            if "{}::{}".format(i, j) in model_data.loc_techs_investment_cost
        }

        interface.update_pyomo_param(backend_model, "cost_energy_cap",
                                     loc_tech_score_dict)

    def _warn_on_infeasibility():
        return exceptions.warn(
            "Infeasible SPORE detected. Please check your model configuration. "
            "No more SPORES will be generated.")

    # Run once for the 'cost-optimal' solution
    results, backend_model = run_plan(model_data, timings, backend, build_only)
    if build_only:
        return results, backend_model  # We have what we need, so return early

    if results.attrs["termination_condition"] in ["optimal", "feasible"]:
        results.attrs["objective_function_value"] = backend_model.obj()
        # Storing results and scores in the specific dictionaries
        spores_list = [results]
        cum_scores = _cap_loc_score_default(results)
        # Set group constraint "cost_max" equal to slacked cost
        slack_costs = model_data.group_cost_max.loc[
            {"group_names_cost_max": slack_group}].dropna("costs")
        interface.update_pyomo_param(
            backend_model,
            "group_cost_max",
            {(_cost_class, slack_group):
                 results.cost.loc[{"costs": _cost_class}].sum().item() * (1 + slack)
             for _cost_class in slack_costs.costs.values},
        )
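        # e.g. with slack = 0.1 and a cost-optimal "monetary" cost of 1000,
        # the monetary entry of group_cost_max becomes 1100, so every SPORE
        # may cost at most 10% more than the optimum (values illustrative).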
        # Modify objective function weights: spores_score -> 1, all others -> 0
        interface.update_pyomo_param(
            backend_model,
            "objective_cost_class",
            {
                spores_score: 1,
                **{i: 0 for i in slack_costs.costs.values}
            },
        )
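        # e.g. if the slacked cost classes are {"monetary"}, this sets
        # objective_cost_class to {"spores_score": 1, "monetary": 0}, so
        # subsequent solves minimise the score instead of monetary cost.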
        # Update "spores_score" based on previous iteration
        _update_spores_score(backend_model, cum_scores)
    else:
        _warn_on_infeasibility()
        return results, backend_model

    log_time(
        logger,
        timings,
        "run_solution_returned",
        time_since_run_start=True,
        comment="Backend: generated solution array for the cost-optimal case",
    )

    # Iterate over the number of SPORES requested by the user
    for _spore in range(0, n_spores):
        results, backend_model = run_plan(model_data,
                                          timings,
                                          backend,
                                          build_only,
                                          backend_rerun=backend_model)

        if results.attrs["termination_condition"] in ["optimal", "feasible"]:
            results.attrs["objective_function_value"] = backend_model.obj()
            # Storing results and scores in the specific dictionaries
            spores_list.append(results)
            cum_scores += _cap_loc_score_default(results)
            # Update "spores_score" based on previous iteration
            _update_spores_score(backend_model, cum_scores)
        else:
            _warn_on_infeasibility()
            break
        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment="Backend: generated solution array for SPORE {}".format(_spore + 1),
        )
        # TODO: make this function work with the spores dimension,
        # so that postprocessing can take place in core/model.py, as with run_plan and run_operate

    results = xr.concat(spores_list, dim="spores")

    return results, backend_model
Code example #32
0
File: run.py Project: FraSanvit/calliope
def run_plan(
    model_data,
    run_config,
    timings,
    backend,
    build_only,
    backend_rerun=False,
    allow_warmstart=False,
    persistent=True,
    opt=None,
):

    log_time(logger,
             timings,
             "run_start",
             comment="Backend: starting model run")

    warmstart = False
    if not backend_rerun:
        backend_model = backend.generate_model(model_data)
        log_time(
            logger,
            timings,
            "run_backend_model_generated",
            time_since_run_start=True,
            comment="Backend: model generated",
        )

    else:
        backend_model = backend_rerun
        if allow_warmstart:
            warmstart = True

    run_config = UpdateObserverDict(
        initial_yaml_string=model_data.attrs["run_config"],
        name="run_config",
        observer=model_data,
    )
    solver = run_config["solver"]
    solver_io = run_config.get("solver_io", None)
    solver_options = run_config.get("solver_options", None)
    save_logs = run_config.get("save_logs", None)

    if build_only:
        results = xr.Dataset()

    else:
        if "persistent" in solver and persistent is False:
            exceptions.warn(
                f"The chosen solver, `{solver}` will not be used in this run. "
                f"`{solver.replace('_persistent', '')}` will be used instead.")
            solver = solver.replace("_persistent", "")
        log_time(
            logger,
            timings,
            "run_solver_start",
            comment="Backend: sending model to solver",
        )

        backend_results, opt = backend.solve_model(
            backend_model,
            solver=solver,
            solver_io=solver_io,
            solver_options=solver_options,
            save_logs=save_logs,
            warmstart=warmstart,
            opt=opt,
        )

        log_time(
            logger,
            timings,
            "run_solver_exit",
            time_since_run_start=True,
            comment="Backend: solver finished running",
        )

        termination = backend.load_results(backend_model, backend_results, opt)

        log_time(logger,
                 timings,
                 "run_results_loaded",
                 comment="Backend: loaded results")

        if termination in ["optimal", "feasible"]:
            results = backend.get_result_array(backend_model, model_data)
            results.attrs["termination_condition"] = termination
            if "persistent" in opt.name and persistent is True:
                results.attrs["objective_function_value"] = opt.get_model_attr(
                    "ObjVal")
            else:
                results.attrs["objective_function_value"] = backend_model.obj()
        else:
            results = xr.Dataset(attrs={"termination_condition": termination})

        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment="Backend: generated solution array",
        )

    return results, backend_model, opt
Code example #33
0
def run_plan(model_data, timings, backend, build_only, backend_rerun=False):

    log_time(logger,
             timings,
             "run_start",
             comment="Backend: starting model run")

    if not backend_rerun:
        backend_model = backend.generate_model(model_data)

        log_time(
            logger,
            timings,
            "run_backend_model_generated",
            time_since_run_start=True,
            comment="Backend: model generated",
        )

    else:
        backend_model = backend_rerun

    run_config = backend_model.__calliope_run_config
    solver = run_config["solver"]
    solver_io = run_config.get("solver_io", None)
    solver_options = run_config.get("solver_options", None)
    save_logs = run_config.get("save_logs", None)

    if build_only:
        results = xr.Dataset()

    else:
        log_time(
            logger,
            timings,
            "run_solver_start",
            comment="Backend: sending model to solver",
        )

        results = backend.solve_model(
            backend_model,
            solver=solver,
            solver_io=solver_io,
            solver_options=solver_options,
            save_logs=save_logs,
        )

        log_time(
            logger,
            timings,
            "run_solver_exit",
            time_since_run_start=True,
            comment="Backend: solver finished running",
        )

        termination = backend.load_results(backend_model, results)

        log_time(logger,
                 timings,
                 "run_results_loaded",
                 comment="Backend: loaded results")

        results = backend.get_result_array(backend_model, model_data)
        results.attrs["termination_condition"] = termination

        if results.attrs["termination_condition"] in ["optimal", "feasible"]:
            results.attrs["objective_function_value"] = backend_model.obj()

        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment="Backend: generated solution array",
        )

    return results, backend_model
Code example #34
0
File: run.py Project: brynpickering/calliope
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    """
    log_time(timings, 'run_start',
             comment='Backend: starting model run in operational mode')

    defaults = ruamel.yaml.load(model_data.attrs['defaults'], Loader=ruamel.yaml.Loader)
    operate_params = ['purchased'] + [
        i.replace('_max', '') for i in defaults if i[-4:] == '_max'
    ]

    # Capacity results (from plan mode) can be used as the input to operate mode
    if (any(model_data.filter_by_attrs(is_result=1).data_vars) and
            model_data.attrs.get('run.operation.use_cap_results', False)):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params:
                varvals.attrs['is_result'] = 1
                varvals.attrs['operate_param'] = 1

    else:
        cap_max = xr.merge([
            v.rename(k.replace('_max', ''))
            for k, v in model_data.data_vars.items() if '_max' in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace('_equals', ''))
            for k, v in model_data.data_vars.items() if '_equals' in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs['is_result'] = 1
            cap.attrs['operate_param'] = 1
        model_data.update(caps)

    # Storage initial is carried over between iterations, so must be defined along with storage
    if ('loc_techs_store' in model_data.dims.keys() and
        'storage_initial' not in model_data.data_vars.keys()):
        model_data['storage_initial'] = (
            xr.DataArray([0 for loc_tech in model_data.loc_techs_store.values],
                         dims='loc_techs_store')
        )
        model_data['storage_initial'].attrs['is_result'] = 0
        exceptions.warn(
            'Initial stored energy not defined, set to zero for all '
            'loc::techs in loc_techs_store, for use in iterative optimisation'
        )
    # Operated units is carried over between iterations, so must be defined in a milp model
    if ('loc_techs_milp' in model_data.dims.keys() and
        'operated_units' not in model_data.data_vars.keys()):
        model_data['operated_units'] = (
            xr.DataArray([0 for loc_tech in model_data.loc_techs_milp.values],
                         dims='loc_techs_milp')
        )
        model_data['operated_units'].attrs['is_result'] = 1
        model_data['operated_units'].attrs['operate_param'] = 1
        exceptions.warn(
            'Daily operated units not defined, set to zero for all '
            'loc::techs in loc_techs_milp, for use in iterative optimisation'
        )

    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # Initialize our variables
    solver = model_data.attrs['run.solver']
    solver_io = model_data.attrs.get('run.solver_io', None)
    solver_options = model_data.attrs.get('run.solver_options', None)
    save_logs = model_data.attrs.get('run.save_logs', None)
    window = model_data.attrs['run.operation.window']
    horizon = model_data.attrs['run.operation.horizon']
    window_to_horizon = horizon - window

    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum('timesteps').to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where(
        (timestep_cumsum % window == 0) | (timestep_cumsum == timestep_cumsum[-1])
    )
    window_starts = timestep_cumsum.where(
        (~np.isnan(window_ends.shift(1))) | (timestep_cumsum == timestep_cumsum[0])
    ).dropna()

    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values + window_to_horizon)]

    if not any(window_starts):
        raise exceptions.ModelError(
            'Not enough timesteps or incorrect timestep resolution to run in '
            'operational mode with an optimisation window of {}'.format(window)
        )

    # We will only update timeseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items() if 'timesteps' in v.dims
        and v.attrs['is_result'] == 0
    ]

    # Loop through each window, solve over the horizon length, and add the
    # result to result_array. We only go as far as the end of the last
    # horizon, which may clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []

    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))

    for i in iterations:
        start_timestep = window_starts.index[i]

        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                timings, 'model_gen_1',
                comment='Backend: generating initial model'
            )

            backend_model = backend.generate_model(window_model_data)

        # Build the full model in the last instance(s),
        # where the number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                timings, 'model_gen_{}'.format(i + 1),
                comment=(
                    'Backend: iteration {}: generating new model for '
                    'end of timeseries, with horizon = {} timesteps'
                    .format(i + 1, window_ends[i] - window_starts[i])
                )
            )

            backend_model = backend.generate_model(window_model_data)

        # Update relevant Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                timings, 'model_gen_{}'.format(i + 1),
                comment='Backend: iteration {}: updating model parameters'.format(i + 1)
            )
            # The Pyomo model sees the same timestamps each time; we just
            # change the values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = window_model_data[var].to_series().dropna().replace('inf', np.inf)
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data__['data'][var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary
                for k, v in getattr(backend_model, var).items():
                    if k in var_dict:
                        v.set_value(var_dict[k])

        if not build_only:
            log_time(
                timings, 'model_run_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: sending model to solver'.format(i + 1)
            )
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model, solver=solver, solver_io=solver_io,
                solver_options=solver_options, save_logs=save_logs, warmstart=warmstart,
            )

            log_time(
                timings, 'run_solver_exit_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: solver finished running'.format(i + 1)
            )
            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)

            _results = backend.get_result_array(backend_model, model_data)

            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results['timesteps'] = window_model_data.timesteps.copy()

            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimisation will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)

            # Set up initial storage for the next iteration
            if 'loc_techs_store' in model_data.dims.keys():
                storage_initial = _results.storage.loc[{'timesteps': window_ends.index[i]}].drop('timesteps')
                model_data['storage_initial'].loc[storage_initial.coords] = storage_initial.values
                for k, v in backend_model.storage_initial.items():
                    v.set_value(storage_initial.to_series().dropna().to_dict()[k])

            # Set up total operated units for the next iteration
            if 'loc_techs_milp' in model_data.dims.keys():
                operated_units = _results.operating_units.sum('timesteps').astype(int)
                model_data['operated_units'].loc[{}] += operated_units.values
                for k, v in backend_model.operated_units.items():
                    v.set_value(operated_units.to_series().dropna().to_dict()[k])

            log_time(
                timings, 'run_solver_exit_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: generated solution array'.format(i + 1)
            )

    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim='timesteps')
        if all(i == 'optimal' for i in terminations):
            results.attrs['termination_condition'] = 'optimal'
        else:
            results.attrs['termination_condition'] = ','.join(terminations)

        log_time(
            timings, 'run_solution_returned', time_since_start=True,
            comment='Backend: generated full solution array'
        )

    return results, backend_model