def _check_data(self):
    """
    Sanity-check the assembled model dataset.

    Raises
    ------
    exceptions.ModelError
        If any entries remain in ``self.node_dict`` or ``self.tech_dict``
        (i.e. some input data was never extracted into the dataset), or if
        ``checks.check_model_data`` reports errors.
    """
    if self.node_dict or self.tech_dict:
        # Both leftover dicts are reported, since either one triggers the error
        raise exceptions.ModelError(
            "Some data not extracted from inputs into model dataset:\n"
            f"nodes: {self.node_dict}\ntechs: {self.tech_dict}"
        )
    # Comments returned by the check are not used here, only warnings/errors
    self.model_data, _, warns, errors = checks.check_model_data(
        self.model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warns, errors=errors)
def final_timedimension_processing(model_data):
    """
    Run the final consistency checks on ``model_data``, then reorganise
    its dimensions and attach max-demand timestep information.
    """
    # Final checking of the data; the returned comments are not used here
    model_data, _comments, warns, errs = checks.check_model_data(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warns, errors=errs)

    reorganised = reorganise_dataset_dimensions(model_data)
    return add_max_demand_timesteps(reorganised)
def final_timedimension_processing(model_data):
    """
    Run final data checks, reorganise dataset dimensions, add max-demand
    timesteps, and emit the v0.6.3 cyclic-storage FutureWarning if needed.
    """
    # Final checking of the data; returned comments are unused
    model_data, _comments, check_warns, check_errors = checks.check_model_data(model_data)
    exceptions.print_warnings_and_raise_errors(
        warnings=check_warns, errors=check_errors)

    model_data = add_max_demand_timesteps(reorganise_dataset_dimensions(model_data))

    ## Warning that cyclic storage will default to True in 0.6.3 ####
    # TODO: remove in v0.6.3-dev
    cyclic_storage_off = not model_data.attrs.get('run.cyclic_storage', False)
    if 'loc_techs_store' in model_data and cyclic_storage_off:
        warnings.warn(
            'Cyclic storage, a new addition in v0.6.2, currently defaults to '
            'False (i.e. emulating functionality prior to v0.6.2). '
            'From v0.6.3, cyclic storage will default to True.',
            FutureWarning
        )
    return model_data
def generate_model_run(config, debug_comments, applied_overrides, scenario):
    """
    Returns a processed model_run configuration AttrDict and a debug
    YAML object with comments attached, ready to write to disk.

    Parameters
    ----------
    config : AttrDict
    debug_comments : AttrDict
        Mutated in place: debug comments for techs/locations are added here.
    applied_overrides : list of str
        Override names already applied; stored joined by ';'.
    scenario : str
        Scenario name, stored as-is in the model_run.

    """
    model_run = AttrDict()
    model_run['scenario'] = scenario
    model_run['applied_overrides'] = ';'.join(applied_overrides)

    # 1) Initial checks on model configuration
    warnings, errors = checks.check_initial(config)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # 2) Fully populate techs
    # Raises ModelError if necessary
    model_run['techs'], debug_techs, errors = process_techs(config)
    debug_comments.set_key('model_run.techs', debug_techs)
    exceptions.print_warnings_and_raise_errors(errors=errors)

    # 3) Fully populate tech_groups
    model_run['tech_groups'] = process_tech_groups(config, model_run['techs'])

    # 4) Fully populate locations
    model_run['locations'], debug_locs, warnings, errors = locations.process_locations(
        config, model_run['techs']
    )
    debug_comments.set_key('model_run.locations', debug_locs)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # 5) Fully populate timeseries data
    # Raises ModelErrors if there are problems with timeseries data at this stage
    model_run['timeseries_data'], model_run['timesteps'] = (
        process_timeseries_data(config, model_run)
    )

    # 6) Grab additional relevant bits from run and model config
    model_run['run'] = config['run']
    model_run['model'] = config['model']

    # 7) Initialize sets
    all_sets = sets.generate_simple_sets(model_run)
    all_sets.union(sets.generate_loc_tech_sets(model_run, all_sets))
    all_sets = AttrDict({k: list(v) for k, v in all_sets.items()})
    model_run['sets'] = all_sets
    model_run['constraint_sets'] = constraint_sets.generate_constraint_sets(model_run)

    # 8) Final sense-checking
    final_check_comments, warnings, errors = checks.check_final(model_run)
    debug_comments.union(final_check_comments)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # 9) Build a debug data dict with comments and the original configs
    debug_data = AttrDict({
        'comments': debug_comments,
        'config_initial': config,
    })

    return model_run, debug_data
def apply_overrides(config, scenario=None, override_dict=None):
    """
    Generate processed Model configuration, applying any scenarios overrides.

    Parameters
    ----------
    config : AttrDict
        a model configuration AttrDict
    scenario : str, optional
        Either a named scenario from the config's `scenarios` key, or a
        comma-separated string of override names to apply directly.
    override_dict : str or dict or AttrDict, optional
        If a YAML string, converted to AttrDict

    Returns
    -------
    (config_model, debug_comments, overrides, scenario)

    """
    debug_comments = AttrDict()

    base_model_config_file = os.path.join(
        os.path.dirname(calliope.__file__),
        'config', 'model.yaml'
    )
    config_model = AttrDict.from_yaml(base_model_config_file)

    # Interpret timeseries_data_path as relative
    config.model.timeseries_data_path = relative_path(
        config.config_path, config.model.timeseries_data_path
    )

    # The input files are allowed to override other model defaults
    config_model.union(config, allow_override=True)

    # First pass of applying override dict before applying scenarios,
    # so that can override scenario definitions by override_dict
    if override_dict:
        if isinstance(override_dict, str):
            override_dict = AttrDict.from_yaml_string(override_dict)
        elif not isinstance(override_dict, AttrDict):
            override_dict = AttrDict(override_dict)

        warnings = checks.check_overrides(config_model, override_dict)
        exceptions.print_warnings_and_raise_errors(warnings=warnings)

        config_model.union(
            override_dict, allow_override=True, allow_replacement=True
        )

    if scenario:
        scenarios = config_model.get('scenarios', {})

        if scenario in scenarios:
            # Manually defined scenario names cannot be the same as single
            # overrides or any combination of comma-delimited overrides
            if all([i in config_model.get('overrides', {})
                    for i in scenario.split(',')]):
                raise exceptions.ModelError(
                    'Manually defined scenario cannot be a combination of override names.'
                )
            if not isinstance(scenarios[scenario], str):
                raise exceptions.ModelError(
                    'Scenario definition must be string of comma-separated overrides.'
                )
            overrides = scenarios[scenario].split(',')
            logger.info(
                'Using scenario `{}` leading to the application of '
                'overrides `{}`.'.format(scenario, scenarios[scenario])
            )
        else:
            # Not a named scenario: treat the string itself as override names
            overrides = str(scenario).split(',')
            logger.info(
                'Applying overrides `{}` without a '
                'specific scenario name.'.format(scenario)
            )

        overrides_from_scenario = combine_overrides(config_model, overrides)

        warnings = checks.check_overrides(config_model, overrides_from_scenario)
        exceptions.print_warnings_and_raise_errors(warnings=warnings)

        config_model.union(
            overrides_from_scenario,
            allow_override=True, allow_replacement=True
        )
        for k, v in overrides_from_scenario.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Applied from override')
    else:
        overrides = []

    # Second pass of applying override dict after applying scenarios,
    # so that scenario-based overrides are overridden by override_dict!
    if override_dict:
        config_model.union(
            override_dict, allow_override=True, allow_replacement=True
        )
        for k, v in override_dict.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Overridden via override dictionary.')

    return config_model, debug_comments, overrides, scenario
def generate_model_run(
    config,
    timeseries_dataframes,
    debug_comments,
    applied_overrides,
    scenario,
    subsets,
):
    """
    Returns a processed model_run configuration AttrDict and a debug
    YAML object with comments attached, ready to write to disk.

    Parameters
    ----------
    config : AttrDict
    timeseries_dataframes : dict
    debug_comments : AttrDict
        Mutated in place: debug comments for techs/nodes are added here.
    applied_overrides : list of str
        Override names already applied; stored joined by ';'.
    scenario : str
    subsets : AttrDict or dict
        Pre-computed subsets, stored directly in the model_run.

    """
    model_run = AttrDict()
    model_run["scenario"] = scenario
    model_run["applied_overrides"] = ";".join(applied_overrides)

    # 1) Initial checks on model configuration
    warning_messages, errors = checks.check_initial(config)
    exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)

    # 2) Fully populate techs
    # Raises ModelError if necessary
    model_run["techs"], debug_techs, errors = process_techs(config)
    debug_comments.set_key("model_run.techs", debug_techs)
    exceptions.print_warnings_and_raise_errors(errors=errors)

    # 3) Fully populate tech_groups
    model_run["tech_groups"] = process_tech_groups(config, model_run["techs"])

    # 4) Fully populate nodes
    (
        model_run["nodes"],
        debug_nodes,
        warning_messages,
        errors,
    ) = nodes.process_nodes(config, model_run["techs"])
    debug_comments.set_key("model_run.nodes", debug_nodes)
    exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)

    # 5) Fully populate timeseries data
    # Raises ModelErrors if there are problems with timeseries data at this stage
    (
        model_run["timeseries_data"],
        model_run["timeseries_vars"],
    ) = process_timeseries_data(config, model_run, timeseries_dataframes)

    # 6) Grab additional relevant bits from run and model config
    model_run["run"] = config["run"]
    model_run["model"] = config["model"]
    # model_run["sets"] = all_sets
    model_run["subsets"] = subsets
    # model_run["constraint_sets"] = constraint_sets.generate_constraint_sets(model_run)

    # 8) Final sense-checking
    final_check_comments, warning_messages, errors = checks.check_final(
        model_run)
    debug_comments.union(final_check_comments)
    exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)

    # 9) Build a debug data dict with comments and the original configs
    debug_data = AttrDict({
        "comments": debug_comments,
        "config_initial": config,
    })

    return model_run, debug_data
def apply_overrides(config, scenario=None, override_dict=None):
    """
    Generate processed Model configuration, applying any scenarios overrides.

    Parameters
    ----------
    config : AttrDict
        a model configuration AttrDict
    scenario : str, optional
    override_dict : str or dict or AttrDict, optional
        If a YAML string, converted to AttrDict

    Returns
    -------
    (config_model, debug_comments, scenario_overrides, scenario)

    """
    debug_comments = AttrDict()

    config_model = AttrDict.from_yaml(
        os.path.join(os.path.dirname(calliope.__file__), "config", "defaults.yaml"))

    # Interpret timeseries_data_path as relative
    if "timeseries_data_path" in config.model:
        config.model.timeseries_data_path = relative_path(
            config.config_path, config.model.timeseries_data_path)

    # FutureWarning: check if config includes an explicit objective cost class.
    # Added in 0.6.4-dev, to be removed in v0.7.0-dev.
    has_explicit_cost_class = isinstance(
        config.get_key("run.objective_options.cost_class", None), dict)

    # The input files are allowed to override other model defaults
    config_model.union(config, allow_override=True)

    # First pass of applying override dict before applying scenarios,
    # so that can override scenario definitions by override_dict
    if override_dict:
        if isinstance(override_dict, str):
            override_dict = AttrDict.from_yaml_string(override_dict)
        elif not isinstance(override_dict, AttrDict):
            override_dict = AttrDict(override_dict)

        warning_messages = checks.check_overrides(config_model, override_dict)
        exceptions.print_warnings_and_raise_errors(warnings=warning_messages)

        # FutureWarning: If config does not include an explicit objective cost
        # class, check override dict.
        # Added in 0.6.4-dev, to be removed in v0.7.0-dev.
        if has_explicit_cost_class is False:
            has_explicit_cost_class = isinstance(
                override_dict.get_key("run.objective_options.cost_class", None), dict)

        config_model.union(override_dict, allow_override=True, allow_replacement=True)

    if scenario:
        scenario_overrides = load_overrides_from_scenario(
            config_model, scenario)
        if not all(i in config_model.get("overrides", {})
                   for i in scenario_overrides):
            raise exceptions.ModelError(
                "Scenario definition must be a list of override or other scenario names."
            )
        else:
            logger.info(
                "Applying the following overrides from scenario definition: {} "
                .format(scenario_overrides))
        overrides_from_scenario = combine_overrides(config_model, scenario_overrides)

        warning_messages = checks.check_overrides(config_model, overrides_from_scenario)
        exceptions.print_warnings_and_raise_errors(warnings=warning_messages)

        # FutureWarning: If neither config nor override_dict include an explicit
        # objective cost class, check scenario dict.
        # Added in 0.6.4-dev, to be removed in v0.7.0-dev
        if has_explicit_cost_class is False:
            has_explicit_cost_class = isinstance(
                overrides_from_scenario.get_key(
                    "run.objective_options.cost_class", None),
                dict,
            )

        config_model.union(overrides_from_scenario, allow_override=True,
                           allow_replacement=True)
        for k, v in overrides_from_scenario.as_dict_flat().items():
            debug_comments.set_key("{}".format(k), "Applied from override")
    else:
        scenario_overrides = []

    # Second pass of applying override dict after applying scenarios,
    # so that scenario-based overrides are overridden by override_dict!
    if override_dict:
        config_model.union(override_dict, allow_override=True,
                           allow_replacement=True)
        for k, v in override_dict.as_dict_flat().items():
            debug_comments.set_key("{}".format(k),
                                   "Overridden via override dictionary.")

    # FutureWarning: raise cost class warning here.
    # Warning that there will be no default cost class in 0.7.0 #
    # Added in 0.6.4-dev, to be removed in v0.7.0-dev
    if has_explicit_cost_class is False:
        warnings.warn(
            "There will be no default cost class for the objective function in "
            'v0.7.0 (currently "monetary" with a weight of 1). '
            "Explicitly specify the cost class(es) you would like to use "
            'under `run.objective_options.cost_class`. E.g. `{"monetary": 1}` to '
            "replicate the current default.",
            FutureWarning,
        )

    # Drop default nodes, links, and techs
    config_model.del_key("techs.default_tech")
    config_model.del_key("nodes.default_node")
    config_model.del_key("links.default_node_from,default_node_to")

    return config_model, debug_comments, scenario_overrides, scenario
def add_time_dimension(data, model_run):
    """
    Once all constraints and costs have been loaded into the model dataset, any
    timeseries data is loaded from file and substituted into the model dataset

    Parameters:
    -----------
    data : xarray Dataset
        A data structure which has already gone through `constraints_to_dataset`,
        `costs_to_dataset`, and `add_attributes`
    model_run : AttrDict
        Calliope model_run dictionary

    Returns:
    --------
    data : xarray Dataset
        A data structure with an additional time dimension to the input dataset,
        with all relevant `file=` entries replaced with data from file.

    """
    data["timesteps"] = pd.to_datetime(data.timesteps)

    # Search through every constraint/cost for use of '='
    for variable in data.data_vars:
        # 1) If '=' in variable, it will give the variable a string data type
        if data[variable].dtype.kind != "U":
            continue

        # 2) convert to a Pandas Series to do 'string contains' search
        data_series = data[variable].to_series()

        # 3) get a Series of all the uses of 'file=' for this variable
        filenames = data_series[data_series.str.contains("file=")]

        # 4) If no use of 'file=' then we can be on our way
        if filenames.empty:
            continue

        # 5) remove all before '=' and split filename and location column
        # (n passed by keyword: positional `n` is deprecated in pandas str.rsplit)
        filenames = filenames.str.split("=").str[1].str.rsplit(":", n=1)
        if isinstance(filenames.index, pd.MultiIndex):
            filenames.index = filenames.index.remove_unused_levels()

        # 6) Get all timeseries data from dataframes stored in model_run
        timeseries_data = []
        key_errors = []
        # .items() replaces Series.iteritems(), removed in pandas 2.0
        for loc_tech, (filename, column) in filenames.items():
            try:
                timeseries_data.append(
                    model_run.timeseries_data[filename].loc[:, column].values)
            except KeyError:
                key_errors.append(
                    "column `{}` not found in file `{}`, but was requested by "
                    "loc::tech `{}`.".format(column, filename, loc_tech))
        if key_errors:
            exceptions.print_warnings_and_raise_errors(errors=key_errors)

        timeseries_data_series = pd.DataFrame(
            index=filenames.index,
            columns=data.timesteps.values,
            data=timeseries_data).stack()
        timeseries_data_series.index.rename("timesteps", -1, inplace=True)

        # 7) Add time dimension to the relevant DataArray and update the '='
        # dimensions with the time varying data (static data is just duplicated
        # at each timestep)
        timeseries_data_array = xr.broadcast(data[variable], data.timesteps)[0].copy()
        timeseries_data_array.loc[
            xr.DataArray.from_series(timeseries_data_series).coords
        ] = xr.DataArray.from_series(timeseries_data_series).values

        # 8) assign correct dtype (might be string/object accidentally)
        # string 'nan' to NaN:
        array_to_check = timeseries_data_array.where(
            timeseries_data_array != "nan", drop=True)
        timeseries_data_array = timeseries_data_array.where(
            timeseries_data_array != "nan")

        if (((array_to_check == "True") | (array_to_check == "1")
                | (array_to_check == "False")
                | (array_to_check == "0")).all().item()):
            # Turn to bool
            timeseries_data_array = ((timeseries_data_array == "True")
                                     | (timeseries_data_array == "1")).copy()
        else:
            try:
                # Builtin `float` instead of `np.float` (removed in NumPy 1.24)
                timeseries_data_array = timeseries_data_array.astype(
                    float, copy=False)
            except ValueError:
                # Not numeric data; leave as-is
                pass
        data[variable] = timeseries_data_array

    # Add timestep_resolution by looking at the time difference between timestep n
    # and timestep n + 1 for all timesteps
    time_delta = (data.timesteps.shift(timesteps=-1) - data.timesteps).to_series()

    # Last timestep has no n + 1, so will be NaT (not a time),
    # we duplicate the penultimate time_delta instead.
    # .iloc makes the positional intent explicit (plain [] with a negative int
    # relies on deprecated positional-fallback indexing)
    time_delta.iloc[-1] = time_delta.iloc[-2]
    time_delta.name = "timestep_resolution"
    # Time resolution is saved in hours (i.e. seconds / 3600)
    data["timestep_resolution"] = xr.DataArray.from_series(
        time_delta.dt.total_seconds() / 3600)

    data["timestep_weights"] = xr.DataArray(
        np.ones(len(data.timesteps)), dims=["timesteps"])

    return data
def apply_overrides(config, scenario=None, override_dict=None):
    """
    Generate processed Model configuration, applying any scenarios overrides.

    Parameters
    ----------
    config : AttrDict
        a model configuration AttrDict
    scenario : str, optional
        Either a named scenario from the config's `scenarios` key, or a
        comma-separated string of override names to apply directly.
    override_dict : str or dict or AttrDict, optional
        If a YAML string, converted to AttrDict

    Returns
    -------
    (config_model, debug_comments, overrides, scenario)

    """
    debug_comments = AttrDict()

    base_model_config_file = os.path.join(
        os.path.dirname(calliope.__file__),
        'config', 'model.yaml'
    )
    config_model = AttrDict.from_yaml(base_model_config_file)

    # Interpret timeseries_data_path as relative
    config.model.timeseries_data_path = relative_path(
        config.config_path, config.model.timeseries_data_path
    )

    # The input files are allowed to override other model defaults
    config_model.union(config, allow_override=True)

    # First pass of applying override dict before applying scenarios,
    # so that can override scenario definitions by override_dict
    if override_dict:
        if isinstance(override_dict, str):
            override_dict = AttrDict.from_yaml_string(override_dict)
        elif not isinstance(override_dict, AttrDict):
            override_dict = AttrDict(override_dict)

        warnings = checks.check_overrides(config_model, override_dict)
        exceptions.print_warnings_and_raise_errors(warnings=warnings)

        config_model.union(
            override_dict, allow_override=True, allow_replacement=True
        )

    if scenario:
        scenarios = config_model.get('scenarios', {})

        if scenario in scenarios.keys():
            # Manually defined scenario names cannot be the same as single
            # overrides or any combination of comma-delimited overrides
            if all([i in config_model.get('overrides', {})
                    for i in scenario.split(',')]):
                raise exceptions.ModelError(
                    'Manually defined scenario cannot be a combination of override names.'
                )
            if not isinstance(scenarios[scenario], list):
                raise exceptions.ModelError(
                    'Scenario definition must be a list of override names.'
                )
            overrides = [str(i) for i in scenarios[scenario]]
            logger.info(
                'Using scenario `{}` leading to the application of '
                'overrides `{}`.'.format(scenario, overrides)
            )
        else:
            # Not a named scenario: treat the string itself as override names
            overrides = str(scenario).split(',')
            logger.info(
                'Applying the following overrides without a '
                'specific scenario name: {}'.format(overrides)
            )

        overrides_from_scenario = combine_overrides(config_model, overrides)

        warnings = checks.check_overrides(config_model, overrides_from_scenario)
        exceptions.print_warnings_and_raise_errors(warnings=warnings)

        config_model.union(
            overrides_from_scenario, allow_override=True, allow_replacement=True
        )
        for k, v in overrides_from_scenario.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Applied from override')
    else:
        overrides = []

    # Second pass of applying override dict after applying scenarios,
    # so that scenario-based overrides are overridden by override_dict!
    if override_dict:
        config_model.union(
            override_dict, allow_override=True, allow_replacement=True
        )
        for k, v in override_dict.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Overridden via override dictionary.')

    return config_model, debug_comments, overrides, scenario
def generate_model_run(config, timeseries_dataframes, debug_comments,
                       applied_overrides, scenario):
    """
    Returns a processed model_run configuration AttrDict and a debug
    YAML object with comments attached, ready to write to disk.

    Parameters
    ----------
    config : AttrDict
    timeseries_dataframes : dict
    debug_comments : AttrDict
        Mutated in place: debug comments for techs/locations are added here.
    applied_overrides : list of str
        Override names already applied; stored joined by ';'.
    scenario : str

    """
    model_run = AttrDict()
    model_run["scenario"] = scenario
    model_run["applied_overrides"] = ";".join(applied_overrides)

    # 1) Initial checks on model configuration
    warning_messages, errors = checks.check_initial(config)
    exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)

    # 2) Fully populate techs
    # Raises ModelError if necessary
    model_run["techs"], debug_techs, errors = process_techs(config)
    debug_comments.set_key("model_run.techs", debug_techs)
    exceptions.print_warnings_and_raise_errors(errors=errors)

    # 3) Fully populate tech_groups
    model_run["tech_groups"] = process_tech_groups(config, model_run["techs"])

    # 4) Fully populate locations
    (
        model_run["locations"],
        debug_locs,
        warning_messages,
        errors,
    ) = locations.process_locations(config, model_run["techs"])
    debug_comments.set_key("model_run.locations", debug_locs)
    exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)

    # 5) Fully populate timeseries data
    # Raises ModelErrors if there are problems with timeseries data at this stage
    model_run["timeseries_data"], model_run[
        "timesteps"] = process_timeseries_data(config, model_run,
                                               timeseries_dataframes)

    # 6) Grab additional relevant bits from run and model config
    model_run["run"] = config["run"]
    model_run["model"] = config["model"]
    model_run["group_constraints"] = config.get("group_constraints", {})

    # 7) Initialize sets
    all_sets = sets.generate_simple_sets(model_run)
    all_sets.union(sets.generate_loc_tech_sets(model_run, all_sets))
    all_sets = AttrDict({k: list(v) for k, v in all_sets.items()})
    model_run["sets"] = all_sets
    model_run["constraint_sets"] = constraint_sets.generate_constraint_sets(
        model_run)

    # 7.5) get scaling factors if available
    if "scale" in config:
        model_run['scale'] = config['scale']

    # 8) Final sense-checking
    final_check_comments, warning_messages, errors = checks.check_final(
        model_run)
    debug_comments.union(final_check_comments)
    exceptions.print_warnings_and_raise_errors(warnings=warning_messages, errors=errors)

    # 9) Build a debug data dict with comments and the original configs
    debug_data = AttrDict({
        "comments": debug_comments,
        "config_initial": config,
    })

    return model_run, debug_data
def apply_overrides(config, override_file=None, override_dict=None):
    """
    Generate processed Model configuration, applying any overrides.

    Parameters
    ----------
    config : AttrDict
        a model configuration AttrDict
    override_file : str, optional
        Path of the form `path/to/file.yaml:override1,override2`; the part
        after the final `:` names the override group(s) to apply.
    override_dict : dict or AttrDict, optional

    Returns
    -------
    (config_model, debug_comments)

    """
    debug_comments = AttrDict()

    base_model_config_file = os.path.join(
        os.path.dirname(calliope.__file__),
        'config', 'model.yaml')
    config_model = AttrDict.from_yaml(base_model_config_file)

    # NOTE(review): default_tech_groups appears unused in this function —
    # confirm whether it can be removed
    default_tech_groups = list(config_model.tech_groups.keys())

    # README CHANGED: `model` is not a list any longer -
    # it is now always a single file

    # README CHANGED: order of arguments to relative_path reversed

    # README CHANGED: data_path option removed -- need to make sure
    # that for parallel runs, data_path relative to the currently
    # open model config file always works

    # Interpret timeseries_data_path as relative
    config.model.timeseries_data_path = relative_path(
        config.config_path, config.model.timeseries_data_path)

    # The input files are allowed to override other model defaults
    config_model.union(config, allow_override=True)

    # Apply overrides via 'override_file', which contains the path to a YAML file
    if override_file:
        # Due to the possible occurrence of `C:\path_to_file\file.yaml:override`
        # we have to split override_file into `path_to_file`, `file.yaml` and
        # `override` before merging `path_to_file` and `file.yaml` back together
        path_to_file, override_file_with_group = os.path.split(override_file)
        override_file, override_groups = override_file_with_group.split(':')
        override_file_path = os.path.join(path_to_file, override_file)

        override_from_file = combine_overrides(override_file_path, override_groups)

        warnings = checks.check_overrides(config_model, override_from_file)
        exceptions.print_warnings_and_raise_errors(warnings=warnings)

        config_model.union(
            override_from_file, allow_override=True, allow_replacement=True)
        for k, v in override_from_file.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Overridden via override: {}'.format(override_file))

    # Apply overrides via 'override', which is an AttrDict
    if override_dict:
        if not isinstance(override_dict, AttrDict):
            override_dict = AttrDict(override_dict)

        warnings = checks.check_overrides(config_model, override_dict)
        exceptions.print_warnings_and_raise_errors(warnings=warnings)

        config_model.union(
            override_dict, allow_override=True, allow_replacement=True)
        for k, v in override_dict.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Overridden via override dictionary.')

    return config_model, debug_comments
def add_time_dimension(data, model_run):
    """
    Once all constraints and costs have been loaded into the model dataset, any
    timeseries data is loaded from file and substituted into the model dataset

    Parameters:
    -----------
    data : xarray Dataset
        A data structure which has already gone through `constraints_to_dataset`,
        `costs_to_dataset`, and `add_attributes`
    model_run : AttrDict
        Calliope model_run dictionary

    Returns:
    --------
    data : xarray Dataset
        A data structure with an additional time dimension to the input dataset,
        with all relevant `file=` and `df= `entries replaced with the correct data.

    """
    key_errors = []
    # Search through every constraint/cost for use of '='
    for variable in model_run.timeseries_vars:
        # 2) convert to a Pandas Series to do 'string contains' search
        data_series = data[variable].to_series().dropna()

        # 3) get Series of all uses of 'file=' or 'df=' for this variable (timeseries keys)
        try:
            tskeys = data_series[data_series.str.contains("file=")
                                 | data_series.str.contains("df=")]
        except AttributeError:
            # Non-string data has no `.str` accessor; nothing to substitute
            continue

        # 4) If no use of 'file=' or 'df=' then we can be on our way
        if tskeys.empty:
            continue

        # 5) remove all before '=' and split filename and node column.
        # `n` passed by keyword: positional `n` is deprecated in pandas str.rsplit
        tskeys = (tskeys.str.split("=").str[1]
                  .str.rsplit(":", n=1, expand=True)
                  .reset_index()
                  .rename(columns={0: "source", 1: "column"})
                  .set_index(["source", "column"]))

        # 6) Get all timeseries data from dataframes stored in model_run
        try:
            timeseries_data = model_run.timeseries_data.loc[:, tskeys.index]
        except KeyError:
            key_errors.append(
                f"file:column combinations `{tskeys.index.values}` not found, but are"
                f" requested by parameter `{variable}`.")
            continue

        timeseries_data.columns = pd.MultiIndex.from_frame(tskeys)

        # 7) Add time dimension to the relevant DataArray and update the '='
        # dimensions with the time varying data (static data is just duplicated
        # at each timestep)
        data[variable] = (xr.DataArray.from_series(timeseries_data.unstack())
                          .reindex(data[variable].coords)
                          .fillna(data[variable]))

    if key_errors:
        exceptions.print_warnings_and_raise_errors(errors=key_errors)

    # Add timestep_resolution by looking at the time difference between timestep n
    # and timestep n + 1 for all timesteps
    # Last timestep has no n + 1, so will be NaT (not a time), we ffill this.
    # Time resolution is saved in hours (i.e. nanoseconds / 3600e6)
    data["timestep_resolution"] = (
        data.timesteps.diff("timesteps", label="lower")
        .reindex({"timesteps": data.timesteps})
        .ffill("timesteps")
        .rename("timestep_resolution")
        / pd.Timedelta("1 hour"))

    if len(data.timesteps) == 1:
        exceptions.warn(
            "Only one timestep defined. Inferring timestep resolution to be 1 hour"
        )
        data["timestep_resolution"] = data["timestep_resolution"].fillna(1)

    data["timestep_weights"] = xr.DataArray(np.ones(len(data.timesteps)),
                                            dims=["timesteps"])

    return data
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    Parameters
    ----------
    model_data : xarray Dataset
        Fully processed model data, including `defaults` and `run_config` attrs.
    timings : dict
        Mutated in place by `log_time` calls.
    backend : module
        Backend interface providing generate_model / solve_model / load_results /
        get_result_array.
    build_only : bool
        If True, build the first-window model and return an empty results Dataset.

    Returns
    -------
    (results, backend_model)

    """
    log_time(logger, timings, 'run_start',
             comment='Backend: starting model run in operational mode')

    defaults = AttrDict.from_yaml_string(model_data.attrs['defaults'])
    run_config = AttrDict.from_yaml_string(model_data.attrs['run_config'])

    operate_params = ['purchased'] + [
        i.replace('_max', '') for i in defaults if i[-4:] == '_max'
    ]

    # Capacity results (from plan mode) can be used as the input to operate mode
    if (any(model_data.filter_by_attrs(is_result=1).data_vars)
            and run_config.get('operation.use_cap_results', False)):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params:
                varvals.attrs['is_result'] = 1
                varvals.attrs['operate_param'] = 1
    else:
        cap_max = xr.merge([
            v.rename(k.replace('_max', ''))
            for k, v in model_data.data_vars.items() if '_max' in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace('_equals', ''))
            for k, v in model_data.data_vars.items() if '_equals' in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs['is_result'] = 1
            cap.attrs['operate_param'] = 1
        model_data.update(caps)

    # Storage initial is carried over between iterations, so must be defined along with storage
    if ('loc_techs_store' in model_data.dims.keys()
            and 'storage_initial' not in model_data.data_vars.keys()):
        model_data['storage_initial'] = (xr.DataArray(
            [0 for loc_tech in model_data.loc_techs_store.values],
            dims='loc_techs_store'))
        model_data['storage_initial'].attrs['is_result'] = 0
        exceptions.warn(
            'Initial stored energy not defined, set to zero for all '
            'loc::techs in loc_techs_store, for use in iterative optimisation')

    # Operated units is carried over between iterations, so must be defined in a milp model
    if ('loc_techs_milp' in model_data.dims.keys()
            and 'operated_units' not in model_data.data_vars.keys()):
        model_data['operated_units'] = (xr.DataArray(
            [0 for loc_tech in model_data.loc_techs_milp.values],
            dims='loc_techs_milp'))
        model_data['operated_units'].attrs['is_result'] = 1
        model_data['operated_units'].attrs['operate_param'] = 1
        exceptions.warn(
            'daily operated units not defined, set to zero for all '
            'loc::techs in loc_techs_milp, for use in iterative optimisation')

    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # Initialize our variables
    solver = run_config['solver']
    solver_io = run_config.get('solver_io', None)
    solver_options = run_config.get('solver_options', None)
    save_logs = run_config.get('save_logs', None)
    window = run_config['operation']['window']
    horizon = run_config['operation']['horizon']
    window_to_horizon = horizon - window

    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum(
        'timesteps').to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where((timestep_cumsum % window == 0) | (
        timestep_cumsum == timestep_cumsum[-1]))
    window_starts = timestep_cumsum.where((~np.isnan(window_ends.shift(1))) | (
        timestep_cumsum == timestep_cumsum[0])).dropna()
    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values +
                                                        window_to_horizon)]
    if not any(window_starts):
        raise exceptions.ModelError(
            'Not enough timesteps or incorrect timestep resolution to run in '
            'operational mode with an optimisation window of {}'.format(
                window))

    # We will only update timeseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items()
        if 'timesteps' in v.dims and v.attrs['is_result'] == 0
    ]

    # Loop through each window, solve over the horizon length, and add result to
    # result_array we only go as far as the end of the last horizon, which may
    # clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []

    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))

    for i in iterations:
        start_timestep = window_starts.index[i]

        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(logger, timings, 'model_gen_1',
                     comment='Backend: generating initial model')

            backend_model = backend.generate_model(window_model_data)

        # Build the full model in the last instance(s),
        # where number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger, timings, 'model_gen_{}'.format(i + 1),
                comment=(
                    'Backend: iteration {}: generating new model for '
                    'end of timeseries, with horizon = {} timesteps'.format(
                        i + 1, window_ends[i] - window_starts[i])))

            backend_model = backend.generate_model(window_model_data)

        # Update relevant Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger, timings, 'model_gen_{}'.format(i + 1),
                comment='Backend: iteration {}: updating model parameters'.
                format(i + 1))
            # Pyomo model sees the same timestamps each time, we just change the
            # values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = window_model_data[var].to_series().dropna(
                ).replace('inf', np.inf)
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data['data'][
                    var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary
                getattr(backend_model, var).store_values(var_dict)

        if not build_only:
            log_time(logger, timings, 'model_run_{}'.format(i + 1),
                     time_since_run_start=True,
                     comment='Backend: iteration {}: sending model to solver'.
                     format(i + 1))
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model,
                solver=solver,
                solver_io=solver_io,
                solver_options=solver_options,
                save_logs=save_logs,
                warmstart=warmstart,
            )

            log_time(logger, timings, 'run_solver_exit_{}'.format(i + 1),
                     time_since_run_start=True,
                     comment='Backend: iteration {}: solver finished running'.
                     format(i + 1))

            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)

            _results = backend.get_result_array(backend_model, model_data)

            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results['timesteps'] = window_model_data.timesteps.copy()

            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimisation will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(
                timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)

            # Set up initial storage for the next iteration
            if 'loc_techs_store' in model_data.dims.keys():
                storage_initial = _results.storage.loc[{
                    'timesteps': window_ends.index[i]
                }].drop('timesteps')
                model_data['storage_initial'].loc[
                    storage_initial.coords] = storage_initial.values
                backend_model.storage_initial.store_values(
                    storage_initial.to_series().dropna().to_dict())

            # Set up total operated units for the next iteration
            if 'loc_techs_milp' in model_data.dims.keys():
                # builtin `int` instead of `np.int`, which was removed in NumPy 1.24
                operated_units = _results.operating_units.sum(
                    'timesteps').astype(int)
                model_data['operated_units'].loc[{}] += operated_units.values
                backend_model.operated_units.store_values(
                    operated_units.to_series().dropna().to_dict())

            log_time(logger, timings, 'run_solver_exit_{}'.format(i + 1),
                     time_since_run_start=True,
                     comment='Backend: iteration {}: generated solution array'.
                     format(i + 1))

    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim='timesteps')
        if all(i == 'optimal' for i in terminations):
            results.attrs['termination_condition'] = 'optimal'
        elif all(i in ['optimal', 'feasible'] for i in terminations):
            results.attrs['termination_condition'] = 'feasible'
        else:
            results.attrs['termination_condition'] = ','.join(terminations)

        log_time(logger, timings, 'run_solution_returned',
                 time_since_run_start=True,
                 comment='Backend: generated full solution array')

    return results, backend_model
def apply_overrides(config, scenario=None, override_dict=None):
    """
    Generate processed Model configuration, applying any scenarios overrides.

    Overrides are applied in three layers, in order of increasing precedence:
    built-in defaults < model config < scenario/override-name overrides
    < ``override_dict``. Note that ``override_dict`` is applied twice: once
    before scenarios (so it can override scenario *definitions*) and once
    after (so its values win over scenario-applied overrides).

    Parameters
    ----------
    config : AttrDict
        a model configuration AttrDict
    scenario : str, optional
        Either the name of a scenario defined under ``scenarios``, or a
        comma-delimited string of override names.
    override_dict : str or dict or AttrDict, optional
        If a YAML string, converted to AttrDict

    Returns
    -------
    config_model : AttrDict
        Fully merged configuration, with the placeholder default
        tech/location/link/group entries removed.
    debug_comments : AttrDict
        Per-key notes recording where overridden values came from.
    overrides : list of str
        The override names that were applied (empty if no scenario given).
    scenario : str or None
        The scenario argument, passed through unchanged.
    """
    debug_comments = AttrDict()

    # Built-in defaults form the base layer of the configuration.
    config_model = AttrDict.from_yaml(os.path.join(
        os.path.dirname(calliope.__file__), 'config', 'defaults.yaml'
    ))

    # Interpret timeseries_data_path as relative
    config.model.timeseries_data_path = relative_path(
        config.config_path, config.model.timeseries_data_path
    )

    # FutureWarning: check if config includes an explicit objective cost class.
    # Added in 0.6.4-dev, to be removed in v0.7.0-dev.
    has_explicit_cost_class = isinstance(config.get_key('run.objective_options.cost_class', None), dict)

    # The input files are allowed to override other model defaults
    config_model.union(config, allow_override=True)

    # First pass of applying override dict before applying scenarios,
    # so that can override scenario definitions by override_dict
    if override_dict:
        if isinstance(override_dict, str):
            override_dict = AttrDict.from_yaml_string(override_dict)
        elif not isinstance(override_dict, AttrDict):
            override_dict = AttrDict(override_dict)

        warning_messages = checks.check_overrides(config_model, override_dict)
        exceptions.print_warnings_and_raise_errors(warnings=warning_messages)

        # FutureWarning: If config does not include an explicit objective cost class, check override dict.
        # Added in 0.6.4-dev, to be removed in v0.7.0-dev.
        if has_explicit_cost_class is False:
            has_explicit_cost_class = isinstance(override_dict.get_key('run.objective_options.cost_class', None), dict)

        config_model.union(
            override_dict, allow_override=True, allow_replacement=True
        )

    if scenario:
        scenarios = config_model.get('scenarios', {})

        if scenario in scenarios.keys():
            # Manually defined scenario names cannot be the same as single
            # overrides or any combination of comma-delimited overrides
            if all([i in config_model.get('overrides', {})
                    for i in scenario.split(',')]):
                raise exceptions.ModelError(
                    'Manually defined scenario cannot be a combination of override names.'
                )
            if not isinstance(scenarios[scenario], list):
                raise exceptions.ModelError(
                    'Scenario definition must be a list of override names.'
                )
            overrides = [str(i) for i in scenarios[scenario]]
            logger.info(
                'Using scenario `{}` leading to the application of '
                'overrides `{}`.'.format(scenario, overrides)
            )
        else:
            # Not a named scenario: treat the string as a comma-delimited
            # list of override names.
            overrides = str(scenario).split(',')
            logger.info(
                'Applying the following overrides without a '
                'specific scenario name: {}'.format(overrides)
            )

        overrides_from_scenario = combine_overrides(config_model, overrides)

        warning_messages = checks.check_overrides(config_model, overrides_from_scenario)
        exceptions.print_warnings_and_raise_errors(warnings=warning_messages)

        # FutureWarning: If config nor override_dict include an explicit objective cost class, check scenario dict.
        # Added in 0.6.4-dev, to be removed in v0.7.0-dev
        if has_explicit_cost_class is False:
            has_explicit_cost_class = isinstance(overrides_from_scenario.get_key('run.objective_options.cost_class', None), dict)

        config_model.union(
            overrides_from_scenario, allow_override=True, allow_replacement=True
        )
        for k, v in overrides_from_scenario.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Applied from override')
    else:
        overrides = []

    # Second pass of applying override dict after applying scenarios,
    # so that scenario-based overrides are overridden by override_dict!
    if override_dict:
        config_model.union(
            override_dict, allow_override=True, allow_replacement=True
        )
        for k, v in override_dict.as_dict_flat().items():
            debug_comments.set_key(
                '{}'.format(k),
                'Overridden via override dictionary.')

    # FutureWarning: raise cost class warning here.
    # Warning that there will be no default cost class in 0.7.0 #
    # Added in 0.6.4-dev, to be removed in v0.7.0-dev
    if has_explicit_cost_class is False:
        warnings.warn(
            'There will be no default cost class for the objective function in '
            'v0.7.0 (currently "monetary" with a weight of 1). '
            'Explicitly specify the cost class(es) you would like to use '
            'under `run.objective_options.cost_class`. E.g. `{"monetary": 1}` to '
            'replicate the current default.',
            FutureWarning
        )

    # Drop default locations, links, and techs: these are placeholder entries
    # from defaults.yaml, not real model components.
    config_model.del_key('techs.default_tech')
    config_model.del_key('locations.default_location')
    config_model.del_key('links.default_location_from,default_location_to')
    config_model.del_key('group_constraints.default_group')

    return config_model, debug_comments, overrides, scenario
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    The timeseries is split into fixed-length optimisation windows; each
    iteration solves over a (longer) horizon but only the window-length slice
    of each solution is kept. Storage levels and operated units are carried
    over between iterations.

    Parameters
    ----------
    model_data : xarray.Dataset
        Pre-processed model data; ``defaults`` and ``run_config`` are read
        from its attrs as YAML strings.
    timings : dict
        Timing log, updated in place via ``log_time``.
    backend : module
        Provides ``generate_model``, ``solve_model``, ``load_results`` and
        ``get_result_array``.
    build_only : bool
        If True, only build the first window's Pyomo model; do not solve.

    Returns
    -------
    results : xarray.Dataset
        Concatenated per-window results (empty Dataset if ``build_only``),
        with a ``termination_condition`` attribute.
    backend_model
        The (last-built) Pyomo backend model.
    """
    log_time(
        logger,
        timings,
        "run_start",
        comment="Backend: starting model run in operational mode",
    )

    # These observer dicts write any updates back into model_data.attrs.
    defaults = UpdateObserverDict(
        initial_yaml_string=model_data.attrs["defaults"],
        name="defaults",
        observer=model_data,
    )
    run_config = UpdateObserverDict(
        initial_yaml_string=model_data.attrs["run_config"],
        name="run_config",
        observer=model_data,
    )

    # New param defaults = old maximum param defaults (e.g. energy_cap gets default from energy_cap_max)
    operate_params = {
        k.replace("_max", ""): v for k, v in defaults.items() if k.endswith("_max")
    }
    operate_params["purchased"] = 0  # no _max to work from here, so we hardcode a default

    defaults.update(operate_params)

    # Capacity results (from plan mode) can be used as the input to operate mode
    if (any(model_data.filter_by_attrs(is_result=1).data_vars)
            and run_config.get("operation.use_cap_results", False)):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params.keys():
                varvals.attrs["is_result"] = 1
                varvals.attrs["operate_param"] = 1
    else:
        # No usable plan-mode results: derive fixed capacities from the
        # *_max / *_equals input parameters instead.
        cap_max = xr.merge([
            v.rename(k.replace("_max", ""))
            for k, v in model_data.data_vars.items() if "_max" in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace("_equals", ""))
            for k, v in model_data.data_vars.items() if "_equals" in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs["is_result"] = 1
            cap.attrs["operate_param"] = 1
        model_data.update(caps)

    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # Initialize our variables
    solver = run_config["solver"]
    solver_io = run_config.get("solver_io", None)
    solver_options = run_config.get("solver_options", None)
    save_logs = run_config.get("save_logs", None)
    window = run_config["operation"]["window"]
    horizon = run_config["operation"]["horizon"]
    window_to_horizon = horizon - window

    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum(
        "timesteps").to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where((timestep_cumsum % window == 0) | (
        timestep_cumsum == timestep_cumsum[-1]))
    window_starts = timestep_cumsum.where((~np.isnan(window_ends.shift(1))) | (
        timestep_cumsum == timestep_cumsum[0])).dropna()
    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values +
                                                        window_to_horizon)]
    if not any(window_starts):
        raise exceptions.ModelError(
            "Not enough timesteps or incorrect timestep resolution to run in "
            "operational mode with an optimisation window of {}".format(
                window))

    # We will only update timseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items()
        if "timesteps" in v.dims and v.attrs["is_result"] == 0
    ]

    # Loop through each window, solve over the horizon length, and add result to
    # result_array we only go as far as the end of the last horizon, which may
    # clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []

    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))

    for i in iterations:
        start_timestep = window_starts.index[i]

        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                "model_gen_1",
                comment="Backend: generating initial model",
            )

            backend_model = backend.generate_model(window_model_data)

        # Build the full model in the last instance(s),
        # where number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                "model_gen_{}".format(i + 1),
                comment=(
                    "Backend: iteration {}: generating new model for "
                    "end of timeseries, with horizon = {} timesteps".format(
                        i + 1, window_ends[i] - window_starts[i])),
            )

            backend_model = backend.generate_model(window_model_data)

        # Update relevent Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                logger,
                timings,
                "model_gen_{}".format(i + 1),
                comment="Backend: iteration {}: updating model parameters".
                format(i + 1),
            )
            # Pyomo model sees the same timestamps each time, we just change the
            # values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = (
                    window_model_data[var].to_series().dropna().replace(
                        "inf", np.inf))
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data["data"][
                    var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary
                getattr(backend_model, var).store_values(var_dict)

        if not build_only:
            log_time(
                logger,
                timings,
                "model_run_{}".format(i + 1),
                time_since_run_start=True,
                comment="Backend: iteration {}: sending model to solver".
                format(i + 1),
            )
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model,
                solver=solver,
                solver_io=solver_io,
                solver_options=solver_options,
                save_logs=save_logs,
                warmstart=warmstart,
            )

            log_time(
                logger,
                timings,
                "run_solver_exit_{}".format(i + 1),
                time_since_run_start=True,
                comment="Backend: iteration {}: solver finished running".
                format(i + 1),
            )
            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)

            _results = backend.get_result_array(backend_model, model_data)

            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results["timesteps"] = window_model_data.timesteps.copy()

            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimistion will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(
                timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)

            # Set up initial storage for the next iteration
            if "loc_techs_store" in model_data.dims.keys():
                storage_initial = _results.storage.loc[{
                    "timesteps": window_ends.index[i]
                }].drop("timesteps")
                model_data["storage_initial"].loc[
                    storage_initial.coords] = storage_initial.values
                backend_model.storage_initial.store_values(
                    storage_initial.to_series().dropna().to_dict())

            # Set up total operated units for the next iteration
            if "loc_techs_milp" in model_data.dims.keys():
                # NOTE: np.int was removed in NumPy 1.24; the builtin int is
                # the documented replacement and behaves identically here.
                operated_units = _results.operating_units.sum(
                    "timesteps").astype(int)
                model_data["operated_units"].loc[{}] += operated_units.values
                backend_model.operated_units.store_values(
                    operated_units.to_series().dropna().to_dict())

            log_time(
                logger,
                timings,
                "run_solver_exit_{}".format(i + 1),
                time_since_run_start=True,
                comment="Backend: iteration {}: generated solution array".
                format(i + 1),
            )

    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim="timesteps")
        if all(i == "optimal" for i in terminations):
            results.attrs["termination_condition"] = "optimal"
        elif all(i in ["optimal", "feasible"] for i in terminations):
            results.attrs["termination_condition"] = "feasible"
        else:
            results.attrs["termination_condition"] = ",".join(terminations)

        log_time(
            logger,
            timings,
            "run_solution_returned",
            time_since_run_start=True,
            comment="Backend: generated full solution array",
        )

    return results, backend_model
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    The timeseries is split into fixed-length optimisation windows; each
    iteration solves over a (longer) horizon but only the window-length slice
    of each solution is kept. Storage levels and operated units are carried
    over between iterations.

    Parameters
    ----------
    model_data : xarray.Dataset
        Pre-processed model data; run settings are read from flat
        ``run.*`` keys in its attrs.
    timings : dict
        Timing log, updated in place via ``log_time``.
    backend : module
        Provides ``generate_model``, ``solve_model``, ``load_results`` and
        ``get_result_array``.
    build_only : bool
        If True, only build the first window's Pyomo model; do not solve.

    Returns
    -------
    results : xarray.Dataset
        Concatenated per-window results (empty Dataset if ``build_only``),
        with a ``termination_condition`` attribute.
    backend_model
        The (last-built) Pyomo backend model.
    """
    log_time(timings, 'run_start',
             comment='Backend: starting model run in operational mode')

    defaults = ruamel.yaml.load(model_data.attrs['defaults'], Loader=ruamel.yaml.Loader)
    # Operate-mode params are the capacity decision variables, which take
    # their values from the corresponding *_max inputs (plus 'purchased',
    # which has no *_max counterpart).
    operate_params = ['purchased'] + [
        i.replace('_max', '') for i in defaults if i[-4:] == '_max'
    ]

    # Capacity results (from plan mode) can be used as the input to operate mode
    if (any(model_data.filter_by_attrs(is_result=1).data_vars) and
            model_data.attrs.get('run.operation.use_cap_results', False)):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params:
                varvals.attrs['is_result'] = 1
                varvals.attrs['operate_param'] = 1
    else:
        # No usable plan-mode results: derive fixed capacities from the
        # *_max / *_equals input parameters instead.
        cap_max = xr.merge([
            v.rename(k.replace('_max', ''))
            for k, v in model_data.data_vars.items() if '_max' in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace('_equals', ''))
            for k, v in model_data.data_vars.items() if '_equals' in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs['is_result'] = 1
            cap.attrs['operate_param'] = 1
        model_data.update(caps)

    # Storage initial is carried over between iterations, so must be defined along with storage
    if ('loc_techs_store' in model_data.dims.keys() and
            'storage_initial' not in model_data.data_vars.keys()):
        model_data['storage_initial'] = (
            xr.DataArray([0 for loc_tech in model_data.loc_techs_store.values],
                         dims='loc_techs_store')
        )
        model_data['storage_initial'].attrs['is_result'] = 0
        # BUGFIX: the warning object was previously instantiated but never
        # emitted; exceptions.warn actually raises it to the user.
        exceptions.warn(
            'Initial stored energy not defined, set to zero for all '
            'loc::techs in loc_techs_store, for use in iterative optimisation'
        )

    # Operated units is carried over between iterations, so must be defined in a milp model
    if ('loc_techs_milp' in model_data.dims.keys() and
            'operated_units' not in model_data.data_vars.keys()):
        model_data['operated_units'] = (
            xr.DataArray([0 for loc_tech in model_data.loc_techs_milp.values],
                         dims='loc_techs_milp')
        )
        model_data['operated_units'].attrs['is_result'] = 1
        model_data['operated_units'].attrs['operate_param'] = 1
        # BUGFIX: same as above -- emit the warning instead of discarding it.
        exceptions.warn(
            'daily operated units not defined, set to zero for all '
            'loc::techs in loc_techs_milp, for use in iterative optimisation'
        )

    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)

    # Initialize our variables
    solver = model_data.attrs['run.solver']
    solver_io = model_data.attrs.get('run.solver_io', None)
    solver_options = model_data.attrs.get('run.solver_options', None)
    save_logs = model_data.attrs.get('run.save_logs', None)
    window = model_data.attrs['run.operation.window']
    horizon = model_data.attrs['run.operation.horizon']
    window_to_horizon = horizon - window

    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum('timesteps').to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where(
        (timestep_cumsum % window == 0) | (timestep_cumsum == timestep_cumsum[-1])
    )
    window_starts = timestep_cumsum.where(
        (~np.isnan(window_ends.shift(1))) | (timestep_cumsum == timestep_cumsum[0])
    ).dropna()
    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values + window_to_horizon)]

    if not any(window_starts):
        raise exceptions.ModelError(
            'Not enough timesteps or incorrect timestep resolution to run in '
            'operational mode with an optimisation window of {}'.format(window)
        )

    # We will only update timseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items()
        if 'timesteps' in v.dims and v.attrs['is_result'] == 0
    ]

    # Loop through each window, solve over the horizon length, and add result to
    # result_array we only go as far as the end of the last horizon, which may
    # clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []

    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))

    for i in iterations:
        start_timestep = window_starts.index[i]

        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                timings, 'model_gen_1',
                comment='Backend: generating initial model'
            )

            backend_model = backend.generate_model(window_model_data)

        # Build the full model in the last instance(s),
        # where number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                timings, 'model_gen_{}'.format(i + 1),
                comment=(
                    'Backend: iteration {}: generating new model for '
                    'end of timeseries, with horizon = {} timesteps'
                    .format(i + 1, window_ends[i] - window_starts[i])
                )
            )

            backend_model = backend.generate_model(window_model_data)

        # Update relevent Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]

            log_time(
                timings, 'model_gen_{}'.format(i + 1),
                comment='Backend: iteration {}: updating model parameters'.format(i + 1)
            )

            # Pyomo model sees the same timestamps each time, we just change the
            # values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = window_model_data[var].to_series().dropna().replace('inf', np.inf)
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data__['data'][var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary
                for k, v in getattr(backend_model, var).items():
                    if k in var_dict:
                        v.set_value(var_dict[k])

        if not build_only:
            log_time(
                timings, 'model_run_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: sending model to solver'.format(i + 1)
            )
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model,
                solver=solver,
                solver_io=solver_io,
                solver_options=solver_options,
                save_logs=save_logs,
                warmstart=warmstart,
            )

            log_time(
                timings, 'run_solver_exit_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: solver finished running'.format(i + 1)
            )
            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)

            _results = backend.get_result_array(backend_model, model_data)

            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results['timesteps'] = window_model_data.timesteps.copy()

            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimistion will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)

            # Set up initial storage for the next iteration
            if 'loc_techs_store' in model_data.dims.keys():
                storage_initial = _results.storage.loc[{'timesteps': window_ends.index[i]}].drop('timesteps')
                model_data['storage_initial'].loc[storage_initial.coords] = storage_initial.values
                # Build the lookup dict once, rather than once per key
                storage_initial_dict = storage_initial.to_series().dropna().to_dict()
                for k, v in backend_model.storage_initial.items():
                    v.set_value(storage_initial_dict[k])

            # Set up total operated units for the next iteration
            if 'loc_techs_milp' in model_data.dims.keys():
                # NOTE: np.int was removed in NumPy 1.24; the builtin int is
                # the documented replacement and behaves identically here.
                operated_units = _results.operating_units.sum('timesteps').astype(int)
                model_data['operated_units'].loc[{}] += operated_units.values
                # Build the lookup dict once, rather than once per key
                operated_units_dict = operated_units.to_series().dropna().to_dict()
                for k, v in backend_model.operated_units.items():
                    v.set_value(operated_units_dict[k])

            log_time(
                timings, 'run_solver_exit_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: generated solution array'.format(i + 1)
            )

    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim='timesteps')
        if all(i == 'optimal' for i in terminations):
            results.attrs['termination_condition'] = 'optimal'
        else:
            results.attrs['termination_condition'] = ','.join(terminations)

        log_time(
            timings, 'run_solution_returned', time_since_start=True,
            comment='Backend: generated full solution array'
        )

    return results, backend_model