Example No. 1
    def declareObjective(self, pyM):
        """
        Declare the objective function by obtaining the contributions to the objective function from all modeling
        classes. Currently, the only objective function which can be selected is the sum of the total annual cost
        of all components.
        """
        utils.output('Declaring objective function...', self.verbose, 0)

        def objective(pyM):
            TAC = sum(mdl.getObjectiveFunctionContribution(self, pyM) for mdl in self.componentModelingDict.values())
            return TAC
        pyM.Obj = pyomo.Objective(rule=objective)
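
The rule pattern above is plain pyomo and can be reproduced in isolation. A minimal sketch (the contribution callables below are hypothetical stand-ins for getObjectiveFunctionContribution, not FINE code):

import pyomo.environ as pyomo

m = pyomo.ConcreteModel()
m.x = pyomo.Var(bounds=(0, 10))
m.y = pyomo.Var(bounds=(0, 10))

# Hypothetical per-class contributions to the total annual cost (TAC)
contributions = [lambda mdl: 2 * mdl.x, lambda mdl: 3 * mdl.y]

def objective(mdl):
    return sum(contribution(mdl) for contribution in contributions)

m.Obj = pyomo.Objective(rule=objective)  # pyomo minimizes by default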
Example No. 2
    def declareTimeSets(self, pyM, timeSeriesAggregation):
        """ Set and initialize basic time parameters and sets """

        # Store in the pyomo model instance whether aggregated time series data is considered for modeling the
        # energy system, and accordingly set the time series that is subsequently used for modeling in all components
        pyM.hasTSA = timeSeriesAggregation
        for mdl in self.componentModelingDict.values():
            for comp in mdl.componentsDict.values():
                comp.setTimeSeriesData(pyM.hasTSA)

        # Set the time set and the inter time steps set. The time set is a set of tuples. A tuple consists of two
        # entries, the first one indicating an index of a period and the second one indicating a time step inside that
        # period. If time series aggregation is not considered, only one period (period 0) exists and the time steps
        # range from 0 up to the specified total number of time steps - 1. Otherwise, the time set is initialized for
        # each typical period (0 ... numberOfTypicalPeriods-1) and the number of time steps per period (0 ...
        # numberOfTimeStepsPerPeriod-1).
        # The inter time steps set is likewise a set of tuples with two entries. The first entry again indicates
        # the period, but the second one now refers to a point in time right before or after a time step (or between
        # two time steps). Hence, the second entry takes values from 0 up to numberOfTimeStepsPerPeriod.
        if not pyM.hasTSA:
            # Reset timeStepsPerPeriod in case it was overwritten by the clustering function
            self.timeStepsPerPeriod = self.totalTimeSteps
            self.interPeriodTimeSteps = list(range(int(len(self.totalTimeSteps) /
                                                        len(self.timeStepsPerPeriod)) + 1))
            self.periods = [0]
            self.periodsOrder = [0]
            self.periodOccurrences = [1]

            # Define sets
            def initTimeSet(pyM):
                return ((p, t) for p in self.periods for t in self.timeStepsPerPeriod)

            def initInterTimeStepsSet(pyM):
                return ((p, t) for p in self.periods for t in range(len(self.timeStepsPerPeriod) + 1))
        else:
            utils.output('Time series aggregation specifications:\n'
                         'Number of typical periods: ' + str(len(self.typicalPeriods)) +
                         ', number of time steps per period: ' + str(len(self.timeStepsPerPeriod)) + '\n',
                         self.verbose, 0)

            # Define sets
            def initTimeSet(pyM):
                return ((p, t) for p in self.typicalPeriods for t in self.timeStepsPerPeriod)

            def initInterTimeStepsSet(pyM):
                return ((p, t) for p in self.typicalPeriods for t in range(len(self.timeStepsPerPeriod) + 1))

        # Initialize sets
        pyM.timeSet = pyomo.Set(dimen=2, initialize=initTimeSet)
        pyM.interTimeStepsSet = pyomo.Set(dimen=2, initialize=initInterTimeStepsSet)
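
For illustration, the same two-dimensional set declarations as a self-contained sketch with hard-coded periods and time steps (all values are invented):

import pyomo.environ as pyomo

periods = [0, 1]                     # two (typical) periods
timeStepsPerPeriod = list(range(3))  # time steps 0..2 within each period

m = pyomo.ConcreteModel()
m.timeSet = pyomo.Set(
    dimen=2, initialize=[(p, t) for p in periods for t in timeStepsPerPeriod])
m.interTimeStepsSet = pyomo.Set(
    dimen=2, initialize=[(p, t) for p in periods
                         for t in range(len(timeStepsPerPeriod) + 1)])
# timeSet holds (0,0) ... (1,2); interTimeStepsSet additionally holds the
# boundary points (0,3) and (1,3).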
Example No. 3
    def declareObjective(self, pyM):
        """
        Declare the objective function by obtaining the contributions to the objective function from all modeling
        classes. Currently, the only objective function which can be selected is the sum of the total annual cost of all
        components.

        :param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
            constraints and objective required for the optimization set up and solving.
        :type pyM: pyomo ConcreteModel
        """
        utils.output('Declaring objective function...', self.verbose, 0)

        def objective(pyM):
            TAC = sum(
                mdl.getObjectiveFunctionContribution(self, pyM)
                for mdl in self.componentModelingDict.values())
            return TAC

        pyM.Obj = pyomo.Objective(rule=objective)
Example No. 4
    def declareCommodityBalanceConstraints(self, pyM):
        """
        Declare commodity balance constraints (one balance constraint for each commodity, location and time step)
        """
        utils.output('Declaring commodity balances...', self.verbose, 0)

        # Declare and initialize a set that states for which location and commodity the commodity balance constraints
        # are non-trivial (i.e. not 0 == 0; trivial constraints raise errors in pyomo).
        def initLocationCommoditySet(pyM):
            return ((loc, commod) for loc in self.locations for commod in self.commodities
                    if any([mdl.hasOpVariablesForLocationCommodity(self, loc, commod)
                            for mdl in self.componentModelingDict.values()]))
        pyM.locationCommoditySet = pyomo.Set(dimen=2, initialize=initLocationCommoditySet)

        # Declare and initialize commodity balance constraints by checking for each location and commodity in the
        # locationCommoditySet and for each period and time step within the period if the commodity source and sink
        # terms add up to zero. For this, get the contribution to commodity balance from each modeling class
        def commodityBalanceConstraint(pyM, loc, commod, p, t):
            return sum(mdl.getCommodityBalanceContribution(pyM, commod, loc, p, t)
                       for mdl in self.componentModelingDict.values()) == 0
        pyM.commodityBalanceConstraint = pyomo.Constraint(pyM.locationCommoditySet, pyM.timeSet,
                                                          rule=commodityBalanceConstraint)
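
The sparse-index trick above (declaring the balance only where it is non-trivial) works the same outside FINE. In the sketch below, the locations, commodities, and operation variable are invented, and the variable stands in for the summed source/sink contributions:

import pyomo.environ as pyomo

locations, commodities = ['loc1', 'loc2'], ['electricity', 'hydrogen']
active = {('loc1', 'electricity'), ('loc2', 'hydrogen')}  # non-trivial pairs

m = pyomo.ConcreteModel()
m.locationCommoditySet = pyomo.Set(
    dimen=2, initialize=[(loc, commod) for loc in locations
                         for commod in commodities if (loc, commod) in active])
m.op = pyomo.Var(m.locationCommoditySet)

def commodityBalanceConstraint(m, loc, commod):
    return m.op[loc, commod] == 0  # stand-in for the summed contributions

m.commodityBalanceConstraint = pyomo.Constraint(
    m.locationCommoditySet, rule=commodityBalanceConstraint)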
Example No. 5
    def declareSharedPotentialConstraints(self, pyM):
        """
        Declare shared potential constraints, e.g. if a maximum potential of salt caverns has to be shared by
        salt cavern storing methane and salt caverns storing hydrogen.

        :param pyM: a pyomo ConcreteModel instance which contains parameters, sets, variables,
            constraints and objective required for the optimization set up and solving.
        :type pyM: pyomo ConcreteModel
        """
        utils.output('Declaring shared potential constraint...', self.verbose,
                     0)

        # Create a shared potential dictionary (maps a shared potential ID and a location to the components that
        # share the potential)
        potentialDict = {}
        for mdl in self.componentModelingDict.values():
            for compName, comp in mdl.componentsDict.items():
                if comp.sharedPotentialID is not None:
                    for loc in comp.locationalEligibility.index:
                        if comp.capacityMax[loc] != 0:
                            potentialDict.setdefault(
                                (comp.sharedPotentialID, loc), []).append(compName)
        pyM.sharedPotentialDict = potentialDict

        # Define and initialize constraints for each instance and location where components have to share an available
        # potential. Sum up the relative contributions to the shared potential and ensure that the total share is
        # <= 100%. For this, get the contributions to the shared potential for the corresponding ID and
        # location from each modeling class.
        def sharedPotentialConstraint(pyM, ID, loc):
            return sum(
                mdl.getSharedPotentialContribution(pyM, ID, loc)
                for mdl in self.componentModelingDict.values()) <= 1
        pyM.ConstraintSharedPotentials = \
            pyomo.Constraint(pyM.sharedPotentialDict.keys(), rule=sharedPotentialConstraint)
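
Stripped of the FINE plumbing, the shared-potential pattern is one <= 1 constraint per (ID, location) pair. A sketch with an invented dictionary, where plain share variables stand in for getSharedPotentialContribution:

import pyomo.environ as pyomo

m = pyomo.ConcreteModel()
sharedPotentialDict = {('saltCaverns', 'loc1'): ['hydrogenCavern', 'methaneCavern']}
m.share = pyomo.Var(
    [(comp, loc) for (ID, loc), comps in sharedPotentialDict.items()
     for comp in comps], bounds=(0, 1))

def sharedPotentialConstraint(m, ID, loc):
    return sum(m.share[comp, loc] for comp in sharedPotentialDict[ID, loc]) <= 1

m.ConstraintSharedPotentials = pyomo.Constraint(
    sharedPotentialDict.keys(), rule=sharedPotentialConstraint)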
Example No. 6
def writeEnergySystemModelToNetCDF(
    esM,
    outputFilePath="my_esm.nc",
    overwriteExisting=False,
    optSumOutputLevel=0,
    optValOutputLevel=1,
    groupPrefix=None,
):
    """
    Write the energySystemModel (input and, if it exists, output) to a netCDF file.

    :param esM: EnergySystemModel instance in which the model is held
    :type esM: EnergySystemModel instance

    :param outputFilePath: output file name (can include full path)
        |br| * the default value is "my_esm.nc"
    :type outputFilePath: string

    :param overwriteExisting: Overwrite existing netCDF file
        |br| * the default value is False
    :type overwriteExisting: boolean

    :param optSumOutputLevel: Output level of the optimization summary (see
        EnergySystemModel). Either an integer (0,1,2) which holds for all model
        classes or a dictionary with model class names as keys and an integer
        (0,1,2) for each key (e.g. {'StorageModel':1,'SourceSinkModel':1,...})
        |br| * the default value is 0
    :type optSumOutputLevel: int (0,1,2) or dict

    :param optValOutputLevel: Output level of the optimal values. Either an
        integer (0,1) which holds for all model classes or a dictionary with
        model class names as keys and an integer (0,1) for each key (e.g.
        {'StorageModel':1,'SourceSinkModel':1,...})

        * 0: all values are kept.
        * 1: Lines containing only zeroes are dropped.

        |br| * the default value is 1
    :type optValOutputLevel: int (0,1) or dict

    :param groupPrefix: if specified, multiple xarray datasets (with esM
        instance data) are saved to the same netcdf file. The dictionary
        structure is then {group_prefix}/{group}/{...} instead of {group}/{...}
        |br| * the default value is None
    :type groupPrefix: string

    """

    if overwriteExisting:
        if Path(outputFilePath).is_file():
            Path(outputFilePath).unlink()

    utils.output("\nWriting output to netCDF... ", esM.verbose, 0)
    _t = time.time()

    xr_dss_input = convertOptimizationInputToDatasets(esM)

    writeDatasetsToNetCDF(xr_dss_input, outputFilePath, groupPrefix=groupPrefix)

    if esM.objectiveValue is not None:  # model was optimized
        xr_dss_output = convertOptimizationOutputToDatasets(
            esM, optSumOutputLevel, optValOutputLevel
        )
        writeDatasetsToNetCDF(xr_dss_output, outputFilePath, groupPrefix=groupPrefix)

    utils.output("Done. (%.4f" % (time.time() - _t) + " sec)", esM.verbose, 0)
Example No. 7
def convertOptimizationOutputToDatasets(esM, optSumOutputLevel=0, optValOutputLevel=1):
    """
    Take the output of an optimized esM instance and convert it into xarray datasets (one per component).

    :param esM: EnergySystemModel instance in which the optimized model is held
    :type esM: EnergySystemModel instance

    :param optSumOutputLevel: Output level of the optimization summary (see
        EnergySystemModel). Either an integer (0,1,2) which holds for all model
        classes or a dictionary with model class names as keys and an integer
        (0,1,2) for each key (e.g. {'StorageModel':1,'SourceSinkModel':1,...})
        |br| * the default value is 0
    :type optSumOutputLevel: int (0,1,2) or dict

    :param optValOutputLevel: Output level of the optimal values. Either an
        integer (0,1) which holds for all model classes or a dictionary with
        model class names as keys and an integer (0,1) for each key (e.g.
        {'StorageModel':1,'SourceSinkModel':1,...})

        - 0: all values are kept.
        - 1: Lines containing only zeroes are dropped.

        |br| * the default value is 1
    :type optValOutputLevel: int (0,1) or dict

    :return: xr_dss - EnergySystemModel instance output data, as a nested dictionary
        ({'Results': {model class name: {component name: xr.Dataset}}})
    :rtype: dict
    """

    # Create the xr.Dataset dict for all components
    xr_dss = dict.fromkeys(esM.componentModelingDict.keys())
    for model_dict in esM.componentModelingDict.keys():
        xr_dss[model_dict] = {
            key: xr.Dataset()
            for key in esM.componentModelingDict[model_dict].componentsDict.keys()
        }

    # Write output from esM.getOptimizationSummary to datasets
    for name in esM.componentModelingDict.keys():
        utils.output("\tProcessing " + name + " ...", esM.verbose, 0)
        oL = optSumOutputLevel
        oL_ = oL[name] if isinstance(oL, dict) else oL
        optSum = esM.getOptimizationSummary(name, outputLevel=oL_)
        if esM.componentModelingDict[name].dimension == "1dim":
            for component in optSum.index.get_level_values(0).unique():

                variables = optSum.loc[component].index.get_level_values(0)
                units = optSum.loc[component].index.get_level_values(1)
                variables_unit = dict(zip(variables, units))

                for variable in (
                    optSum.loc[component].index.get_level_values(0).unique()
                ):
                    df = optSum.loc[(component, variable)]
                    df = df.iloc[-1]
                    df.name = variable
                    df.index.rename("space", inplace=True)
                    df = pd.to_numeric(df)
                    xr_da = df.to_xarray()

                    # add variable [e.g. 'TAC'] and units to attributes of xarray
                    unit = variables_unit[variable]
                    xr_da.attrs[variable] = unit

                    xr_dss[name][component] = xr.merge(
                        [xr_dss[name][component], xr_da], combine_attrs="drop_conflicts"
                    )
        elif esM.componentModelingDict[name].dimension == "2dim":
            for component in optSum.index.get_level_values(0).unique():

                variables = optSum.loc[component].index.get_level_values(0)
                units = optSum.loc[component].index.get_level_values(1)
                variables_unit = dict(zip(variables, units))

                for variable in (
                    optSum.loc[component].index.get_level_values(0).unique()
                ):
                    df = optSum.loc[(component, variable)]
                    if len(df.index.get_level_values(0).unique()) > 1:
                        idx = df.index.get_level_values(0).unique()[-1]
                        df = df.xs(idx, level=0)
                    else:
                        df.index = df.index.droplevel(0)
                    # df = df.iloc[-1]
                    df = df.stack()
                    # df.name = (name, component, variable)
                    df.name = variable
                    df.index.rename(["space", "space_2"], inplace=True)
                    df = pd.to_numeric(df)
                    xr_da = df.to_xarray()

                    # add variable [e.g. 'TAC'] and units to attributes of xarray
                    unit = variables_unit[variable]
                    xr_da.attrs[variable] = unit

                    xr_dss[name][component] = xr.merge(
                        [xr_dss[name][component], xr_da], combine_attrs="drop_conflicts"
                    )

        # Write output from esM.componentModelingDict[name].getOptimalValues() to datasets
        data = esM.componentModelingDict[name].getOptimalValues()
        oL = optValOutputLevel
        oL_ = oL[name] if isinstance(oL, dict) else oL
        dataTD1dim, indexTD1dim, dataTD2dim, indexTD2dim = [], [], [], []
        dataTI, indexTI = [], []
        for key, d in data.items():
            if d["values"] is None:
                continue
            if d["timeDependent"]:
                if d["dimension"] == "1dim":
                    dataTD1dim.append(d["values"]), indexTD1dim.append(key)
                elif d["dimension"] == "2dim":
                    dataTD2dim.append(d["values"]), indexTD2dim.append(key)
            else:
                dataTI.append(d["values"]), indexTI.append(key)
        # One dimensional time dependent data
        if dataTD1dim:
            names = ["Variable", "Component", "Location"]
            dfTD1dim = pd.concat(dataTD1dim, keys=indexTD1dim, names=names)
            # dfTD1dim = dfTD1dim.loc[
            #    ((dfTD1dim != 0) & (~dfTD1dim.isnull())).any(axis=1)
            # ]
            for variable in dfTD1dim.index.get_level_values(0).unique():
                # for component in dfTD1dim.index.get_level_values(1).unique():
                for component in (
                    dfTD1dim.loc[variable].index.get_level_values(0).unique()
                ):
                    df = dfTD1dim.loc[(variable, component)].T.stack()
                    # df.name = (name, component, variable)
                    df.name = variable
                    df.index.rename(["time", "space"], inplace=True)
                    xr_da = df.to_xarray()
                    xr_dss[name][component] = xr.merge([xr_dss[name][component], xr_da])
        # Two dimensional time dependent data
        if dataTD2dim:
            names = ["Variable", "Component", "LocationIn", "LocationOut"]
            dfTD2dim = pd.concat(dataTD2dim, keys=indexTD2dim, names=names)
            # dfTD2dim = dfTD2dim.loc[
            #    ((dfTD2dim != 0) & (~dfTD2dim.isnull())).any(axis=1)
            # ]
            for variable in dfTD2dim.index.get_level_values(0).unique():
                # for component in dfTD2dim.index.get_level_values(1).unique():
                for component in (
                    dfTD2dim.loc[variable].index.get_level_values(0).unique()
                ):
                    df = dfTD2dim.loc[(variable, component)].stack()
                    # df.name = (name, component, variable)
                    df.name = variable
                    df.index.rename(["space", "space_2", "time"], inplace=True)
                    df.index = df.index.reorder_levels([2, 0, 1])
                    xr_da = df.to_xarray()
                    xr_dss[name][component] = xr.merge([xr_dss[name][component], xr_da])
        # Time independent data
        if dataTI:
            # One dimensional
            if esM.componentModelingDict[name].dimension == "1dim":
                names = ["Variable type", "Component"]
                dfTI = pd.concat(dataTI, keys=indexTI, names=names)
                # dfTI = dfTI.loc[((dfTI != 0) & (~dfTI.isnull())).any(axis=1)]
                for variable in dfTI.index.get_level_values(0).unique():
                    # for component in dfTI.index.get_level_values(1).unique():
                    for component in (
                        dfTI.loc[variable].index.get_level_values(0).unique()
                    ):
                        df = dfTI.loc[(variable, component)].T
                        # df.name = (name, component, variable)
                        df.name = variable
                        df.index.rename("space", inplace=True)
                        xr_da = df.to_xarray()
                        xr_dss[name][component] = xr.merge(
                            [xr_dss[name][component], xr_da]
                        )
            # Two dimensional
            elif esM.componentModelingDict[name].dimension == "2dim":
                names = ["Variable type", "Component", "Location"]
                dfTI = pd.concat(dataTI, keys=indexTI, names=names)
                # dfTI = dfTI.loc[((dfTI != 0) & (~dfTI.isnull())).any(axis=1)]
                for variable in dfTI.index.get_level_values(0).unique():
                    # for component in dfTI.index.get_level_values(1).unique():
                    for component in (
                        dfTI.loc[variable].index.get_level_values(0).unique()
                    ):
                        df = dfTI.loc[(variable, component)].T.stack()
                        # df.name = (name, component, variable)
                        df.name = variable
                        df.index.rename(["space", "space_2"], inplace=True)
                        xr_da = df.to_xarray()
                        xr_dss[name][component] = xr.merge(
                            [xr_dss[name][component], xr_da]
                        )

    for name in esM.componentModelingDict.keys():
        for component in esM.componentModelingDict[name].componentsDict.keys():
            if list(xr_dss[name][component].data_vars) == []:
                # Delete components that have not been built.
                del xr_dss[name][component]
            else:
                # Cast space coordinates to str. If this is not done, the dtype will be object.
                xr_dss[name][component].coords["space"] = (
                    xr_dss[name][component].coords["space"].astype(str)
                )
                if esM.componentModelingDict[name].dimension == "2dim":
                    xr_dss[name][component].coords["space_2"] = (
                        xr_dss[name][component].coords["space_2"].astype(str)
                    )

    xr_dss = {"Results": xr_dss}

    return xr_dss
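
The conversion idiom used throughout the function (a named pandas Series with named index levels becomes an xarray DataArray, and repeated xr.merge calls accumulate the variables into one Dataset) in a toy example with invented numbers:

import pandas as pd
import xarray as xr

ds = xr.Dataset()
for varName, values in [('capacity', [1.0, 2.5]), ('TAC', [0.3, 0.9])]:
    s = pd.Series(values, name=varName,
                  index=pd.Index(['loc1', 'loc2'], name='space'))
    ds = xr.merge([ds, s.to_xarray()])
# ds now holds the data variables 'capacity' and 'TAC' on coordinate 'space'.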
Example No. 8
def writeOptimizationOutputToExcel(esM,
                                   outputFileName='scenarioOutput',
                                   optSumOutputLevel=2,
                                   optValOutputLevel=1):
    """
    Write optimization output to an Excel file.

    :param esM: EnergySystemModel instance in which the optimized model is held
    :type esM: EnergySystemModel instance

    :param outputFileName: name of the Excel output file (without .xlsx ending)
        |br| * the default value is 'scenarioOutput'
    :type outputFileName: string

    :param optSumOutputLevel: output level of the optimization summary (see EnergySystemModel). Either an integer
        (0,1,2) which holds for all model classes or a dictionary with model class names as keys and an integer
        (0,1,2) for each key (e.g. {'StorageModel':1,'SourceSinkModel':1,...})
        |br| * the default value is 2
    :type optSumOutputLevel: int (0,1,2) or dict

    :param optValOutputLevel: output level of the optimal values. Either an integer (0,1) which holds for all
        model classes or a dictionary with model class names as keys and an integer (0,1) for each key
        (e.g. {'StorageModel':1,'SourceSinkModel':1,...})
        - 0: all values are kept.
        - 1: Lines containing only zeroes are dropped.
        |br| * the default value is 1
    :type optValOutputLevel: int (0,1) or dict
    """
    utils.output('\nWriting output to Excel... ', esM.verbose, 0)
    _t = time.time()
    writer = pd.ExcelWriter(outputFileName + '.xlsx')

    for name in esM.componentModelingDict.keys():
        utils.output('\tProcessing ' + name + ' ...', esM.verbose, 0)
        oL = optSumOutputLevel
        oL_ = oL[name] if isinstance(oL, dict) else oL
        optSum = esM.getOptimizationSummary(name, outputLevel=oL_)
        if not optSum.empty:
            optSum.to_excel(
                writer, name[:-5] + 'OptSummary_' +
                esM.componentModelingDict[name].dimension)

        data = esM.componentModelingDict[name].getOptimalValues()
        oL = optValOutputLevel
        oL_ = oL[name] if isinstance(oL, dict) else oL
        dataTD1dim, indexTD1dim, dataTD2dim, indexTD2dim = [], [], [], []
        dataTI, indexTI = [], []
        for key, d in data.items():
            if d['values'] is None:
                continue
            if d['timeDependent']:
                if d['dimension'] == '1dim':
                    dataTD1dim.append(d['values']), indexTD1dim.append(key)
                elif d['dimension'] == '2dim':
                    dataTD2dim.append(d['values']), indexTD2dim.append(key)
            else:
                dataTI.append(d['values']), indexTI.append(key)
        if dataTD1dim:
            names = ['Variable', 'Component', 'Location']
            dfTD1dim = pd.concat(dataTD1dim, keys=indexTD1dim, names=names)
            if oL_ == 1:
                dfTD1dim = dfTD1dim.loc[((dfTD1dim != 0) &
                                         (~dfTD1dim.isnull())).any(axis=1)]
            if not dfTD1dim.empty:
                dfTD1dim.to_excel(writer, name[:-5] + '_TDoptVar_1dim')
        if dataTD2dim:
            names = ['Variable', 'Component', 'LocationIn', 'LocationOut']
            dfTD2dim = pd.concat(dataTD2dim, keys=indexTD2dim, names=names)
            if oL_ == 1:
                dfTD2dim = dfTD2dim.loc[((dfTD2dim != 0) &
                                         (~dfTD2dim.isnull())).any(axis=1)]
            if not dfTD2dim.empty:
                dfTD2dim.to_excel(writer, name[:-5] + '_TDoptVar_2dim')
        if dataTI:
            if esM.componentModelingDict[name].dimension == '1dim':
                names = ['Variable type', 'Component']
            elif esM.componentModelingDict[name].dimension == '2dim':
                names = ['Variable type', 'Component', 'Location']
            dfTI = pd.concat(dataTI, keys=indexTI, names=names)
            if oL_ == 1:
                dfTI = dfTI.loc[((dfTI != 0) & (~dfTI.isnull())).any(axis=1)]
            if not dfTI.empty:
                dfTI.to_excel(
                    writer, name[:-5] + '_TIoptVar_' +
                    esM.componentModelingDict[name].dimension)

    periodsOrder = pd.DataFrame([esM.periodsOrder],
                                index=['periodsOrder'],
                                columns=esM.periods)
    periodsOrder.to_excel(writer, 'Misc')
    utils.output('\tSaving file...', esM.verbose, 0)
    writer.close()  # ExcelWriter.save() was removed in pandas 2.0; close() saves and closes the file
    utils.output('Done. (%.4f' % (time.time() - _t) + ' sec)', esM.verbose, 0)
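
A possible call, assuming an optimized EnergySystemModel instance esM:

writeOptimizationOutputToExcel(esM, outputFileName='scenarioOutput',
                               optSumOutputLevel=2, optValOutputLevel=1)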
Example No. 9
    def optimize(self,
                 declaresOptimizationProblem=True,
                 timeSeriesAggregation=False,
                 logFileName='',
                 threads=3,
                 solver='gurobi',
                 timeLimit=None,
                 optimizationSpecs='',
                 warmstart=False):
        """
        Optimize the specified energy system for which a pyomo ConcreteModel instance is built or called upon.
        A pyomo instance is optimized with the specified inputs, and the optimization results are further
        processed.

        **Default arguments:**

        :param declaresOptimizationProblem: states if the optimization problem should be declared (True) or not (False).
            (a) If true, the declareOptimizationProblem function is called and a pyomo ConcreteModel instance is built.
            (b) If false, a previously declared pyomo ConcreteModel instance is used.
            |br| * the default value is True
        :type declaresOptimizationProblem: boolean

        :param timeSeriesAggregation: states if the optimization of the energy system model should be done with
            (a) the full time series (False) or
            (b) clustered time series data (True).
            |br| * the default value is False
        :type timeSeriesAggregation: boolean

        :param logFileName: logFileName is used for naming the log file of the optimization solver output
            if gurobi is used as the optimization solver.
            If the logFileName is given as an absolute path (e.g. logFileName = os.path.join(os.getcwd(),
            'Results', 'logFileName.txt')) the log file will be stored in the specified directory. Otherwise,
            it will be stored by default in the directory where the executing python script is called.
            |br| * the default value is ''
        :type logFileName: string

        :param threads: number of computational threads used for solving the optimization (solver dependent
            input) if gurobi is used as the solver. A value of 0 results in using all available threads. If
            a value larger than the number of available threads is chosen, the value is reset to the maximum
            number of threads.
            |br| * the default value is 3
        :type threads: positive integer

        :param solver: specifies which solver should solve the optimization problem (which of course has to be
            installed on the machine on which the model is run).
            |br| * the default value is 'gurobi'
        :type solver: string

        :param timeLimit: if not specified as None, indicates the maximum solve time of the optimization problem
            in seconds (solver dependent input). The use of this parameter is suggested when running models in
            runtime restricted environments (such as clusters with job submission systems). If the runtime
            limitation is triggered before an optimal solution is available, the best solution obtained up
            until then (if available) is processed.
            |br| * the default value is None
        :type timeLimit: strictly positive integer or None

        :param optimizationSpecs: specifies parameters for the optimization solver (see the respective solver
            documentation for more information)
            |br| * the default value is ''
        :type optimizationSpecs: string

        :param warmstart: specifies if a warm start of the optimization should be considered
            (not always supported by the solvers).
            |br| * the default value is False
        :type warmstart: boolean

        Last edited: August 10, 2018
        |br| @author: Lara Welder
        """
        if declaresOptimizationProblem:
            self.declareOptimizationProblem(
                timeSeriesAggregation=timeSeriesAggregation)
        else:
            if self.pyM is None:
                raise TypeError(
                    'The optimization problem is not declared yet. Set the argument declaresOptimizationProblem'
                    ' to True or call the declareOptimizationProblem function first.'
                )

        # Get starting time of the optimization to, later on, obtain the total run time of the optimize function call
        timeStart = time.time()

        # Check correctness of inputs
        utils.checkOptimizeInput(timeSeriesAggregation,
                                 self.isTimeSeriesDataClustered, logFileName,
                                 threads, solver, timeLimit, optimizationSpecs,
                                 warmstart)

        # Store keyword arguments in the EnergySystemModel instance
        self.solverSpecs['logFileName'], self.solverSpecs[
            'threads'] = logFileName, threads
        self.solverSpecs['solver'], self.solverSpecs[
            'timeLimit'] = solver, timeLimit
        self.solverSpecs['optimizationSpecs'], self.solverSpecs[
            'hasTSA'] = optimizationSpecs, timeSeriesAggregation

        ################################################################################################################
        #                                  Solve the specified optimization problem                                    #
        ################################################################################################################

        # Set which solver should solve the specified optimization problem
        optimizer = opt.SolverFactory(solver)

        # Set, if specified, the time limit
        if self.solverSpecs['timeLimit'] is not None and solver == 'gurobi':
            optimizer.options['timelimit'] = timeLimit

        # Set the specified solver options
        if 'LogToConsole=' not in optimizationSpecs:
            if self.verbose == 2:
                optimizationSpecs += ' LogToConsole=0'

        # Solve optimization problem. The optimization solve time is stored and the solver information is printed.
        if solver == 'gurobi':
            optimizer.set_options('Threads=' + str(threads) + ' logfile=' +
                                  logFileName + ' ' + optimizationSpecs)
            solver_info = optimizer.solve(self.pyM,
                                          warmstart=warmstart,
                                          tee=True)
        else:
            solver_info = optimizer.solve(self.pyM, tee=True)
        self.solverSpecs['solvetime'] = time.time() - timeStart
        utils.output(solver_info.solver(), self.verbose, 0)
        utils.output(solver_info.problem(), self.verbose, 0)
        utils.output(
            'Solve time: ' + str(self.solverSpecs['solvetime']) + ' sec.',
            self.verbose, 0)

        ################################################################################################################
        #                                      Post-process optimization output                                        #
        ################################################################################################################

        _t = time.time()

        # Post-process the optimization output by differentiating between different solver statuses and termination
        # conditions. First, check if the status and termination_condition of the optimization are acceptable.
        # If not, no output is generated.
        # TODO check if this is still compatible with the latest pyomo version
        status, termCondition = solver_info.solver.status, solver_info.solver.termination_condition
        if status == opt.SolverStatus.error or status == opt.SolverStatus.aborted or status == opt.SolverStatus.unknown:
            utils.output(
                'Solver status:  ' + str(status) +
                ', termination condition:  ' + str(termCondition) +
                '. No output is generated.', self.verbose, 0)
        elif solver_info.solver.termination_condition == opt.TerminationCondition.infeasibleOrUnbounded or \
            solver_info.solver.termination_condition == opt.TerminationCondition.infeasible or \
                solver_info.solver.termination_condition == opt.TerminationCondition.unbounded:
            utils.output(
                'Optimization problem is ' +
                str(solver_info.solver.termination_condition) +
                '. No output is generated.', self.verbose, 0)
        else:
            # If the solution is not optimal (e.g. the solver stopped at a time limit with a feasible incumbent),
            # show a warning message.
            if not solver_info.solver.termination_condition == opt.TerminationCondition.optimal and self.verbose < 2:
                warnings.warn(
                    'Output is generated for a non-optimal solution.')
            utils.output("\nProcessing optimization output...", self.verbose,
                         0)
            # Declare component specific sets, variables and constraints
            # Column width for the status output: longest model class name plus padding
            w = str(max(len(k) for k in self.componentModelingDict.keys()) + 6)
            for key, mdl in self.componentModelingDict.items():
                __t = time.time()
                mdl.setOptimalValues(self, self.pyM)
                outputString = ('for {:' + w + '}').format(
                    key + ' ...') + "(%.4f" % (time.time() - __t) + "sec)"
                utils.output(outputString, self.verbose, 0)

        utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n',
                     self.verbose, 0)

        # Store the runtime of the optimize function call in the EnergySystemModel instance
        self.solverSpecs['runtime'] = self.solverSpecs[
            'buildtime'] + time.time() - timeStart
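
A typical call sequence might look as follows, assuming a fully specified EnergySystemModel instance esM and an installed gurobi solver (both are assumptions here):

esM.cluster(numberOfTypicalPeriods=7, numberOfTimeStepsPerPeriod=24)
esM.optimize(timeSeriesAggregation=True, solver='gurobi',
             timeLimit=3600, threads=4)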
Example No. 10
    def declareOptimizationProblem(self, timeSeriesAggregation=False):
        """
        Declare the optimization problem belonging to the specified energy system for which a pyomo concrete model
        instance is built and filled with
        * basic time sets,
        * sets, variables and constraints contributed by the component modeling classes,
        * basic cross-component constraints (shared potential and commodity balance constraints), and
        * an objective function.

        **Default arguments:**

        :param timeSeriesAggregation: states if the optimization of the energy system model should be done with
            (a) the full time series (False) or
            (b) clustered time series data (True).
            |br| * the default value is False
        :type timeSeriesAggregation: boolean

        Last edited: November 10, 2018
        |br| @author: Lara Welder
        """
        # Get starting time of the optimization to, later on, obtain the total run time of the optimize function call
        timeStart = time.time()

        # Check correctness of inputs
        utils.checkDeclareOptimizationProblemInput(
            timeSeriesAggregation, self.isTimeSeriesDataClustered)

        ################################################################################################################
        #                           Initialize mathematical model (ConcreteModel) instance                             #
        ################################################################################################################

        # Initialize a pyomo ConcreteModel which will be used to store the mathematical formulation of the model.
        # The ConcreteModel instance is stored in the EnergySystemModel instance, which makes it available for
        # post-processing or debugging. A pyomo Suffix with the name dual is declared to make dual values associated
        # to the model's constraints available after optimization.
        self.pyM = pyomo.ConcreteModel()
        pyM = self.pyM
        pyM.dual = pyomo.Suffix(direction=pyomo.Suffix.IMPORT)

        # Set time sets for the model instance
        self.declareTimeSets(pyM, timeSeriesAggregation)

        ################################################################################################################
        #                         Declare component specific sets, variables and constraints                           #
        ################################################################################################################

        for key, mdl in self.componentModelingDict.items():
            _t = time.time()
            utils.output(
                'Declaring sets, variables and constraints for ' + key,
                self.verbose, 0)
            utils.output('\tdeclaring sets... ', self.verbose, 0)
            mdl.declareSets(self, pyM)
            utils.output('\tdeclaring variables... ', self.verbose, 0)
            mdl.declareVariables(self, pyM)
            utils.output('\tdeclaring constraints... ', self.verbose, 0)
            mdl.declareComponentConstraints(self, pyM)
            utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n',
                         self.verbose, 0)

        ################################################################################################################
        #                              Declare cross-componential sets and constraints                                 #
        ################################################################################################################

        # Declare constraints for enforcing shared capacities
        _t = time.time()
        self.declareSharedPotentialConstraints(pyM)
        utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n',
                     self.verbose, 0)

        # Declare commodity balance constraints (one balance constraint for each commodity, location and time step)
        _t = time.time()
        self.declareCommodityBalanceConstraints(pyM)
        utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n',
                     self.verbose, 0)

        ################################################################################################################
        #                                         Declare objective function                                           #
        ################################################################################################################

        # Declare objective function by obtaining the contributions to the objective function from all modeling classes
        _t = time.time()
        self.declareObjective(pyM)
        utils.output('\t\t(%.4f' % (time.time() - _t) + ' sec)\n',
                     self.verbose, 0)

        # Store the build time of the optimize function call in the EnergySystemModel instance
        self.solverSpecs['buildtime'] = time.time() - timeStart
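
The dual Suffix declared above is standard pyomo. In isolation, with a tiny hypothetical LP (solving requires an LP solver that exports duals, e.g. glpk if installed):

import pyomo.environ as pyomo

m = pyomo.ConcreteModel()
m.x = pyomo.Var(within=pyomo.NonNegativeReals)
m.c = pyomo.Constraint(expr=m.x >= 1)
m.obj = pyomo.Objective(expr=m.x)
m.dual = pyomo.Suffix(direction=pyomo.Suffix.IMPORT)

# After solving, m.dual[m.c] holds the shadow price of constraint c:
# pyomo.SolverFactory('glpk').solve(m)
# print(m.dual[m.c])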
Example No. 11
    def cluster(self,
                numberOfTypicalPeriods=7,
                numberOfTimeStepsPerPeriod=24,
                clusterMethod='hierarchical',
                sortValues=True,
                storeTSAinstance=False,
                **kwargs):
        """
        Cluster the time series data of all components considered in the EnergySystemModel instance and store
        the clustered data in the respective components. For this, the time series data is broken down
        into an ordered sequence of periods (e.g. 365 days) and to each period a typical period (e.g. 7 typical
        days with 24 hours) is assigned. For the clustering itself, the tsam package is used (cf.
        https://github.com/FZJ-IEK3-VSA/tsam). Additional keyword arguments for the TimeSeriesAggregation instance
        can be added (facilitated by kwargs). As an example: it might be useful to add extreme periods to the
        clustered typical periods.

        **Default arguments:**

        :param numberOfTypicalPeriods: states the number of typical periods into which the time series data
            should be clustered. The total number of considered time steps in the energy system must be an
            integer multiple of the number of time steps per period.
            Note: Please refer to the tsam package documentation of the parameter noTypicalPeriods for more
            information.
            |br| * the default value is 7
        :type numberOfTypicalPeriods: strictly positive integer

        :param numberOfTimeStepsPerPeriod: states the number of time steps per period
            |br| * the default value is 24
        :type numberOfTimeStepsPerPeriod: strictly positive integer

        :param clusterMethod: states the method which is used in the tsam package for clustering the time series
            data. Options are for example 'averaging','k_means','exact k_medoid' or 'hierarchical'.
            Note: Please refer to the tsam package documentation of the parameter clusterMethod for more information.
            |br| * the default value is 'hierarchical'
        :type clusterMethod: string

        :param sortValues: states if the algorithm in the tsam package should use
            (a) the sorted duration curves (-> True) or
            (b) the original profiles (-> False)
            of the time series data within a period for clustering.
            Note: Please refer to the tsam package documentation of the parameter sortValues for more information.
            |br| * the default value is True
        :type sortValues: boolean

        :param storeTSAinstance: states if the TimeSeriesAggregation instance created during clustering should be
            stored in the EnergySystemModel instance.
            |br| * the default value is False
        :type storeTSAinstance: boolean

        Last edited: August 10, 2018
        |br| @author: Lara Welder
        """

        # Check input arguments which have to fit the temporal representation of the energy system
        utils.checkClusteringInput(numberOfTypicalPeriods,
                                   numberOfTimeStepsPerPeriod,
                                   len(self.totalTimeSteps))

        timeStart = time.time()
        utils.output(
            '\nClustering time series data with ' +
            str(numberOfTypicalPeriods) + ' typical periods and ' +
            str(numberOfTimeStepsPerPeriod) + ' time steps per period...',
            self.verbose, 0)

        # Format data to fit the input requirements of the tsam package:
        # (a) append the time series data from all components stored in all initialized modeling classes to a pandas
        #     DataFrame with unique column names
        # (b) thereby collect the weights which should be considered for each time series as well in a dictionary
        timeSeriesData, weightDict = [], {}
        for mdlName, mdl in self.componentModelingDict.items():
            for compName, comp in mdl.componentsDict.items():
                compTimeSeriesData, compWeightDict = comp.getDataForTimeSeriesAggregation()
                if compTimeSeriesData is not None:
                    timeSeriesData.append(compTimeSeriesData)
                    weightDict.update(compWeightDict)
        timeSeriesData = pd.concat(timeSeriesData, axis=1)
        # Note: Sets index for the time series data. The index is of no further relevance in the energy system model.
        timeSeriesData.index = pd.date_range('2050-01-01 00:30:00',
                                             periods=len(self.totalTimeSteps),
                                             freq=(str(self.hoursPerTimeStep) +
                                                   'H'),
                                             tz='Europe/Berlin')

        # Sort columns for reproducibility of the TimeSeriesAggregation call
        # (DataFrame.reindex_axis was removed from pandas; reindex is the current equivalent)
        timeSeriesData = timeSeriesData.reindex(sorted(timeSeriesData.columns), axis=1)
        clusterClass = TimeSeriesAggregation(
            timeSeries=timeSeriesData,
            noTypicalPeriods=numberOfTypicalPeriods,
            hoursPerPeriod=numberOfTimeStepsPerPeriod * self.hoursPerTimeStep,
            clusterMethod=clusterMethod,
            sortValues=sortValues,
            weightDict=weightDict,
            **kwargs)

        # Convert the clustered data to a pandas DataFrame and store the respective clustered time series data in the
        # associated components
        data = pd.DataFrame.from_dict(clusterClass.clusterPeriodDict)
        for mdlName, mdl in self.componentModelingDict.items():
            for compName, comp in mdl.componentsDict.items():
                comp.setAggregatedTimeSeriesData(data)

        # Store time series aggregation parameters in class instance
        if storeTSAinstance:
            self.tsaInstance = clusterClass
        self.typicalPeriods = clusterClass.clusterPeriodIdx
        self.timeStepsPerPeriod = list(range(numberOfTimeStepsPerPeriod))
        self.periods = list(
            range(int(len(self.totalTimeSteps) /
                      len(self.timeStepsPerPeriod))))
        self.interPeriodTimeSteps = list(
            range(
                int(len(self.totalTimeSteps) / len(self.timeStepsPerPeriod)) +
                1))
        self.periodsOrder = clusterClass.clusterOrder
        self.periodOccurrences = [
            (self.periodsOrder == tp).sum() / self.numberOfYears
            for tp in self.typicalPeriods
        ]

        # Set cluster flag to true (used to ensure consistently clustered time series data)
        self.isTimeSeriesDataClustered = True
        utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n",
                     self.verbose, 0)
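
The period bookkeeping at the end of cluster() mirrors this small numeric example (the cluster order is invented):

import numpy as np

periodsOrder = np.array([0, 1, 1, 0, 2, 1])  # hypothetical clusterOrder
typicalPeriods = [0, 1, 2]
numberOfYears = 1

# How often each typical period occurs per modeled year: [2.0, 3.0, 1.0]
periodOccurrences = [(periodsOrder == tp).sum() / numberOfYears
                     for tp in typicalPeriods]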
Example No. 12
def writeOptimizationOutputToExcel(
    esM, outputFileName="scenarioOutput", optSumOutputLevel=2, optValOutputLevel=1
):
    """
    Write optimization output to an Excel file.

    :param esM: EnergySystemModel instance in which the optimized model is held
    :type esM: EnergySystemModel instance

    :param outputFileName: name of the Excel output file (without .xlsx ending)
        |br| * the default value is 'scenarioOutput'
    :type outputFileName: string

    :param optSumOutputLevel: output level of the optimization summary (see EnergySystemModel). Either an integer
        (0,1,2) which holds for all model classes or a dictionary with model class names as keys and an integer
        (0,1,2) for each key (e.g. {'StorageModel':1,'SourceSinkModel':1,...})
        |br| * the default value is 2
    :type optSumOutputLevel: int (0,1,2) or dict

    :param optValOutputLevel: output level of the optimal values. Either an integer (0,1) which holds for all
        model classes or a dictionary with model class names as keys and an integer (0,1) for each key
        (e.g. {'StorageModel':1,'SourceSinkModel':1,...})

        * 0: all values are kept.
        * 1: Lines containing only zeroes are dropped.

        |br| * the default value is 1
    :type optValOutputLevel: int (0,1) or dict
    """
    utils.output("\nWriting output to Excel... ", esM.verbose, 0)
    _t = time.time()
    writer = pd.ExcelWriter(outputFileName + ".xlsx")

    for name in esM.componentModelingDict.keys():
        utils.output("\tProcessing " + name + " ...", esM.verbose, 0)
        oL = optSumOutputLevel
        oL_ = oL[name] if isinstance(oL, dict) else oL
        optSum = esM.getOptimizationSummary(name, outputLevel=oL_)
        if not optSum.empty:
            optSum.to_excel(
                writer,
                name[:-5] + "OptSummary_" + esM.componentModelingDict[name].dimension,
            )

        data = esM.componentModelingDict[name].getOptimalValues()
        oL = optValOutputLevel
        oL_ = oL[name] if isinstance(oL, dict) else oL
        dataTD1dim, indexTD1dim, dataTD2dim, indexTD2dim = [], [], [], []
        dataTI, indexTI = [], []
        for key, d in data.items():
            if d["values"] is None:
                continue
            if d["timeDependent"]:
                if d["dimension"] == "1dim":
                    dataTD1dim.append(d["values"]), indexTD1dim.append(key)
                elif d["dimension"] == "2dim":
                    dataTD2dim.append(d["values"]), indexTD2dim.append(key)
            else:
                dataTI.append(d["values"]), indexTI.append(key)
        if dataTD1dim:
            names = ["Variable", "Component", "Location"]
            dfTD1dim = pd.concat(dataTD1dim, keys=indexTD1dim, names=names)
            if oL_ == 1:
                dfTD1dim = dfTD1dim.loc[
                    ((dfTD1dim != 0) & (~dfTD1dim.isnull())).any(axis=1)
                ]
            if not dfTD1dim.empty:
                dfTD1dim.to_excel(writer, name[:-5] + "_TDoptVar_1dim")
        if dataTD2dim:
            names = ["Variable", "Component", "LocationIn", "LocationOut"]
            dfTD2dim = pd.concat(dataTD2dim, keys=indexTD2dim, names=names)
            if oL_ == 1:
                dfTD2dim = dfTD2dim.loc[
                    ((dfTD2dim != 0) & (~dfTD2dim.isnull())).any(axis=1)
                ]
            if not dfTD2dim.empty:
                dfTD2dim.to_excel(writer, name[:-5] + "_TDoptVar_2dim")
        if dataTI:
            if esM.componentModelingDict[name].dimension == "1dim":
                names = ["Variable type", "Component"]
            elif esM.componentModelingDict[name].dimension == "2dim":
                names = ["Variable type", "Component", "Location"]
            dfTI = pd.concat(dataTI, keys=indexTI, names=names)
            if oL_ == 1:
                dfTI = dfTI.loc[((dfTI != 0) & (~dfTI.isnull())).any(axis=1)]
            if not dfTI.empty:
                dfTI.to_excel(
                    writer,
                    name[:-5]
                    + "_TIoptVar_"
                    + esM.componentModelingDict[name].dimension,
                )

    periodsOrder = pd.DataFrame(
        [esM.periodsOrder], index=["periodsOrder"], columns=esM.periods
    )
    periodsOrder.to_excel(writer, "Misc")
    if esM.segmentation:
        ls = []
        for i in esM.periodsOrder.tolist():
            ls.append(esM.timeStepsPerSegment[i])
        segmentDuration = pd.concat(ls, axis=1).rename(
            columns={"Segment Duration": "timeStepsPerSegment"}
        )
        segmentDuration.index.name = "segmentNumber"
        segmentDuration.to_excel(writer, "Misc", startrow=3)
    utils.output("\tSaving file...", esM.verbose, 0)
    writer.close()  # ExcelWriter.save() was removed in pandas 2.0; close() saves and closes the file
    utils.output("Done. (%.4f" % (time.time() - _t) + " sec)", esM.verbose, 0)