Example no. 1
    def setup_optim_problem(self,
                            prices: dict,
                            timegrid: Timegrid = None,
                            costs_only: bool = False) -> OptimProblem:
        """ Set up optimization problem for asset

        Args:
            prices (dict): Dictionary of price arrays needed by assets in portfolio
            timegrid (Timegrid, optional): Discretization grid for asset. Defaults to None, 
                                           in which case it must have been set previously
            costs_only (bool): Only create costs vector (speed up e.g. for sampling prices). Defaults to False 
        Returns:
            OptimProblem: Optimization problem to be used by optimizer
        """
        # set timegrid if given as optional argument
        if timegrid is not None:
            self.set_timegrid(timegrid)
        # check: timegrid set?
        if not hasattr(self, 'timegrid'):
            raise ValueError(
                'Set timegrid of asset before creating optim problem. Asset: '
                + self.name)

        if self.costs_time_series is None:
            costs_time_series = np.zeros(self.timegrid.T)
        else:
            if self.costs_time_series not in prices:
                raise ValueError(
                    'Costs not found in given price time series. Asset: ' +
                    self.name)
            costs_time_series = prices[self.costs_time_series].copy()
            if len(costs_time_series) != self.timegrid.T:  # vector must have the right size for the discretization
                raise ValueError(
                    'Length of costs array must be equal to length of time grid. Asset: '
                    + self.name)

        ##### using restricted timegrid for asset lifetime (save resources)
        I = self.timegrid.restricted.I  # indices of restricted time grid
        T = self.timegrid.restricted.T  # length of restr. grid
        discount_factors = self.timegrid.restricted.discount_factors  # disc fctrs of restr. grid
        if len(costs_time_series) != 1:  # if not a scalar, restrict to the asset's time window
            costs_time_series = costs_time_series[I]  # costs only in the asset's time window

        # Make vector of single min/max capacities.
        if isinstance(self.max_cap, (float, int)):
            max_cap = self.max_cap * np.ones(T)
        else:  # given in form of dict (start/end/values)
            max_cap = self.timegrid.restricted.values_to_grid(self.max_cap)
        if isinstance(self.min_cap, (float, int)):
            min_cap = self.min_cap * np.ones(T)
        else:  # given in form of dict (start/end/values)
            min_cap = self.timegrid.restricted.values_to_grid(self.min_cap)
        # need to scale to discretization step since: flow * dT = volume in time step
        min_cap = min_cap * self.timegrid.restricted.dt
        max_cap = max_cap * self.timegrid.restricted.dt

        mapping = pd.DataFrame()  ## mapping of variables for use in portfolio

        if (all(max_cap <= 0.)) or (all(min_cap >= 0.)):
            # in this case only one variable per time step and node is needed
            # upper / lower bound for dispatch Node1 / Node2
            l = np.hstack((-max_cap, min_cap))
            u = np.hstack((-min_cap, max_cap))
            # costs always act on abs(dispatch)
            if (all(max_cap <= 0.)):  # dispatch always negative
                costs = -costs_time_series - self.costs_const
            if (all(min_cap >= 0.)):  # dispatch always positive
                costs = costs_time_series + self.costs_const
            c = costs * discount_factors  # set costs and discount
            # linking the two nodes, assigning costs only to the receiving node
            c = np.hstack((np.zeros(T), c))
            if costs_only:
                return c
            mapping['time_step'] = np.hstack((I, I))
            # first set belongs to node 1, second to node 2
            mapping['node'] = np.vstack((np.tile(self.nodes[0].name, (T, 1)),
                                         np.tile(self.nodes[1].name, (T, 1))))
            # restriction: in and efficiency*out must add to zero
            A = sp.hstack((sp.identity(T), self.efficiency * sp.identity(T)))
            b = np.zeros(T)
            cType = 'S' * T  # equal type restriction
        else:
            raise NotImplementedError(
                'For transport, all capacities must be positive or all negative for clarity. Please use two transport assets'
            )

        ## other information (only here as this way we have the right length)
        mapping['asset'] = self.name
        mapping['type'] = 'd'  # only dispatch variables (needed to impose nodal restrictions in the portfolio)

        return OptimProblem(c=c,
                            l=l,
                            u=u,
                            A=A,
                            b=b,
                            cType=cType,
                            mapping=mapping)
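
A note on the structure above: the transport asset couples its two nodes with one dispatch variable per node and time step, linked by an equality constraint. The following minimal sketch (toy numbers only, no library API involved) rebuilds the same bounds, cost vector and coupling matrix for the always-positive-dispatch case, so the block layout is easy to inspect.

import numpy as np
import scipy.sparse as sp

# toy setup: 4 time steps, positive capacities -> dispatch always positive
T = 4
max_cap = 10. * np.ones(T)
min_cap = np.zeros(T)
costs_time_series = 0.5 * np.ones(T)
costs_const = 0.1
efficiency = 0.95
discount_factors = np.ones(T)

l = np.hstack((-max_cap, min_cap))    # first block: node 1 (sending), second block: node 2 (receiving)
u = np.hstack((-min_cap, max_cap))
c = np.hstack((np.zeros(T), (costs_time_series + costs_const) * discount_factors))
A = sp.hstack((sp.identity(T), efficiency * sp.identity(T)))   # in + efficiency * out = 0 per time step
b = np.zeros(T)

print(A.toarray()[0])   # first row couples the two variables of time step 0 with weights 1 and efficiency
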
Example no. 2
    def setup_optim_problem(self,
                            prices: dict,
                            timegrid: Timegrid = None,
                            costs_only: bool = False) -> OptimProblem:
        """ Set up optimization problem for asset

        Args:
            prices (dict): Dictionary of price arrays needed by assets in portfolio
            timegrid (Timegrid, optional): Discretization grid for asset. Defaults to None, 
                                           in which case it must have been set previously
            costs_only (bool): Only create costs vector (speed up e.g. for sampling prices). Defaults to False                                           

        Returns:
            OptimProblem: Optimization problem to be used by optimizer
        """
        # set timegrid if given as optional argument
        if timegrid is not None:
            self.set_timegrid(timegrid)
        # check: timegrid set?
        if not hasattr(self, 'timegrid'):
            raise ValueError(
                'Set timegrid of asset before creating optim problem. Asset: '
                + self.name)

        if self.price is not None:
            assert (self.price in prices)
            price = prices[self.price].copy()
        else:
            price = np.zeros(self.timegrid.T)

        if len(price) != self.timegrid.T:  # price vector must have the right size for the discretization
            raise ValueError(
                'Length of price array must be equal to length of time grid. Asset: '
                + self.name)

        ##### using restricted timegrid for asset lifetime (save resources)
        I = self.timegrid.restricted.I  # indices of restricted time grid
        T = self.timegrid.restricted.T  # length of restr. grid
        discount_factors = self.timegrid.restricted.discount_factors  # disc fctrs of restr. grid
        price = price[I]  # prices only in asset time window

        ##### important distinction:
        ## if extra costs are given, we need dispatch IN and OUT
        ## if it's zero, one variable is enough

        # Make vector of single min/max capacities.
        if isinstance(self.max_cap, (float, int, np.ndarray)):
            max_cap = self.max_cap * np.ones(T)
        else:  # given in form of dict (start/end/values)
            max_cap = self.timegrid.restricted.values_to_grid(self.max_cap)
        if isinstance(self.min_cap, (float, int, np.ndarray)):
            min_cap = self.min_cap * np.ones(T)
        else:  # given in form of dict (start/end/values)
            min_cap = self.timegrid.restricted.values_to_grid(self.min_cap)
        # check integrity
        if any(min_cap > max_cap):
            raise ValueError(
                'Asset --' + self.name +
                '--: Contract with min_cap > max_cap leads to ill-posed optimization problem'
            )
        # need to scale to discretization step since: flow * dT = volume in time step
        min_cap = min_cap * self.timegrid.restricted.dt
        max_cap = max_cap * self.timegrid.restricted.dt

        mapping = pd.DataFrame()  ## mapping of variables for use in portfolio
        if (self.extra_costs == 0) or all(max_cap <= 0.) or all(min_cap >= 0.):
            # in this case no need for two variables per time step
            u = max_cap  # upper bound
            l = min_cap  # lower
            if self.extra_costs != 0:
                if (all(max_cap <= 0.)):  # dispatch always negative
                    price = price - self.extra_costs
                if (all(min_cap >= 0.)):  # dispatch always positive
                    price = price + self.extra_costs
            c = price * discount_factors  # set price and discount
            mapping['time_step'] = I
        else:
            u = np.hstack((np.minimum(0., max_cap), np.maximum(0., max_cap)))
            l = np.hstack((np.minimum(0., min_cap), np.maximum(0., min_cap)))
            # set price  for in/out dispatch
            # in full contract there may be different prices for in/out
            c = np.tile(price, (2, 1))
            # add extra costs to in/out dispatch
            ec = np.vstack((-np.ones(T) * self.extra_costs,
                            np.ones(T) * self.extra_costs))
            c = c + ec
            # discount the cost vectors:
            c = c * (np.tile(discount_factors, (2, 1)))
            c = c.flatten('C')
            # mapping to be able to extract information later on
            # infos:             'asset', 'node', 'type'
            mapping['time_step'] = np.hstack((I, I))
        ## other information (only here as this way we have the right length)
        mapping['asset'] = self.name
        mapping['node'] = self.nodes[0].name
        mapping['type'] = 'd'  # only dispatch variables (needed to impose nodal restrictions in the portfolio)
        if costs_only:
            return c
        return OptimProblem(c=c, l=l, u=u, mapping=mapping)
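
When extra costs are non-zero and the capacity range spans zero, the contract above splits each time step into an "in" and an "out" variable so the extra costs act on the absolute dispatch: the extra-cost part of c.x is extra_costs * |dispatch| in either direction. A minimal sketch with toy numbers (not the library API):

import numpy as np

T = 3
min_cap = np.array([-5., -5., -5.])
max_cap = np.array([5., 5., 5.])
price = np.array([20., 22., 25.])
extra_costs = 1.5
discount_factors = np.ones(T)

u = np.hstack((np.minimum(0., max_cap), np.maximum(0., max_cap)))   # in-block <= 0, out-block >= 0
l = np.hstack((np.minimum(0., min_cap), np.maximum(0., min_cap)))
c = np.tile(price, (2, 1))
c = c + np.vstack((-np.ones(T) * extra_costs,    # in-block:  price - extra_costs
                   np.ones(T) * extra_costs))    # out-block: price + extra_costs
c = (c * np.tile(discount_factors, (2, 1))).flatten('C')

print(l, u, c, sep='\n')   # bounds of the in/out blocks and the stacked, discounted cost vector
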
Example no. 3
    def setup_optim_problem(self,
                            prices: dict,
                            timegrid: Timegrid = None,
                            costs_only: bool = False) -> OptimProblem:
        """ Set up optimization problem for asset

        Args:
            prices (dict): Dictionary of price arrays needed by assets in portfolio
            timegrid (Timegrid, optional): Discretization grid for asset. Defaults to None, 
                                           in which case it must have been set previously
            costs_only (bool): Only create costs vector (speed up e.g. for sampling prices). Defaults to False

        Returns:
            OptimProblem: Optimization problem to be used by optimizer
        """
        # set timegrid if given as optional argument
        if timegrid is not None:
            self.set_timegrid(timegrid)
        # check: timegrid set?
        if not hasattr(self, 'timegrid'):
            raise ValueError(
                'Set timegrid of asset before creating optim problem. Asset: '
                + self.name)

        dt = self.timegrid.restricted.dt
        n = self.timegrid.restricted.T  # number of time steps in the restricted grid

        ct = self.cap_out * dt  #  Adjust capacity (unit is in vol/h)
        cp = self.cap_in * dt  #  Adjust capacity (unit is in vol/h)
        inflow = np.cumsum(self.inflow * dt)
        discount = self.timegrid.restricted.discount_factors

        if self.price is not None:
            assert (self.price in prices)
            price = prices[self.price].copy()
            if len(price) != self.timegrid.T:  # price vector must have the right size for the discretization
                raise ValueError(
                    'Length of price array must be equal to length of time grid. Asset: '
                    + self.name)
        # separation in/out needed?  Only one or two dispatch variable per time step
        sep_needed = (self.eff_in != 1) or (self.cost_in != 0) or (self.cost_out != 0)

        if sep_needed:
            u = np.hstack((np.zeros(n, float), ct))
            l = np.hstack((-cp, np.zeros(n, float)))
            c = np.ones((2, n), float)
            c[0, :] = -c[0, :] * self.cost_in
            c[1, :] = c[1, :] * self.cost_out
            c = c * (np.tile(discount, (2, 1)))
        else:
            u = ct
            l = -cp
            c = np.zeros(n)
        if self.price is not None:
            c -= np.asarray(price[self.timegrid.restricted.I]) * discount
        c = c.flatten('C')  # make all one columns
        # switch to return costs only
        if costs_only:
            return c
        # Storage restriction --  cumulative sums must fit into reservoir
        if self.block_size == 1 or self.block_size is None:
            A = -sp.tril(np.ones((n, n), float))
            # Maximum: max volume not exceeded
            b = (self.size - self.start_level) * np.ones(n) - inflow
            b[-1] = self.end_level - self.start_level - inflow[-1]
            # Minimum: empty
            b_min = -self.start_level * np.ones(n, float) - inflow
            b_min[-1] = self.end_level - self.start_level - inflow[-1]
        else:
            A = sp.lil_matrix((n, n))
            b = np.empty(n)
            b.fill(np.nan)
            b_min = np.empty(n)
            b_min.fill(np.nan)
            aa = np.arange(0, n, self.block_size)
            if aa[-1] != n:
                aa = np.append(aa, n)
            for i, a in enumerate(aa[0:-1]):  # go through the blocks
                diff = aa[i + 1] - a
                A[a:a + diff, a:a + diff] = -np.tril(np.ones((diff, diff), float))
                # Maximum: max volume not exceeded
                parts_b = (self.size - self.start_level) * np.ones(diff) - inflow[a:a + diff]
                parts_b[-1] = self.end_level - self.start_level - inflow[-1]
                b[a:a + diff] = parts_b
                # Minimum: empty
                parts_b_min = -self.start_level * np.ones(diff) - inflow[a:a + diff]
                parts_b_min[-1] = self.end_level - self.start_level - inflow[-1]
                b_min[a:a + diff] = parts_b_min
        if sep_needed:
            A = sp.hstack((A * self.eff_in, A))  # for in and out
        # join restrictions for in, out, full, empty
        b = np.hstack((b, b_min))
        A = sp.vstack((A, A))
        cType = 'U' * n + 'L' * n
        mapping = pd.DataFrame()
        if sep_needed:
            mapping['time_step'] = np.hstack(
                (self.timegrid.restricted.I, self.timegrid.restricted.I))
        else:
            mapping['time_step'] = self.timegrid.restricted.I
        mapping['node'] = self.nodes[0].name
        mapping['asset'] = self.name
        mapping['type'] = 'd'
        return OptimProblem(c=c,
                            l=l,
                            u=u,
                            A=A,
                            b=b,
                            cType=cType,
                            mapping=mapping)
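
The storage restrictions above work on cumulative volumes: the lower-triangular matrix turns dispatch into a fill level that must stay between empty and the storage size, and the last row pins the fill level to the end level. A small self-contained check with toy numbers (not the library API):

import numpy as np
import scipy.sparse as sp

n = 4
size, start_level, end_level = 10., 2., 2.
inflow = np.zeros(n)                            # cumulative inflow, zero in this toy case
dispatch = np.array([-3., 1., 1., 1.])          # negative = charging in this sign convention

A = -sp.tril(np.ones((n, n), float))
b_max = (size - start_level) * np.ones(n) - inflow              # 'U' rows: never above the storage size
b_min = -start_level * np.ones(n, float) - inflow               # 'L' rows: never below empty
b_max[-1] = b_min[-1] = end_level - start_level - inflow[-1]    # final step pinned to the end level

fill = start_level + inflow - np.cumsum(dispatch)               # fill level implied by the dispatch
print(fill)                                                     # stays within [0, size] and ends at end_level
print(np.all(A @ dispatch <= b_max), np.all(A @ dispatch >= b_min))
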
Example no. 4
    def setup_optim_problem(self,
                            prices: dict = None,
                            timegrid: Timegrid = None,
                            costs_only: bool = False,
                            skip_nodes: list = [],
                            fix_time_window: Dict = None) -> OptimProblem:
        """ Set up optimization problem for portfolio

        Args:
            prices (dict): Dictionary of price arrays needed by assets in portfolio. Defaults to None
            timegrid (Timegrid, optional): Discretization grid for portfolio and all assets within. 
                                           Defaults to None, in which case it must have been set previously
            costs_only (bool): Only create costs vector (speed up e.g. for sampling prices). Defaults to False  
            skip_nodes (List): Nodes to be skipped in nodal restrictions (defaults to [])

            fix_time_window (Dict): Fix results for given indices on time grid to given values. Defaults to None
                           fix_time_window['I']: Indices on timegrid or alternatively date (all dates before date taken)
                           fix_time_window['x']: Results.x that results are to be fixed to in time window(full array, all times)

        Returns:
            OptimProblem: Optimization problem to be used by optimizer
        """
        ################################################## checks and preparations
        # set timegrid if given as optional argument
        if timegrid is not None:
            self.set_timegrid(timegrid)
        # check: timegrid set?
        if not hasattr(self, 'timegrid'):
            raise ValueError(
                'Set timegrid of portfolio before creating optim problem.')
        ################################################## set up optim problems for assets
        ## bounds
        l = np.array([])
        u = np.array([])
        c = np.array([])
        opt_probs = {}  # dictionary to collect optim. problem for each asset
        mapping = pd.DataFrame()  # variable mappings collected from assets
        for a in self.assets:
            opt_probs[a.name] = a.setup_optim_problem(prices=prices,
                                                      timegrid=self.timegrid,
                                                      costs_only=costs_only)
            if not costs_only:
                mapping = pd.concat([mapping, opt_probs[a.name].mapping])
                # add together bounds and costs
                l = np.concatenate((l, opt_probs[a.name].l), axis=None)
                u = np.concatenate((u, opt_probs[a.name].u), axis=None)
                c = np.concatenate((c, opt_probs[a.name].c), axis=None)
            else:
                c = np.concatenate((c, opt_probs[a.name]), axis=None)
        if costs_only:
            return c
        n_vars = len(l)  # total number of variables
        n_nodes = len(self.nodes)  # number of nodes
        T = self.timegrid.T  # number of time steps
        # new index refers to portfolio
        mapping.reset_index(inplace=True)
        mapping.rename(columns={'index': 'index_assets'}, inplace=True)

        ################################################## put together asset restrictions
        A = sp.lil_matrix((0, n_vars))  # sparse format to incrementally change
        b = np.zeros(0)
        cType = ''
        for a in self.assets:
            if opt_probs[a.name].A is not None:
                n, m = opt_probs[a.name].A.shape
                ind = mapping.index[mapping['asset'] == a.name]
                myA = sp.lil_matrix((n, n_vars))
                myA[:, ind] = opt_probs[a.name].A
                opt_probs[a.name].A = None  # free storage
                A = sp.vstack((A, myA))
                b = np.hstack((b, opt_probs[a.name].b))
                cType = cType + opt_probs[a.name].cType
        ################################################## create nodal restriction (flows add to zero)
        # record mapping for nodal restrictions to be able to assign e.g. duals to nodes and time steps
        n_restr = len(b)  # so many restr so far
        mapping['nodal_restr'] = None
        # mapping['index_restr'] = None # index of the nodal restriction in the op (rows of A & b)  # Note: Not needed and probably not well defined
        n_nodal_restr = 0  # counter
        for i_node, n in enumerate(self.nodes):
            if n not in skip_nodes:
                for t in self.timegrid.I:
                    # identify variables belonging to this node n and time step t
                    I = (mapping['type'] == 'd') & \
                        (mapping['node'] == n) & \
                        (mapping['time_step'] == t).values
                    if any(I):  # only then restriction needed
                        myA = sp.lil_matrix((1, n_vars))  # one row only
                        myA[0, I] = 1  # all filtered variables contribute
                        mapping.loc[I, 'nodal_restr'] = n_nodal_restr
                        # mapping.loc[I, 'index_restr'] = n_nodal_restr+n_restr # Note: Not needed and probably not well defined
                        n_nodal_restr += 1
                        A = sp.vstack((A, myA))
        b = np.hstack((b, np.zeros(n_nodal_restr)))  # must add to zero
        cType = cType + 'N' * n_nodal_restr

        # in case a certain time window is to be fixed, set l and u to the given values
        # potentially expensive, as the fixed variables formally remain variables;
        # however, we assume the optimizer handles the tight bounds efficiently
        if fix_time_window is not None:
            assert 'I' in fix_time_window, 'fix_time_window must contain key "I"'
            assert 'x' in fix_time_window, 'fix_time_window must contain key "x"'
            if isinstance(fix_time_window['I'], (dt.date, dt.datetime)):
                fix_time_window['I'] = (self.timegrid.timepoints <= pd.Timestamp(fix_time_window['I']))
            assert isinstance(fix_time_window['I'], (np.ndarray, list)), \
                'fix_time_window["I"] must be a date or an array'
            # get index of variables for those time points
            I = mapping['time_step'].isin(self.timegrid.I[fix_time_window['I']])
            l[I] = fix_time_window['x'][I]
            u[I] = fix_time_window['x'][I]
        return OptimProblem(c=c,
                            l=l,
                            u=u,
                            A=A,
                            b=b,
                            cType=cType,
                            mapping=mapping)
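
The nodal restrictions built in the loop above simply force all dispatch variables that share a node and a time step to add up to zero. A minimal, self-contained illustration of that construction with a made-up two-asset mapping (toy data, not the library API):

import numpy as np
import pandas as pd
import scipy.sparse as sp

mapping = pd.DataFrame({'asset':     ['buy', 'buy', 'sell', 'sell'],
                        'node':      ['hub', 'hub', 'hub', 'hub'],
                        'type':      ['d', 'd', 'd', 'd'],
                        'time_step': [0, 1, 0, 1]})
n_vars = len(mapping)

rows = []
for t in (0, 1):
    I = (mapping['type'] == 'd') & (mapping['node'] == 'hub') & (mapping['time_step'] == t)
    row = sp.lil_matrix((1, n_vars))
    row[0, np.where(I)[0]] = 1          # every matching variable contributes with weight 1
    rows.append(row)
A_nodal = sp.vstack(rows)               # one 'N'-type restriction per (node, time step)

print(A_nodal.toarray())                # each row sums the variables of one time step at node 'hub'
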
Example no. 5
def make_slp(optim_problem: OptimProblem, portf: Portfolio, timegrid: Timegrid,
             start_future: dt.datetime, samples: List[Dict]) -> OptimProblem:
    """ Create a two stage SLP (stochastic linear program) from a given OptimProblem

    Args:
        optim_problem (OptimProblem)   : start problem
        portf (Portfolio)              : portfolio that is the basis of the optim_problem (e.g. to translate price samples to effect on LP)
        timegrid      (TimeGrid)       : timegrid consistent with optimproblem
        start_future  (dt.datetime)    : divides timegrid into present with certain prices 
                                            and future with uncertain prices, represented by samples
        samples (List[Dict])           : price samples for future. 
                                         (!) the future part of the original portfolio is added as an additional sample

    Returns:
        OptimProblem: Two stage SLP formulated as OptimProblem
    """
    assert pd.Timestamp(start_future) < pd.Timestamp(timegrid.end), \
        'Start of future must be before end for SLP'
    # (1) identify present and future on timegrid
    # future
    timegrid.set_restricted_grid(start=start_future)
    future_tg = deepcopy(timegrid.restricted)
    # present
    timegrid.set_restricted_grid(end=start_future)
    present_tg = deepcopy(timegrid.restricted)

    #### abbreviations
    # time grid
    T = timegrid.T
    Tf = future_tg.T
    Tp = present_tg.T
    #ind_f = future_tg.I[0]  # index of start_future in time grid
    # number of samples
    nS = len(samples)
    # optim problem
    n, m = optim_problem.A.shape

    # The SLP two-stage model is the following:
    # \begin{eqnarray}
    #    \min \left[ c^{d\,T} x^d + \frac{1}{S} \sum_s \hat{c}^{ds\,T} \hat{x}^{ds} \right] \\
    #    \mbox{s.t.}\; A^s \begin{pmatrix} x^d \\ \hat{x}^{ds} \end{pmatrix} \le \begin{pmatrix} b^d \\ \hat{b}^{ds} \end{pmatrix} \quad \forall s = 1,\dots,S
    # \end{eqnarray}

    # (2) map variables to present & future --- and extend future variables by number of samples
    # the mapping information enables us to map variables to present and future and extend the problem
    # for future values, the dispatch information becomes somewhat irrelevant, but will
    # have an effect on decisions for the present
    slp_col = 'slp_step_' + str(future_tg.I[0])
    optim_problem.mapping[slp_col] = np.nan
    ### mapping may contain duplicate rows for variables. Drop those
    temp_df = optim_problem.mapping[~optim_problem.mapping.index.duplicated(keep='first')]
    If = temp_df['time_step'].isin(future_tg.I)  # does the variable belong to the future?
    del temp_df
    # future part of original cost vector gets number -1
    optim_problem.mapping.loc[If, slp_col] = -1
    map_f = optim_problem.mapping.loc[If, :].copy()
    n_f = len(map_f.index.unique())  # number of future variables (duplicate mapping rows allowed, hence count unique indices)
    #n_p      = m-n_f       # number of present variables
    # concatenate for each sample
    for i in range(0, nS):
        map_f[slp_col] = i
        optim_problem.mapping = pd.concat((optim_problem.mapping, map_f))
    optim_problem.mapping.reset_index(drop=True, inplace=True)
    ### to be changed in case samples are wanted for the future part only ... this approach does not cover that
    # # (3) translate price samples for future to cost samples (c vectors in LP)
    #     # The portfolio can only build the full LP (present & future). Thus we need to
    #     # append future samples with the (irrelevant) present prices, build the c's
    #     # and then ignore the present part.
    #     # In case the length of the samples is already the full timegrid, step is ignored
    # for i, mys in enumerate(samples):
    #     for myk in mys:
    #         if len(mys[myk]) == T:
    #             pass
    #         elif len(mys[myk]) == Tf:
    #             samples[i][myk] = np.hstack((optim_problem.c[:ind_f], mys[myk]))
    #         else:
    #             raise ValueError('All price samples for future must have length of full OR future part of timegrid')
    c_samples = portf.create_cost_samples(price_samples=samples,
                                          timegrid=timegrid)

    # (4) extend LP (A, b, l, u, c, cType)
    #### Reminder: The original cost vector is interpreted as another sample.

    ## extend vectors with nS times the future (the easy part)
    optim_problem.l = np.hstack(
        (optim_problem.l, np.tile(optim_problem.l[If], nS)))
    optim_problem.u = np.hstack(
        (optim_problem.u, np.tile(optim_problem.u[If], nS)))
    ## Attention with the cost vector. In order to obtain the MEAN across samples, divide by (nS+1) [new samples plus original]
    optim_problem.c[If] = optim_problem.c[If] / (nS + 1)  # orig. future sample
    for myc in c_samples:  # add each future cost sample (and scale down to get mean)
        optim_problem.c = np.hstack((optim_problem.c, myc[If] / (nS + 1)))

    ## different logic - restrictions simply multiply in number
    optim_problem.b = np.tile(optim_problem.b, nS + 1)
    optim_problem.cType = optim_problem.cType * (nS + 1)

    ## extending A & b (the trickier part)
    optim_problem.A = sp.lil_matrix(optim_problem.A)  # convert to a subscriptable format
    # Note: Check and ideally avoid any such conversion (agree on one format)
    # future-only matrix
    Af = optim_problem.A[:, If]
    # "present only" matrix -- set future elements to zero to decouply
    Ap = optim_problem.A.copy()
    Ap[:, If] = 0.
    # start extending the matrix
    optim_problem.A = sp.hstack((optim_problem.A, sp.lil_matrix((n, nS * n_f))))
    #### add rows that encode the same restrictions as the original A -- acting on each new set of future variables
    for i in range(0, nS):
        myA = sp.hstack((Ap, sp.lil_matrix((n, i * n_f)), Af,
                         sp.lil_matrix((n, (nS - i - 1) * n_f))))
        optim_problem.A = sp.vstack((optim_problem.A, myA))

    return optim_problem
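
Because the future part of the original cost vector is kept as one additional sample, every future cost block is divided by nS + 1; evaluated at any fixed future dispatch, the two-stage objective then equals the mean cost over the original curve and all samples. A quick numerical check with toy numbers (not the library API):

import numpy as np

c_future_orig = np.array([3., 4.])                        # original future cost coefficients
c_future_samples = [np.array([2., 5.]), np.array([4., 3.])]
nS = len(c_future_samples)

x_future = np.array([1., 2.])                             # same future dispatch in every sample block
blocks = [c_future_orig / (nS + 1)] + [c / (nS + 1) for c in c_future_samples]
slp_value = sum(b @ x_future for b in blocks)             # contribution of all future blocks to c.x
mean_value = np.mean([c_future_orig @ x_future] + [c @ x_future for c in c_future_samples])

print(np.isclose(slp_value, mean_value))                  # True
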
Example no. 6
    def setup_optim_problem(self,
                            prices: dict = None,
                            timegrid: Timegrid = None,
                            costs_only: bool = False,
                            skip_nodes: list = [],
                            fix_time_window: Dict = None) -> OptimProblem:
        """ Set up optimization problem for portfolio

        Args:
            prices (dict): Dictionary of price arrays needed by assets in portfolio. Defaults to None
            timegrid (Timegrid, optional): Discretization grid for portfolio and all assets within. 
                                           Defaults to None, in which case it must have been set previously
            costs_only (bool): Only create costs vector (speed up e.g. for sampling prices). Defaults to False  
            skip_nodes (List): Nodes to be skipped in nodal restrictions (defaults to [])

            fix_time_window (Dict): Fix results for given indices on time grid to given values. Defaults to None
                           fix_time_window['I']: Indices on timegrid or alternatively date (all dates before date taken)
                           fix_time_window['x']: Results.x that results are to be fixed to in time window(full array, all times)

        Returns:
            OptimProblem: Optimization problem to be used by optimizer
        """
        ################################################## checks and preparations
        # set timegrid if given as optional argument
        if timegrid is not None:
            self.set_timegrid(timegrid)
        # check: timegrid set?
        if not hasattr(self, 'timegrid'):
            raise ValueError(
                'Set timegrid of portfolio before creating optim problem.')
        ################################################## set up optim problems for assets
        ## bounds
        l = np.array([])
        u = np.array([])
        c = np.array([])
        opt_probs = {}  # dictionary to collect optim. problem for each asset
        mapping = pd.DataFrame()  # variable mappings collected from assets
        for a in self.assets:
            opt_probs[a.name] = a.setup_optim_problem(prices=prices,
                                                      timegrid=self.timegrid,
                                                      costs_only=costs_only)
            if not costs_only:
                mapping = pd.concat([mapping, opt_probs[a.name].mapping])
                # add together bounds and costs
                l = np.concatenate((l, opt_probs[a.name].l), axis=None)
                u = np.concatenate((u, opt_probs[a.name].u), axis=None)
                c = np.concatenate((c, opt_probs[a.name].c), axis=None)
            else:
                c = np.concatenate((c, opt_probs[a.name]), axis=None)
        if costs_only:
            return c
        n_vars = len(l)  # total number of variables
        n_nodes = len(self.nodes)  # number of nodes
        T = self.timegrid.T  # number of time steps
        # new index refers to portfolio
        mapping.index.name = None
        mapping.reset_index(inplace=True)
        mapping.rename(columns={'index': 'index_assets'}, inplace=True)
        #### mapping may come with several rows per variable
        ## ensure index refers to variables: go through assets and their index, make unique
        mapping['keys'] = mapping['index_assets'].astype(
            str) + mapping['asset'].astype(str)
        idx = pd.DataFrame()
        idx['keys'] = mapping['keys'].unique()
        idx.reset_index(inplace=True)
        mapping = pd.merge(mapping,
                           idx,
                           left_on='keys',
                           right_on='keys',
                           how='left')
        mapping.drop(columns=['keys'], inplace=True)
        mapping.set_index('index', inplace=True)
        ################################################## put together asset restrictions
        A = sp.lil_matrix((0, n_vars))  # sparse format to incrementally change
        b = np.zeros(0)
        cType = ''
        for a in self.assets:
            if opt_probs[a.name].A is not None:
                n, m = opt_probs[a.name].A.shape
                ind = mapping.index[mapping['asset'] == a.name].unique()
                myA = sp.lil_matrix((n, n_vars))
                myA[:, ind] = opt_probs[a.name].A
                opt_probs[a.name].A = None  # free storage
                A = sp.vstack((A, myA))
                b = np.hstack((b, opt_probs[a.name].b))
                cType = cType + opt_probs[a.name].cType
        ################################################## create nodal restriction (flows add to zero)
        # record mapping for nodal restrictions to be able to assign e.g. duals to nodes and time steps
        # some assets work with a mapping column "disp_factor" that allows to account for a disp variable
        # only up to a factor (example transport; higher efficiency in setting up the problem)
        if 'disp_factor' not in mapping.columns:
            mapping['disp_factor'] = 1.
        mapping['disp_factor'] = mapping['disp_factor'].fillna(1.)
        mapping['nodal_restr'] = None

        def create_nodal_restr(nodes, map_nodes, map_types, map_idx, map_dispf,
                               map_times, timegrid_I, skip_nodes, n_vars):
            """ Specific function creating nodal restrictions """
            map_nodal_restr = np.zeros(map_idx.shape[0])
            n_nodal_restr = 0
            cols = np.zeros(0)
            rows = np.zeros(0)
            vals = np.zeros(0)
            for n in nodes:
                if (skip_nodes is None) or (n not in skip_nodes):
                    Inode = (map_types == 'd') & (map_nodes == n)
                    for t in timegrid_I:
                        # identify variables belonging to this node n and time step t
                        I = (map_times[Inode] == t)
                        if I.sum() > 0:  # only then restriction needed
                            # myA = sp.lil_matrix((1, n_vars)) # one row only
                            # myA[0, map_idx[Inode][I]] = map_dispf[Inode][I]
                            newcols = map_idx[Inode][I]
                            cols = np.append(cols, newcols)
                            rows = np.append(
                                rows, n_nodal_restr * np.ones(len(newcols)))
                            vals = np.append(vals, map_dispf[Inode][I])
                            Itemp = Inode.copy()
                            Itemp[Itemp] = I
                            map_nodal_restr[Itemp] = n_nodal_restr
                            n_nodal_restr += 1
                            # A = sp.vstack((A, myA))
            return cols, rows, vals, map_nodal_restr, n_nodal_restr

        # # easily readable version -  loop
        # perf = time.perf_counter()
        # n_nodal_restr = 0
        # for n in self.nodes:
        #     if not n in skip_nodes:
        #         for t in self.timegrid.I:
        #             # identify variables belonging to this node n and time step t
        #             I = (mapping['type']=='d') & \
        #                 (mapping['node']==n)   & \
        #                 (mapping['time_step'] == t).values
        #             if any(I): # only then restriction needed
        #                 myA = sp.lil_matrix((1, n_vars)) # one row only
        #                 myA[0, mapping.index[I]] = mapping.loc[I, 'disp_factor'].values ## extended with disp_factor logic
        #                 mapping.loc[I, 'nodal_restr'] = n_nodal_restr
        #                 n_nodal_restr +=1
        #                 A = sp.vstack((A, myA))
        # print('loop 1  duration '+'{:0.1f}'.format(time.perf_counter()-perf)+'s')
        ### start cryptic but much faster version, all in numpy
        map_nodes = mapping['node'].values
        map_types = mapping['type'].values
        map_idx = mapping.index.values
        map_dispf = mapping['disp_factor'].values
        map_times = mapping['time_step'].values
        if len(skip_nodes) == 0:
            my_skip_nodes = None
        else:
            my_skip_nodes = skip_nodes
        cols, rows, vals, map_nodal_restr, n_nodal_restr = create_nodal_restr(
            list(self.nodes.keys()), map_nodes, map_types, map_idx, map_dispf,
            map_times, self.timegrid.I, my_skip_nodes, n_vars)
        A = sp.vstack((A, sp.csr_matrix((vals, (rows.astype(np.int64), cols.astype(np.int64))),
                                        shape=(n_nodal_restr, n_vars))))
        mapping['nodal_restr'] = map_nodal_restr.astype(np.int64)
        ### end cryptic version

        b = np.hstack((b, np.zeros(n_nodal_restr)))  # must add to zero
        cType = cType + 'N' * n_nodal_restr

        # in case a certain time window is to be fixed, set l and u to the given values
        # potentially expensive, as the fixed variables formally remain variables;
        # however, we assume the optimizer handles the tight bounds efficiently
        if fix_time_window is not None:
            assert 'I' in fix_time_window, 'fix_time_window must contain key "I" (time steps to fix)'
            assert 'x' in fix_time_window, 'fix_time_window must contain key "x" (values to fix)'
            if isinstance(fix_time_window['I'], (dt.date, dt.datetime)):
                fix_time_window['I'] = (self.timegrid.timepoints <= pd.Timestamp(fix_time_window['I']))
            assert isinstance(fix_time_window['I'], (np.ndarray, list)), \
                'fix_time_window["I"] must be a date or an array'
            # in case of SLP, the problems may not be of the same size (the SLP is an extended problem)
            # ---> then cut x to fix to the size of the problem
            assert len(fix_time_window['x']) >= n_vars, 'fixing: values to fix appear to have the wrong size'
            if len(fix_time_window['x']) > n_vars:
                fix_time_window['x'] = fix_time_window['x'][0:n_vars]
            # get index of variables for those time points
            I = mapping['time_step'].isin(self.timegrid.I[fix_time_window['I']])
            l[I] = fix_time_window['x'][I]
            u[I] = fix_time_window['x'][I]
        return OptimProblem(c=c,
                            l=l,
                            u=u,
                            A=A,
                            b=b,
                            cType=cType,
                            mapping=mapping)
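
The "cryptic but much faster" branch above avoids stacking one sparse row per nodal restriction and instead collects (row, column, value) triplets for a single csr_matrix call. A small sketch with toy indices (not the library API) showing that both constructions yield the same matrix:

import numpy as np
import scipy.sparse as sp

n_vars = 5
# say restriction 0 couples variables 0 and 3, restriction 1 couples variables 1, 2 and 4
cols = np.array([0, 3, 1, 2, 4])
rows = np.array([0, 0, 1, 1, 1])
vals = np.ones(5)

A_fast = sp.csr_matrix((vals, (rows, cols)), shape=(2, n_vars))     # one shot, as in the fast version

A_slow = sp.lil_matrix((0, n_vars))                                 # row-by-row, as in the readable loop
for idx in ([0, 3], [1, 2, 4]):
    row = sp.lil_matrix((1, n_vars))
    row[0, idx] = 1.
    A_slow = sp.vstack((A_slow, row))

print(np.array_equal(A_fast.toarray(), A_slow.toarray()))           # True
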