Example #1
    def _bound_dual(self, cur_lp: CyClpSimplex) -> CyClpSimplex:
        """ Place a bound on each index of the dual variable associated with the
        constraints of this node's LP relaxation and resolve. We do this by adding
        to each constraint i the slack variable 's_i' in the node's LP relaxation,
        and we give each new variable a large, positive coefficient in the objective.
        By duality, we get the desired dual LP constraints. Therefore, for nodes
        with infeasible primal LP relaxations and unbounded dual LP relaxations,
        resolving gives us a finite (albeit very large) dual solution, which can
        be used to parametrically lower bound the objective value of this node as
        we change its right hand side.

        :param cur_lp: the CyClpSimplex instance for which we want to bound its
        dual solution and resolve
        :return: A CyClpSimplex instance representing the same model as input,
        with the additions prescribed in the method description
        """

        # we add slacks at the end so the user does not have to worry about adding
        # them to each node they create, and so we don't have to go back and update needlessly
        assert isinstance(cur_lp, CyClpSimplex), 'must give CyClpSimplex instance'
        for i, constr in enumerate(cur_lp.constraints):
            assert f's_{i}' not in [v.name for v in cur_lp.variables], \
                f"variable 's_{i}' is a reserved name. please name your variable something else"

        # cylp lacks (a documented) way to add a column, so rebuild the LP :[
        new_lp = CyClpSimplex()
        new_lp.logLevel = 0  # quiet output when resolving

        # recreate variables
        var_map = {v: new_lp.addVariable(v.name, v.dim) for v in cur_lp.variables}

        # bound them
        for orig_v, new_v in var_map.items():
            new_lp += CyLPArray(orig_v.lower) <= new_v <= CyLPArray(orig_v.upper)

        # re-add constraints, with slacks this time
        s = {}
        for i, constr in enumerate(cur_lp.constraints):
            s[i] = new_lp.addVariable(f's_{i}', constr.nRows)
            new_lp += s[i] >= CyLPArray(np.zeros(constr.nRows))
            new_lp += CyLPArray(constr.lower) <= \
                sum(constr.varCoefs[v] * var_map[v] for v in constr.variables) \
                + np.matrix(np.identity(constr.nRows))*s[i] <= CyLPArray(constr.upper)

        # set objective
        new_lp.objective = sum(
            CyLPArray(cur_lp.objectiveCoefficients[orig_v.indices]) * new_v for
            orig_v, new_v in var_map.items()
        ) + sum(self._M * v.sum() for v in s.values())

        # warm start
        orig_var_status, orig_slack_status = cur_lp.getBasisStatus()
        # each s_i starts at its lower bound of 0 when added - basis status 3
        orig_var_status = np.concatenate(
            (orig_var_status, np.full(sum(v.dim for v in s.values()), 3,
                                      dtype=orig_var_status.dtype))
        )
        new_lp.setBasisStatus(orig_var_status, orig_slack_status)

        # rerun and reassign
        new_lp.dual(startFinishOptions='x')
        return new_lp
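For context, here is a minimal, self-contained sketch (not part of the original project; the toy data in A and b and the choice M = 1e4 are illustrative assumptions) of the big-M trick the method relies on. Each slack column s_i appears only in row i with coefficient 1 and carries objective coefficient M, so dual feasibility for that column forces the row's dual value y_i to satisfy y_i <= M; an LP whose original rows conflict therefore resolves to a finite optimum instead of an infeasible primal / unbounded dual.

import numpy as np
from cylp.cy import CyClpSimplex
from cylp.py.modeling.CyLPModel import CyLPArray

M = 1e4  # large positive coefficient; by duality it caps each row dual at M

lp = CyClpSimplex()
lp.logLevel = 0

x = lp.addVariable('x', 2)
s = lp.addVariable('s', 2)  # one slack per constraint, as in _bound_dual

lp += x >= CyLPArray(np.zeros(2))
lp += s >= CyLPArray(np.zeros(2))

# x1 + x2 >= 2 together with x1 + x2 <= 1 is infeasible without the slacks
A = np.matrix([[1., 1.],
               [-1., -1.]])
b = CyLPArray(np.array([2., -1.]))
lp += A * x + np.matrix(np.identity(2)) * s >= b

# minimize the original objective plus M times the total slack used
lp.objective = CyLPArray(np.array([1., 1.])) * x + M * s.sum()

lp.dual(startFinishOptions='x')
assert lp.getStatusCode() == 0  # finite optimum despite the conflicting rows
print(lp.primalVariableSolution['x'], lp.primalVariableSolution['s'])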
Example #2
    def _find_split_inequality(self: G, idx: int, **kwargs: Any):
        """ Solve a cut generating LP (CGLP) to find a split inequality
        (pi, pi0), i.e. pi^T x >= pi0, that is valid for both sides of the
        disjunction x_j <= floor(x*_j) or x_j >= ceil(x*_j) (with j = idx) and
        maximally violated by the current LP relaxation solution x*.

        :param idx: index of the integer variable with fractional value to split on
        :return: the coefficient vector pi and right hand side pi0 of the cut
        """
        assert idx in self._integer_indices, 'must lift and project on integer index'
        x_idx = self.solution[idx]
        assert self._is_fractional(x_idx), \
            'must lift and project on index with fractional value'

        # build the CGLP model from ISE 418 Lecture 15 Slide 7 but for LP with >= constraints
        lp = CyClpSimplex()

        # declare variables
        pi = lp.addVariable('pi', self.lp.nVariables)
        pi0 = lp.addVariable('pi0', 1)
        u1 = lp.addVariable('u1', self.lp.nConstraints)
        u2 = lp.addVariable('u2', self.lp.nConstraints)
        w1 = lp.addVariable('w1', self.lp.nVariables)
        w2 = lp.addVariable('w2', self.lp.nVariables)

        # set bounds
        lp += u1 >= CyLPArray(np.zeros(self.lp.nConstraints))
        lp += u2 >= CyLPArray(np.zeros(self.lp.nConstraints))

        w_ub = CyLPArray(np.zeros(self.lp.nVariables))
        w_ub[idx] = float('inf')
        lp += w_ub >= w1 >= CyLPArray(np.zeros(self.lp.nVariables))
        lp += w_ub >= w2 >= CyLPArray(np.zeros(self.lp.nVariables))

        # set constraints
        # (pi, pi0) must be valid for both parts of the disjunction
        lp += 0 >= -pi + self.lp.coefMatrix.T * u1 - w1
        lp += 0 >= -pi + self.lp.coefMatrix.T * u2 + w2
        lp += 0 <= -pi0 + CyLPArray(
            self.lp.constraintsLower) * u1 - floor(x_idx) * w1.sum()
        lp += 0 <= -pi0 + CyLPArray(
            self.lp.constraintsLower) * u2 + ceil(x_idx) * w2.sum()
        # normalize variables
        lp += u1.sum() + u2.sum() + w1.sum() + w2.sum() == 1

        # set objective: find the deepest cut
        # since pi * x >= pi0 for all x in disjunction, we want min pi * x_star - pi0
        lp.objective = CyLPArray(self.solution) * pi - pi0

        # solve
        lp.primal(startFinishOptions='x')
        assert lp.getStatusCode() == 0, 'we should get optimal solution'
        assert lp.objectiveValue <= 0, 'pi * x >= pi0 -> pi * x - pi0 >= 0 -> ' \
                                       'negative objective at x^* since it gets cut off'

        # get solution
        return (lp.primalVariableSolution['pi'],
                lp.primalVariableSolution['pi0'])
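For reference, a sketch of the CGLP these constraints encode, assuming (as in the referenced ISE 418 slides) an LP relaxation of the form Ax >= b with x >= 0, where A = self.lp.coefMatrix, b = self.lp.constraintsLower, x^* = self.solution, and j = idx (w1 and w2 act as scalars here, since only their j-th entries may be nonzero):

\[
\begin{aligned}
\min \quad & \pi^\top x^* - \pi_0 \\
\text{s.t.} \quad & \pi \ge A^\top u^1 - w^1 e_j, \qquad \pi_0 \le b^\top u^1 - \lfloor x^*_j \rfloor\, w^1, \\
& \pi \ge A^\top u^2 + w^2 e_j, \qquad \pi_0 \le b^\top u^2 + \lceil x^*_j \rceil\, w^2, \\
& \mathbf{1}^\top u^1 + \mathbf{1}^\top u^2 + w^1 + w^2 = 1, \qquad u^1, u^2, w^1, w^2 \ge 0.
\end{aligned}
\]

Any feasible (pi, pi0) is valid for both {x >= 0 : Ax >= b, x_j <= floor(x^*_j)} and {x >= 0 : Ax >= b, x_j >= ceil(x^*_j)}, so a nonpositive optimal objective certifies that pi^T x >= pi0 cuts off x^*, which is exactly what the asserts above check.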
Example #3
    def find_strong_disjunctive_cut(self, root_id: int) -> Tuple[CyLPArray, float]:
        """ Generate a strong cut valid for the disjunction encoded in the subtree
        rooted at node <root_id>. This cut is optimized to maximize the violation
        of the LP relaxation solution at node <root_id>

        see ISE 418 Lecture 13 slide 3, Lecture 14 slide 9, and Lecture 15 slides
        6-7 for derivation

        :param root_id: id of the node on which we base the disjunction
        :return: a valid inequality (pi, pi0), i.e. pi^T x >= pi0 for all x in
        the convex hull of the disjunctive terms' LP relaxations
        """
        # sanity checks
        assert root_id in self.tree, 'root node must already exist in tree'
        root = self.tree.get_node_instances(root_id)
        # get each disjunctive term
        terminal_nodes = self.tree.get_leaves(root_id)
        # terminal nodes pruned for infeasibility do not expand disjunction, so remove them
        disjunctive_nodes = {n.idx: n for n in terminal_nodes if n.lp_feasible is not False}
        var_dicts = [{v.name: v.dim for v in n.lp.variables} for n in disjunctive_nodes.values()]
        assert all(var_dicts[0] == d for d in var_dicts), \
            'Each disjunctive term should have the same variables. The feature allowing' \
            ' otherwise remains to be developed.'

        # useful constants
        num_vars = sum(var_dim for var_dim in var_dicts[0].values())
        inf = root.lp.getCoinInfinity()

        # set infinite lower/upper bounds to 0 so they don't create numerical issues in constraints
        lb = {idx: CyLPArray([val if val > -inf else 0 for val in n.lp.variablesLower])
              for idx, n in disjunctive_nodes.items()}  # adjusted lower bound
        ub = {idx: CyLPArray([val if val < inf else 0 for val in n.lp.variablesUpper])
              for idx, n in disjunctive_nodes.items()}  # adjusted upper bound

        # fix the corresponding cglp multipliers at 0 to reflect that there is no
        # finite bound, i.e. that multiplier should not appear in the cglp
        wb = {idx: CyLPArray([inf if val > -inf else 0 for val in n.lp.variablesLower])
              for idx, n in disjunctive_nodes.items()}  # w bounds - variable on lb constraints
        vb = {idx: CyLPArray([inf if val < inf else 0 for val in n.lp.variablesUpper])
              for idx, n in disjunctive_nodes.items()}  # v bounds - variable on ub constraints

        # instantiate LP
        cglp = CyClpSimplex()
        cglp.logLevel = 0  # quiet output when resolving

        # declare variables (open question: what to do when we have a degenerate constraint)
        pi = cglp.addVariable('pi', num_vars)
        pi0 = cglp.addVariable('pi0', 1)
        u = {idx: cglp.addVariable(f'u_{idx}', n.lp.nConstraints) for idx, n in
             disjunctive_nodes.items()}
        w = {idx: cglp.addVariable(f'w_{idx}', n.lp.nVariables) for idx, n in
             disjunctive_nodes.items()}
        v = {idx: cglp.addVariable(f'v_{idx}', n.lp.nVariables) for idx, n in
             disjunctive_nodes.items()}

        # bound them
        for idx in disjunctive_nodes:
            cglp += u[idx] >= 0
            cglp += 0 <= w[idx] <= wb[idx]
            cglp += 0 <= v[idx] <= vb[idx]

        # add constraints
        for i, n in disjunctive_nodes.items():
            # (pi, pi0) must be valid for each disjunctive term's LP relaxation
            cglp += 0 >= -pi + n.lp.coefMatrix.T * u[i] + \
                np.matrix(np.eye(num_vars)) * w[i] - np.matrix(np.eye(num_vars)) * v[i]
            cglp += 0 <= -pi0 + CyLPArray(n.lp.constraintsLower) * u[i] + \
                lb[i] * w[i] - ub[i] * v[i]
        # normalize variables so they don't grow arbitrarily
        cglp += sum(var.sum() for var_dict in [u, w, v] for var in var_dict.values()) == 1

        # set objective: find the deepest cut
        # since pi * x >= pi0 for all x in disjunction, we want min pi * x_star - pi0
        cglp.objective = CyLPArray(root.solution) * pi - pi0

        # solve
        cglp.primal(startFinishOptions='x')
        assert cglp.getStatusCode() == 0, 'we should get optimal solution'
        assert cglp.objectiveValue <= 0, 'pi * x >= pi0 -> pi * x - pi0 >= 0 -> ' \
            'negative objective at x^* since it gets cut off'

        # get solution
        return cglp.primalVariableSolution['pi'], cglp.primalVariableSolution['pi0']
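For reference, a sketch of the conditions these constraints impose, following the referenced lectures and again assuming nonnegative variables so that pi may dominate the aggregated coefficients componentwise. For every disjunctive term t with LP relaxation {x : A_t x >= b_t, l_t <= x <= ub_t}, the multipliers (u_t, w_t, v_t) >= 0 certify that pi^T x >= pi0 holds on that term via

\[
\pi \;\ge\; A_t^\top u_t + w_t - v_t,
\qquad
\pi_0 \;\le\; b_t^\top u_t + \ell_t^\top w_t - \mathrm{ub}_t^\top v_t,
\]

together with the normalization \(\sum_t (\mathbf{1}^\top u_t + \mathbf{1}^\top w_t + \mathbf{1}^\top v_t) = 1\). Components whose lower bound is \(-\infty\) (or upper bound \(+\infty\)) have the corresponding entry of w_t (or v_t) fixed at 0 via the wb/vb arrays above, since there is no finite bound constraint to take a multiplier. Minimizing \(\pi^\top x^* - \pi_0\) then selects, among the inequalities valid for every term, one that is maximally violated by the root relaxation solution.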