Example #1
def test_ipopt_solver(model_name):
    current_dir = pathlib.Path(__file__).parent
    osil_file = current_dir / 'models' / (model_name + '.osil')
    pyomo_model = read_pyomo_model(osil_file)
    problem = problem_from_pyomo_model(pyomo_model)

    atol = rtol = 1e-4

    galini = Galini()
    galini.update_configuration({
        'galini': {
            'constraint_violation_tol': 1e-2,
        },
        'logging': {
            'stdout': True,
        },
        'branch_and_cut': {
            'tolerance': atol,
            'relative_tolerance': rtol,
            'root_node_feasible_solution_search_timelimit': 0,
            'cuts': {
                'maxiter': 100,
            }
        },
        'cuts_generator': {
            'generators': ['outer_approximation'],
        },
        'ipopt': {
            'ipopt': {
                'acceptable_constr_viol_tol': 1e-3
            },
        },
    })
    set_timelimit(30)
    start_timelimit()
    solver = BranchAndBoundSolver(galini)
    solver.before_solve(pyomo_model, problem)
    solution = solver.solve(problem)

    assert solution.status.is_success()

    sol_file = current_dir / 'solutions' / (model_name + '.sol')
    expected_solution = read_solution(sol_file)

    expected_objective = expected_solution['objective']
    assert solution.objective is not None
    assert is_close(expected_objective,
                    solution.objective.value,
                    atol=atol,
                    rtol=rtol)

    expected_variables = expected_solution['variables']

    for var_sol in solution.variables:
        assert is_close(
            expected_variables[var_sol.name],
            var_sol.value,
            atol=atol,
            rtol=rtol,
        )
Example #2
def solve_primal_with_starting_point(
        run_id, problem, starting_point, solver, fix_all=False):
    """Solve primal using mip_solution as starting point and fixing variables.

    Parameters
    ----------
    run_id
        the run_id used for logging
    problem
        the mixed integer, (possibly) non convex problem
    starting_point : array-like
        the starting point
    solver : Solver
        the NLP solver used to solve the problem
    fix_all
        if `True`, fix all variables, otherwise fix integer variables only

    Returns
    -------
    A solution to the problem
    """
    for v, point in zip(problem.variables, starting_point):
        domain = problem.domain(v)
        view = problem.variable_view(v)
        if point is None:
            lb = view.lower_bound()
            if lb is None:
                lb = -mc.infinity
            ub = view.upper_bound()
            if ub is None:
                ub = mc.infinity

            value = lb + (ub - lb) / 2.0
        else:
            value = point

        if domain != Domain.REAL:
            # Solutions (e.g. from the solution pool) can contain fractional
            # values for integer variables. Round them up, clamping at the
            # variable's upper bound.
            if not is_close(np.trunc(value), value, atol=mc.epsilon):
                value = min(view.upper_bound(), np.ceil(value))
            problem.fix(v, value)
        elif fix_all:
            problem.fix(v, value)
        else:
            problem.set_starting_point(v, value)

    try:
        solution = solver.solve(problem)
    finally:
        # Unfix all variables, whether or not the solve succeeded.
        for v in problem.variables:
            problem.unfix(v)

    return solution
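A small standalone sketch of the rounding rule used above for integer variables, kept independent of GALINI's Problem API (the helper name and the eps default are mine): fractional values are rounded up with np.ceil, but never past the variable's upper bound.

import numpy as np

def round_for_integer_variable(value, upper_bound, eps=1e-9):
    # Keep the value as-is if it is already numerically integral.
    if np.isclose(np.trunc(value), value, atol=eps):
        return value
    # Otherwise round up, clamping at the variable's upper bound.
    return min(upper_bound, np.ceil(value))

print(round_for_integer_variable(2.3, upper_bound=5))  # 3.0
print(round_for_integer_variable(4.7, upper_bound=4))  # 4 (clamped at the bound)
print(round_for_integer_variable(3.0, upper_bound=5))  # 3.0 (left unchanged)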
Example #3
    def _compute_gamma(self, optimal_obj, value):
        # Unknown primal
        if value is None or is_inf(value):
            return 1.0

        # Both the optimal objective and the value are (close to) zero
        if is_close(np.abs(optimal_obj), 0.0, atol=mc.epsilon):
            if is_close(np.abs(value), 0.0, atol=mc.epsilon):
                return 0.0

        # opt * obj < 0
        if np.sign(optimal_obj) * np.sign(value) < 0:
            return 1.0

        num = np.abs(optimal_obj - value)
        den = max(np.abs(optimal_obj), np.abs(value))
        return num / den
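For reference, the quantity computed above matches the usual primal-gap measure; writing $\tilde{z}$ for the known optimal objective and $\bar{z}$ for the candidate objective value (my notation, not GALINI's):

$$
\gamma =
\begin{cases}
1, & \text{if } \bar{z} \text{ is unknown/infinite or } \tilde{z} \cdot \bar{z} < 0,\\
0, & \text{if } |\tilde{z}| = |\bar{z}| = 0,\\
\dfrac{|\tilde{z} - \bar{z}|}{\max\{|\tilde{z}|,\, |\bar{z}|\}}, & \text{otherwise.}
\end{cases}
$$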
Example #4
    def branch(self, node, tree):
        root_problem = tree.root.storage.branching_data()
        node_problem = node.storage.branching_data()
        var = least_reduced_variable(node_problem, root_problem)
        if var is None:
            return None
        if is_close(var.upper_bound(), var.lower_bound(), atol=self.tolerance):
            return None
        return self._branch_on_var(var)
Example #5
def _detect_rlt_expression_linear(root_expr):
    """Matches expression of type x1 - w12 - w13 - ... - w1n"""
    aux_variables = [v for v in root_expr.children if v.is_auxiliary]
    non_aux_variables = [v for v in root_expr.children if not v.is_auxiliary]

    if len(non_aux_variables) != 1:
        return False, None, None

    non_aux_variable = non_aux_variables[0]
    non_aux_coef = root_expr.coefficient(non_aux_variable)
    non_aux_coef_is_one = is_close(np.abs(non_aux_coef), 1.0, atol=mc.epsilon)

    if not non_aux_coef_is_one:
        return False, None, None

    non_aux_coef_sign = np.sign(non_aux_coef)

    sum_vars = []
    for var in aux_variables:
        ref = var.reference

        if not ref:
            return False, None, None

        # Check coef is 1.0...
        coef = root_expr.coefficient(var)
        if not is_close(np.abs(coef), 1.0, atol=mc.epsilon):
            return False, None, None

        # ... and opposite sign of aux var
        if np.sign(coef) != -1.0 * non_aux_coef_sign:
            return False, None, None

        # ... and one aux variable is the non aux one
        if ref.var1 != non_aux_variable and ref.var2 != non_aux_variable:
            return False, None, None

        if ref.var1 == non_aux_variable:
            sum_vars.append(ref.var2)

        if ref.var2 == non_aux_variable:
            sum_vars.append(ref.var1)

    return True, non_aux_variable, sum_vars
Example #6
def detect_auxiliary_variables(problem):
    bilinear_aux_variables = dict()
    for constraint in problem.constraints:
        root_expr = constraint.root_expr

        if (not isinstance(root_expr, core.SumExpression)
                or len(root_expr.children) != 2):
            continue

        a, b = root_expr.children
        if (isinstance(a, core.QuadraticExpression)
                and isinstance(b, core.LinearExpression)):
            quadratic = a
            linear = b
        elif (isinstance(b, core.QuadraticExpression)
              and isinstance(a, core.LinearExpression)):
            quadratic = b
            linear = a
        else:
            continue

        if len(linear.children) != 1 or len(quadratic.terms) != 1:
            continue

        if not is_close(linear.constant_term, 0.0, atol=mc.epsilon):
            continue

        var = linear.children[0]
        coef = linear.coefficient(var)
        term = quadratic.terms[0]

        if is_close(coef, -1.0, atol=mc.epsilon):
            if is_close(term.coefficient, 1.0, atol=mc.epsilon):
                var.reference = core.BilinearTermReference(
                    term.var1, term.var2)
                bilinear_aux_variables[(term.var1.idx, term.var2.idx)] = var

        if is_close(coef, 1.0, atol=mc.epsilon):
            if is_close(term.coefficient, -1.0, atol=mc.epsilon):
                var.reference = core.BilinearTermReference(
                    term.var1, term.var2)
                bilinear_aux_variables[(term.var1.idx, term.var2.idx)] = var

    problem.metadata[PROBLEM_BILINEAR_AUX_VAR_META] = bilinear_aux_variables
Example #7
    def _cuts_converged(self, state):
        cuts_close = (
            state.latest_solution is not None and
            state.previous_solution is not None and
            is_close(
                state.latest_solution,
                state.previous_solution,
                rtol=self._cut_tolerance
            )
        )
        if cuts_close:
            return True
        return self._cuts_generators_manager.has_converged(state)
Example #8
def relative_bound_improvement(first_solution, prev_solution, latest_solution):
    """Lower bound improvement between the last two consecutive cut rounds.

    The relative bound improvement is defined as:

        (latest_solution - prev_solution)
        --------------------------------------
        (latest_solution - first_solution)

    Parameters
    ----------
    first_solution : float
    prev_solution : float
    latest_solution : float
    """
    if is_close(latest_solution, prev_solution, atol=mc.epsilon):
        return 0.0
    improvement = latest_solution - prev_solution
    lower_bound_improvement = latest_solution - first_solution
    if is_close(lower_bound_improvement, 0.0, atol=mc.epsilon):
        return 0.0
    return improvement / lower_bound_improvement
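A quick worked instance of the formula, with made-up numbers: for first_solution = 10, prev_solution = 14 and latest_solution = 15,

$$\frac{15 - 14}{15 - 10} = 0.2,$$

i.e. the latest cut round contributed 20% of the total lower-bound movement accumulated since the first round; a cut loop would typically stop once this ratio falls below a small threshold.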
Example #9
def compute_branching_variable(problem, linear_problem, mip_solution, weights):
    """Branch on variable with max nonlinear infeasibility.

    Parameters
    ----------
    problem : Problem
        the user problem
    linear_problem : Problem
        the linear relaxation of problem
    mip_solution : Solution
        the solution to linear_problem
    weights : dict
        the weights for the sum, max, and min nonlinear components

    Returns
    -------
    Variable
        the branching variable or None if it should not branch

    """
    nonlinear_infeasibility = \
        compute_nonlinear_infeasiblity_components(linear_problem, mip_solution)

    # score each variable and pick the one with the maximum
    # ignore variables that can be considered "fixed"
    branching_var = None
    branching_var_score = None
    for var_idx in nonlinear_infeasibility['sum'].keys():
        vv = problem.variable_view(var_idx)

        if is_close(vv.lower_bound(), vv.upper_bound(), rtol=mc.epsilon):
            continue

        if branching_var is None:
            branching_var = var_idx
            branching_var_score = _infeasibility_score(
                var_idx, nonlinear_infeasibility, weights
            )
        else:
            var_score = _infeasibility_score(
                var_idx, nonlinear_infeasibility, weights
            )
            if var_score > branching_var_score:
                branching_var = var_idx
                branching_var_score = var_score

    if branching_var is None:
        return None
    return problem.variable_view(branching_var)
Example #10
    def has_converged(self, state):
        if self._relaxation_is_linear:
            return True

        if self._nlp_solution is None:
            return False

        if is_inf(state.lower_bound):
            return False

        return is_close(
            state.lower_bound,
            self._nlp_solution,
            rtol=self.convergence_relative_tol,
        )
Example #11
    def has_converged(self, state):
        rel_gap = relative_gap(state.lower_bound, state.upper_bound,
                               self.galini.mc)
        abs_gap = absolute_gap(state.lower_bound, state.upper_bound,
                               self.galini.mc)

        bounds_close = is_close(
            state.lower_bound,
            state.upper_bound,
            rtol=self.bab_config['relative_gap'],
            atol=self.bab_config['absolute_gap'],
        )

        if self.galini.paranoid_mode:
            assert (state.lower_bound <= state.upper_bound or bounds_close)

        return (rel_gap <= self.bab_config['relative_gap']
                or abs_gap <= self.bab_config['absolute_gap'])
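The helpers relative_gap and absolute_gap are not shown in this snippet; a common convention, which I am assuming here rather than quoting from GALINI, is

$$\text{abs\_gap} = z_U - z_L, \qquad \text{rel\_gap} = \frac{z_U - z_L}{\max\{|z_U|,\, \varepsilon\}},$$

with $z_L$ and $z_U$ the tree's lower and upper bounds and $\varepsilon$ a small constant guarding against division by zero.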
Example #12
    def _has_converged(self, state):
        rel_gap = relative_gap(state.lower_bound, state.upper_bound)
        abs_gap = absolute_gap(state.lower_bound, state.upper_bound)

        bounds_close = is_close(
            state.lower_bound,
            state.upper_bound,
            rtol=self.relative_tolerance,
            atol=self.tolerance,
        )

        if self.galini.paranoid_mode:
            assert (state.lower_bound <= state.upper_bound or bounds_close)

        return (
            rel_gap <= self.relative_tolerance or
            abs_gap <= self.tolerance
        )
Example #13
    def _get_constraints(self, problem, problem_eval):
        constraints = []
        for i, constraint in enumerate(problem.constraints):
            lb = constraint.lower_bound
            ub = constraint.upper_bound

            if lb is None or is_inf(lb):
                constraints.append({
                    "type": "ineq",
                    "fun": problem_eval.eval_constraint,
                    "jac": problem_eval.eval_constraint_jacobian,
                    "args": [i, ub, True, True],
                })
            elif ub is None or is_inf(ub):
                constraints.append({
                    "type": "ineq",
                    "fun": problem_eval.eval_constraint,
                    "jac": problem_eval.eval_constraint_jacobian,
                    "args": [i, lb, True, False],
                })
            elif is_close(lb, ub, atol=mc.epsilon):
                constraints.append({
                    "type": "eq",
                    "fun": problem_eval.eval_constraint,
                    "jac": problem_eval.eval_constraint_jacobian,
                    "args": [i, lb, False, False],
                })
            else:
                # Constraint has both lower and upper bounds, but it's not
                # an equality constraint.
                constraints.append({
                    "type": "ineq",
                    "fun": problem_eval.eval_constraint,
                    "jac": problem_eval.eval_constraint_jacobian,
                    "args": [i, lb, True, False],
                })
                constraints.append({
                    "type": "ineq",
                    "fun": problem_eval.eval_constraint,
                    "jac": problem_eval.eval_constraint_jacobian,
                    "args": [i, ub, True, True],
                })

        return constraints
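The dictionaries built above match SciPy's constraint format for scipy.optimize.minimize (keys "type", "fun", "jac", "args"). A minimal, self-contained sketch of how such a list could be consumed, using a toy objective and constraint rather than GALINI's problem evaluator:

import numpy as np
from scipy.optimize import minimize

def constraint_value(x, rhs):
    # SciPy treats "ineq" entries as g(x) >= 0.
    return x[0] + x[1] - rhs

def constraint_jacobian(x, rhs):
    return np.array([1.0, 1.0])

constraints = [{
    "type": "ineq",
    "fun": constraint_value,
    "jac": constraint_jacobian,
    "args": [1.0],  # extra positional arguments forwarded to fun and jac
}]

# Minimize x0^2 + x1^2 subject to x0 + x1 >= 1.
result = minimize(lambda x: float(x @ x), np.zeros(2),
                  jac=lambda x: 2.0 * x,
                  constraints=constraints, method="SLSQP")
print(result.x)  # approximately [0.5, 0.5]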
Example #14
    def update(self, solution, paranoid=False, atol=None, rtol=None):
        """Update cut state with `solution`."""
        self.round += 1
        current_objective = solution.objective.value
        if paranoid:
            close = is_close(current_objective,
                             self.lower_bound,
                             atol=atol,
                             rtol=rtol)
            increased = (current_objective >= self.lower_bound or close)
            if not increased:
                msg = 'Lower bound in cuts phase decreased: {} to {}'
                raise RuntimeError(
                    msg.format(self.lower_bound, current_objective))

        self.lower_bound = current_objective
        if self.first_solution is None:
            self.first_solution = current_objective
        self.previous_solution = self.latest_solution
        self.latest_solution = current_objective
Example #15
def range_ratio(problem, root_problem):
    assert problem.num_variables == root_problem.num_variables
    lower_bounds = np.array(problem.lower_bounds)
    upper_bounds = np.array(problem.upper_bounds)
    root_lower_bounds = np.array(root_problem.lower_bounds)
    root_upper_bounds = np.array(root_problem.upper_bounds)
    denominator = np.abs(root_upper_bounds - root_lower_bounds) + mc.epsilon
    numerator = upper_bounds - lower_bounds

    numerator_mask = ~is_inf(numerator)
    denominator_mask = ~is_inf(denominator)
    finite_numerator = np.zeros_like(numerator)
    finite_denominator = np.zeros_like(denominator) + mc.epsilon
    finite_numerator[numerator_mask] = numerator[numerator_mask]
    finite_denominator[denominator_mask] = denominator[denominator_mask]

    # All bounded variables are fixed, range ratio is not possible to compute
    if is_close(np.sum(np.abs(finite_numerator)), 0.0, atol=mc.epsilon):
        return None
    return np.nan_to_num(finite_numerator / finite_denominator)
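A small numeric sketch (plain numpy, made-up bounds, infinity handling left out) of the quantity computed above: for each variable it is the width of the node's bounds relative to the width of the root's bounds, so 1.0 means untouched and 0.0 means fixed.

import numpy as np

eps = 1e-9
root_lower = np.array([0.0, -1.0, 0.0])
root_upper = np.array([10.0, 1.0, 5.0])
node_lower = np.array([0.0, -1.0, 2.0])
node_upper = np.array([5.0, 1.0, 2.0])

ratio = (node_upper - node_lower) / (np.abs(root_upper - root_lower) + eps)
print(ratio)  # approximately [0.5, 1.0, 0.0]: halved, unchanged, fixed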
Example #16
def best_upper_bound(domain, a, b):
    """Returns the best upper bound between `a` and `b`.

    Parameters
    ----------
    domain : Domain
        the variable domain
    a : float or None
    b : float or None
    """
    if b is None:
        ub = a
    elif a is not None:
        ub = min(a, b)
    else:
        return None

    if domain.is_integer() and ub is not None:
        if is_close(np.ceil(ub), ub, atol=mc.epsilon, rtol=0.0):
            return np.ceil(ub)
        return np.floor(ub)

    return ub
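A standalone sketch of the integer rounding above (numpy only; the function name is mine): an upper bound within eps of the next integer is snapped up to it, otherwise it is floored, since no integer lies between the floored value and the fractional bound.

import numpy as np

def tighten_integer_upper_bound(ub, eps=1e-9):
    # Snap numerically-integral bounds to the integer, otherwise floor them.
    if np.isclose(np.ceil(ub), ub, atol=eps, rtol=0.0):
        return np.ceil(ub)
    return np.floor(ub)

print(tighten_integer_upper_bound(3.0 - 1e-12))  # 3.0 (treated as integral)
print(tighten_integer_upper_bound(2.7))          # 2.0 (floored)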
Example #17
def best_upper_bound(var, a, b, eps):
    """Returns the best upper bound between `a` and `b`.

    Parameters
    ----------
    var : Var
        the variable
    a : float or None
    b : float or None
    eps : float
        the absolute tolerance used to decide if a bound is integer-valued
    """
    if b is None:
        ub = a
    elif a is not None:
        ub = min(a, b)
    else:
        return None

    if (var.is_integer() or var.is_binary()) and ub is not None:
        if is_close(np.ceil(ub), ub, atol=eps, rtol=0.0):
            return np.ceil(ub)
        return np.floor(ub)

    return ub
Example #18
def _variable_and_quadratic(a, b):
    if isinstance(a, core.LinearExpression):
        if len(a.children) != 1:
            return False, None, None, None

        if not isinstance(b, core.QuadraticExpression):
            return False, None, None, None

        var = a.children[0]
        coef = a.coefficient(var)

        if not is_close(np.abs(coef), 1.0, atol=mc.epsilon):
            return False, None, None, None

        coef_sign = np.sign(coef)
        return True, var, coef_sign, b

    elif isinstance(a, core.Variable):
        if not isinstance(b, core.QuadraticExpression):
            return False, None, None, None

        return True, a, 1.0, b

    return False, None, None, None
Example #19
def _detect_rlt_expression_bilinear(root_expr):
    """Matches expression of type x1 - x1x2 - x1x3 - ... - x1xn"""
    if len(root_expr.children) != 2:
        return False, None, None

    a, b = root_expr.children
    matches, var, var_sign, quadratic = _variable_and_quadratic(a, b)
    if not matches:
        matches, var, var_sign, quadratic = _variable_and_quadratic(b, a)
        if not matches:
            return False, None, None

    sum_vars = []
    for term in quadratic.terms:
        if not is_close(term.coefficient, -1.0 * var_sign, atol=mc.epsilon):
            return False, None, None
        if term.var1 != var and term.var2 != var:
            return False, None, None
        if term.var1 == var:
            sum_vars.append(term.var2)
        else:
            sum_vars.append(term.var1)

    return True, var, sum_vars
Example #20
    def _solve_problem_at_node(self, run_id, problem, relaxed_problem,
                               tree, node):
        logger.info(
            run_id,
            'Starting Cut generation iterations. Maximum iterations={}',
            self.cuts_maxiter,
        )
        generators_name = [
            g.name for g in self._cuts_generators_manager.generators
        ]
        logger.info(
            run_id,
            'Using cuts generators: {}',
            ', '.join(generators_name)
        )

        solution = self._try_solve_convex_problem(problem)
        if solution is not None:
            return solution

        if not node.has_parent:
            feasible_solution = node.initial_feasible_solution
        else:
            feasible_solution = None

        log_problem(
            logger, run_id, DEBUG, relaxed_problem,
            title='Convex Relaxation',
        )

        linear_problem = self._build_linear_relaxation(relaxed_problem)

        log_problem(
            logger, run_id, DEBUG, linear_problem.relaxed,
            title='Linearized Relaxation',
        )

        cuts_state = None
        lower_bound_search_start_time = current_time()
        if self._use_lp_cut_phase:
            logger.info(run_id, 'Start LP cut phase')
            originally_integer = []
            if not self._use_milp_cut_phase:
                for var in linear_problem.relaxed.variables:
                    vv = linear_problem.relaxed.variable_view(var)
                    if vv.domain.is_integer():
                        originally_integer.append(var)
                        linear_problem.relaxed.set_domain(var, core.Domain.REAL)

            feasible, cuts_state, mip_solution = self._perform_cut_loop(
                run_id, tree, node, problem, relaxed_problem, linear_problem,
            )

            for var in originally_integer:
                linear_problem.relaxed.set_domain(var, core.Domain.INTEGER)

            if not feasible:
                logger.info(run_id, 'LP solution is not feasible')
                self._bac_telemetry.increment_lower_bound_time(
                    seconds_elapsed_since(lower_bound_search_start_time)
                )
                return NodeSolution(mip_solution, feasible_solution)

            # Solve MILP to obtain MILP solution
            mip_solution = self._mip_solver.solve(linear_problem.relaxed)
            logger.info(
                run_id,
                'MILP solution after LP cut phase: {} {}',
                mip_solution.status,
                mip_solution,
            )
            if mip_solution.status.is_success():
                logger.update_variable(
                    run_id,
                    iteration=self._cut_loop_outer_iteration,
                    var_name='milp_solution',
                    value=mip_solution.objective_value()
                )

        self._update_node_branching_decision(
            linear_problem, mip_solution, node, problem
        )

        if self._use_milp_cut_phase:
            logger.info(run_id, 'Using MILP cut phase')
            feasible, cuts_state, mip_solution = self._perform_cut_loop(
                run_id, tree, node, problem, relaxed_problem, linear_problem,
            )

            if not feasible:
                logger.info(run_id, 'MILP cut phase solution is not feasible')
                self._bac_telemetry.increment_lower_bound_time(
                    seconds_elapsed_since(lower_bound_search_start_time)
                )
                return NodeSolution(mip_solution, feasible_solution)

        assert cuts_state is not None
        self._bac_telemetry.increment_lower_bound_time(
            seconds_elapsed_since(lower_bound_search_start_time)
        )

        if cuts_state.lower_bound >= tree.upper_bound and \
                not is_close(cuts_state.lower_bound, tree.upper_bound,
                             atol=mc.epsilon):
            # No improvement
            return NodeSolution(mip_solution, None)

        if self._timeout():
            # No time for finding primal solution
            return NodeSolution(mip_solution, None)

        upper_bound_search_start_time = current_time()

        starting_point = [v.value for v in mip_solution.variables]
        primal_solution = solve_primal_with_starting_point(
            run_id, problem, starting_point, self._nlp_solver, fix_all=True
        )
        new_primal_solution = solve_primal(
            run_id, problem, mip_solution, self._nlp_solver
        )
        if new_primal_solution is not None:
            primal_solution = new_primal_solution

        self._bac_telemetry.increment_upper_bound_time(
            seconds_elapsed_since(upper_bound_search_start_time)
        )

        if not primal_solution.status.is_success() and \
                feasible_solution is not None:
            # Could not get primal solution, but have a feasible solution
            return NodeSolution(mip_solution, feasible_solution)

        return NodeSolution(mip_solution, primal_solution)
Example #21
    def _generate(self, run_id, problem, _relaxed_problem, linear_problem, solution, tree, node):
        triple_cliques = self.__problem_info_triangle[1]
        rank_list_tri = self._get_triangle_violations(linear_problem, solution)
        # Remove non-violated constraints and sort by density first and then violation second as in manuscript
        rank_list_tri_viol = [
            el for el in rank_list_tri if el[2] >= self._thres_tri_viol
        ]
        rank_list_tri_viol.sort(key=lambda tup: tup[2], reverse=True)

        # Determine number of triangle cuts to add (proportion/absolute with upper & lower thresholds)
        nb_cuts = int(np.floor(self._sel_size * len(rank_list_tri_viol))) \
            if self._sel_size <= 1 else int(np.floor(self._sel_size))
        max_tri_cuts = min(
            max(self._min_tri_cuts, nb_cuts),
            min(self._max_tri_cuts, len(rank_list_tri_viol)))
        max_tri_cuts = int(max_tri_cuts)
        l = self._lbs
        u = self._ubs
        d = self._dbs

        # Add all triangle cuts (ranked by violation) within selection size
        logger.debug(run_id, 'Adding {} cuts', max_tri_cuts)
        for ix in range(0, max_tri_cuts):
            ineq_type = rank_list_tri_viol[ix][1]
            i, j, k = triple_cliques[rank_list_tri_viol[ix][0]]
            xi, xj, xk = problem.variables[i], problem.variables[j], problem.variables[k]
            # Generate constraints for the 4 different triangle inequality types
            cut_lb = 0
            logger.debug(run_id, 'Cut {} is of type {}', ix, ineq_type)
            logger.debug(run_id, 'd[i] = {}, d[j] = {}, d[k] = {}', d[i], d[j], d[k])
            logger.debug(run_id, 'l[i] = {}, l[j] = {}, l[k] = {}', l[i], l[j], l[k])
            logger.debug(run_id, 'u[i] = {}, u[j] = {}, u[k] = {}', u[i], u[j], u[k])
            if is_close(d[i], 0.0, atol=mc.epsilon):
                logger.warning(run_id, 'Skip Cut {}, d[i] is zero', ix)
                continue

            if is_close(d[j], 0.0, atol=mc.epsilon):
                logger.warning(run_id, 'Skip Cut {}, d[j] is zero', ix)
                continue

            if is_close(d[k], 0.0, atol=mc.epsilon):
                logger.warning(run_id, 'Skip Cut {}, d[k] is zero', ix)
                continue

            if ineq_type == 3:
                sum_expr = SumExpression([
                    QuadraticExpression([xi, xj, xk], [xj, xk, xi],
                                        [1.0/d[i]/d[j], 1.0/d[j]/d[k], 1.0/d[k]/d[i]]),
                    LinearExpression([xi, xj, xk],
                                     [
                                        -1.0/d[i] -l[j]/d[i]/d[j] -l[k]/d[i]/d[k],
                                        -1.0/d[j] -l[i]/d[j]/d[i] -l[k]/d[j]/d[k],
                                        -1.0/d[k] -l[i]/d[i]/d[k] -l[j]/d[j]/d[k]
                                     ],
                                     +l[i]*l[j]/d[i]/d[j] +l[i]*l[k]/d[i]/d[k] +l[j]*l[k]/d[j]/d[k]
                                     +l[i]/d[i] +l[j]/d[j] +l[k]/d[k])
                ])
                cut_lb = -1.0
            else:
                if ineq_type == 0:
                    sum_expr = SumExpression([
                        QuadraticExpression([xi, xj, xk], [xj, xk, xi],
                                            [-1.0/d[i]/d[j], 1.0/d[j]/d[k], -1.0/d[k]/d[i]]),
                        LinearExpression([xi, xj, xk],
                                         [
                                            1.0/d[i] +l[j]/d[i]/d[j] +l[k]/d[i]/d[k],
                                                    +l[i]/d[j]/d[i] -l[k]/d[j]/d[k],
                                                    +l[i]/d[i]/d[k] -l[j]/d[j]/d[k]
                                         ],
                                         -l[i]*l[j]/d[i]/d[j] - l[i]*l[k]/d[i]/d[k] + l[j]*l[k]/d[j]/d[k] -l[i]/d[i])
                    ])
                elif ineq_type == 1:
                    sum_expr = SumExpression([
                        QuadraticExpression([xi, xj, xk], [xj, xk, xi],
                                            [-1.0/d[i]/d[j], -1.0/d[j]/d[k], 1.0/d[k]/d[i]]),
                        LinearExpression([xi, xj, xk],
                                         [
                                                    +l[j]/d[i]/d[j] -l[k]/d[i]/d[k],
                                            1.0/d[j] +l[i]/d[j]/d[i] +l[k]/d[j]/d[k],
                                                    -l[i]/d[i]/d[k] +l[j]/d[j]/d[k]
                                         ],
                                         -l[i]*l[j]/d[i]/d[j] +l[i]*l[k]/d[i]/d[k] - l[j]*l[k]/d[j]/d[k] -l[j]/d[j])
                    ])
                elif ineq_type == 2:
                    sum_expr = SumExpression([
                        QuadraticExpression([xi, xj, xk], [xj, xk, xi],
                                            [1.0/d[i]/d[j], -1.0/d[j]/d[k], -1.0/d[k]/d[i]]),
                        LinearExpression([xi, xj, xk],
                                         [
                                                    -l[j]/d[i]/d[j] +l[k]/d[i]/d[k],
                                                    -l[i]/d[j]/d[i] +l[k]/d[j]/d[k],
                                            1.0/d[k] +l[i]/d[i]/d[k] +l[j]/d[j]/d[k]
                                         ],
                                        +l[i]*l[j]/d[i]/d[j] -l[i]*l[k]/d[i]/d[k] - l[j]*l[k]/d[j]/d[k] - l[k]/d[k])
                    ])

            cut_name = 'triangle_cut_{}_{}_{}_{}'.format(
                self._cut_outer_iteration, self._cut_round, ix, ineq_type
            )
            yield Cut(CutType.LOCAL, cut_name, sum_expr, cut_lb, None)
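My reading of the four inequality types generated above (a sketch, not taken from GALINI's documentation): after scaling each variable to the unit interval via $\hat{x}_i = (x_i - l_i)/d_i$ and writing $\hat{X}_{ij}$ for the relaxation of the product $\hat{x}_i \hat{x}_j$, types 0-3 appear to implement the standard triangle inequalities

$$
\begin{aligned}
\hat{X}_{ij} + \hat{X}_{ik} - \hat{X}_{jk} &\le \hat{x}_i,\\
\hat{X}_{ij} + \hat{X}_{jk} - \hat{X}_{ik} &\le \hat{x}_j,\\
\hat{X}_{ik} + \hat{X}_{jk} - \hat{X}_{ij} &\le \hat{x}_k,\\
\hat{x}_i + \hat{x}_j + \hat{x}_k &\le \hat{X}_{ij} + \hat{X}_{ik} + \hat{X}_{jk} + 1,
\end{aligned}
$$

which would explain the cut_lb = -1.0 offset used only for the type-3 cut.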
Example #22
def detect_rlt_constraints(problem):
    # TODO(fra): double check this is working as intended
    possible_rlt = dict()
    seen_sum = set()

    for constraint in problem.constraints:
        root_expr = constraint.root_expr
        cons_lb = constraint.lower_bound
        if cons_lb is None:
            cons_lb = -np.inf
        cons_ub = constraint.upper_bound
        if cons_ub is None:
            cons_ub = np.inf

        bounds_zero = (is_close(cons_lb, 0.0, atol=mc.epsilon)
                       and is_close(cons_ub, 0.0, atol=mc.epsilon))

        # If it's a linear expression it can be the summation to 1 or RLT
        # IF it's bilinear it can be RLT
        # if it's product it can be RLT
        if isinstance(root_expr, core.LinearExpression):
            if bounds_zero:
                is_rlt, var, summed_vars = \
                    _detect_rlt_expression_linear(root_expr)
            else:
                is_rlt = False

            bounds_one = (is_close(cons_lb, 1.0, atol=mc.epsilon)
                          and is_close(cons_ub, 1.0, atol=mc.epsilon))

            if bounds_one:
                # Check all coefficients
                is_sum = True
                for v in root_expr.children:
                    is_one = is_close(root_expr.coefficient(v),
                                      1.0,
                                      atol=mc.epsilon)
                    if not is_one:
                        is_sum = False
                if is_sum:
                    summed_vars_idx = \
                        tuple(sorted(v.idx for v in root_expr.children))
                    seen_sum.add(summed_vars_idx)

        elif isinstance(root_expr, core.SumExpression):
            if not bounds_zero:
                continue
            is_rlt, var, summed_vars = _detect_rlt_expression_product(
                root_expr)
            if not is_rlt:
                is_rlt, var, summed_vars = \
                    _detect_rlt_expression_bilinear(root_expr)

        else:
            is_rlt = False

        if not is_rlt:
            continue

        summed_vars_idx = tuple(sorted(v.idx for v in summed_vars))
        if summed_vars_idx not in possible_rlt:
            possible_rlt[summed_vars_idx] = []
        possible_rlt[summed_vars_idx].append((constraint, var, summed_vars))

    if len(possible_rlt) < len(seen_sum):
        for var_indexes, constraints in possible_rlt.items():
            if var_indexes in seen_sum:
                # it's a RLT
                for constraint, var, aux_vars in constraints:
                    constraint.metadata[PROBLEM_RLT_CONS_INFO] = (var,
                                                                  aux_vars)
    else:
        for var_indexes in seen_sum:
            constraints = possible_rlt.get(var_indexes, [])
            for constraint, var, aux_vars in constraints:
                constraint.metadata[PROBLEM_RLT_CONS_INFO] = (var, aux_vars)
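A short note on why the sum-to-one constraints collected in seen_sum matter (my reading of the detection logic above, not a statement from the GALINI docs): if the problem contains a constraint $\sum_{j} x_j = 1$ over some set of variables, multiplying both sides by another variable $x_i$ gives the RLT identity

$$x_i \sum_{j} x_j = x_i \quad\Longleftrightarrow\quad x_i - \sum_{j} x_i x_j = 0,$$

which, once each product $x_i x_j$ is replaced by its auxiliary variable $w_{ij}$, is exactly the pattern $x_i - w_{ij_1} - \dots - w_{ij_n} = 0$ matched by _detect_rlt_expression_linear, and in product form the pattern matched by _detect_rlt_expression_bilinear.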
Example #23
    def _get_sdp_decomposition(self, problem, relaxed_problem):
        start_time = current_time()
        time_limit = self._preprocess_time_limit

        dim = self._dim
        agg_list = []

        variables = [
            var for var in problem.component_data_objects(
                pe.Var, active=True, descend_into=True)
        ]
        self._variables = variables

        num_vars = len(variables)
        self._num_vars = num_vars

        var_idx_map = pe.ComponentMap([(var, idx)
                                       for idx, var in enumerate(variables)])
        self._var_idx_map = var_idx_map

        constraints = [
            constraint for constraint in problem.component_data_objects(
                pe.Constraint, active=True, descend_into=True)
        ]
        self._constraints = constraints

        objective = next(
            problem.component_data_objects(pe.Objective,
                                           active=True,
                                           descend_into=True))
        self._objective = objective

        quad_terms_per_con = [[] for _ in range(1 + len(constraints))]

        if seconds_elapsed_since(start_time) > time_limit:
            return []

        # Find all quadratic terms (across all objectives + constraints) and form an adjacency matrix for their indices
        adj_mat = np.zeros((num_vars, num_vars))

        for con_idx, constraint in enumerate([objective, *constraints]):
            if isinstance(constraint, pe.Objective):
                root_expr = constraint.expr
            else:
                root_expr = constraint.body

            quadratic_expr = None

            if isinstance(root_expr, QuadraticExpression):
                quadratic_expr = root_expr
            elif isinstance(root_expr, SumExpression):
                for arg in root_expr.args:
                    if isinstance(arg, QuadraticExpression):
                        quadratic_expr = arg
                        break

            if seconds_elapsed_since(start_time) > time_limit:
                return []

            if quadratic_expr is not None:
                for term in quadratic_expr.terms:
                    if not is_close(term.coefficient,
                                    0.0,
                                    atol=self.galini.mc.epsilon):
                        idx_var1 = var_idx_map[term.var1]
                        idx_var2 = var_idx_map[term.var2]

                        adj_mat[idx_var1, idx_var2] = 1
                        adj_mat[idx_var2, idx_var1] = 1

                        quad_terms_per_con[con_idx].append(
                            (idx_var1, idx_var2, term.coefficient))

        # Get only cliques up to the dimension of the SDP decomposition
        all_cliques_iterator = enumerate_all_cliques(
            from_numpy_matrix(adj_mat))
        for clique in all_cliques_iterator:
            if len(clique) < 2:
                continue
            elif len(clique) <= dim:
                agg_list.append(set(clique))
            else:
                break

        # Eliminate cliques that are subsets of other cliques
        agg_list = [(x, []) for x in agg_list
                    if not any(x <= y for y in agg_list if x is not y)]

        # Look in each constraint at a time for cliques up to dim in size
        nb_objs = 1
        for con_idx, constraint in enumerate([objective, *constraints]):
            if seconds_elapsed_since(start_time) > time_limit:
                return []

            adj_mat_con = np.zeros((num_vars, num_vars))
            coeff_mat_con = np.zeros((num_vars, num_vars))

            G = Graph()
            for (idx_var1, idx_var2,
                 term_coeff) in quad_terms_per_con[con_idx]:
                adj_mat_con[idx_var1, idx_var2] = 1
                adj_mat_con[idx_var2, idx_var1] = 1
                G.add_edge(idx_var1, idx_var2)
                coeff_mat_con[idx_var1, idx_var2] = term_coeff
                coeff_mat_con[idx_var2, idx_var1] = term_coeff

            # Get only cliques up to the dimension of the SDP decomposition
            agg_list_con = []
            for clique in enumerate_all_cliques(G):
                if seconds_elapsed_since(start_time) > time_limit:
                    return []

                if len(clique) < 2:
                    continue
                elif len(clique) <= dim:
                    agg_list_con.append(set(clique))
                else:
                    break

            # Eliminate cliques that are subsets of other cliques
            agg_list_con = [
                x for x in agg_list_con
                if not any(x <= y for y in agg_list_con if x is not y)
            ]

            # Aggregate coefficient info (input_nn) used as input for neural networks for each constraint
            for agg_idx, (clique, _) in enumerate(agg_list):
                for clique_con in agg_list_con:
                    if clique_con <= clique and len(
                            clique_con.intersection(clique)) > 1:
                        mat_idxs = list(
                            combinations_with_replacement(sorted(clique), 2))
                        input_nn = itemgetter(*mat_idxs)(coeff_mat_con)
                        agg_list[agg_idx][1].append(
                            (np.asarray(input_nn), 1, con_idx - nb_objs))

        # Sort clique elements now that set operations are done (the neural networks are not invariant to element order)
        agg_list = [(sorted(clique), _) for (clique, _) in agg_list]

        return agg_list
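A minimal sketch, independent of GALINI, of why the clique loops above can break early: networkx.enumerate_all_cliques yields cliques in order of increasing size, so once a clique larger than dim shows up, no smaller ones remain.

import networkx as nx

G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (0, 2), (2, 3)])  # triangle 0-1-2 plus edge 2-3

dim = 2
small_cliques = []
for clique in nx.enumerate_all_cliques(G):
    if len(clique) < 2:
        continue  # skip single vertices
    if len(clique) > dim:
        break  # cliques only get larger from here on
    small_cliques.append(set(clique))

print(small_cliques)  # the four edges: {0, 1}, {0, 2}, {1, 2}, {2, 3}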
Example #24
    def _bab_loop(self, problem, run_id, **kwargs):
        known_optimal_objective = kwargs.get('known_optimal_objective', None)
        if known_optimal_objective is not None:
            if not problem.objective.original_sense.is_minimization():
                known_optimal_objective = -known_optimal_objective

        self._bac_telemetry.start_timing(
            known_optimal_objective,
            elapsed_time(),
        )

        branching_strategy = self._algo.branching_strategy
        node_selection_strategy = self._algo.node_selection_strategy

        bab_iteration = 0

        root_node_storage = RootNodeStorage(problem)
        tree = BabTree(root_node_storage, branching_strategy,
                       node_selection_strategy)
        self._tree = tree

        logger.info(run_id, 'Finding initial feasible solution')
        initial_solution = self._algo.find_initial_solution(
            run_id, problem, tree, tree.root)
        if initial_solution is not None:
            tree.add_initial_solution(initial_solution)
            self._bac_telemetry.update_at_end_of_iteration(
                tree, elapsed_time())
            self._telemetry.log_at_end_of_iteration(run_id, bab_iteration)
            if self._algo.should_terminate(tree.state):
                return

        logger.info(run_id, 'Solving root problem')
        root_solution = self._algo.solve_problem_at_root(
            run_id, problem, tree, tree.root)
        tree.update_root(root_solution)

        self._bac_telemetry.update_at_end_of_iteration(
            tree, elapsed_time(), update_nodes_visited=False)
        self._telemetry.log_at_end_of_iteration(run_id, bab_iteration)
        bab_iteration += 1

        logger.info(run_id, 'Root problem solved, tree state {}', tree.state)
        logger.log_add_bab_node(
            run_id,
            coordinate=[0],
            lower_bound=root_solution.lower_bound,
            upper_bound=root_solution.upper_bound,
        )

        while not self._algo.should_terminate(tree.state):
            logger.info(run_id, 'Tree state at beginning of iteration: {}',
                        tree.state)
            if not tree.has_nodes():
                logger.info(run_id, 'No more nodes to visit.')
                break

            current_node = tree.next_node()
            if current_node.parent is None:
                # This is the root node.
                node_children, branching_point = tree.branch_at_node(
                    current_node)
                logger.info(run_id, 'Branched at point {}', branching_point)
                continue
            else:
                current_node_problem = current_node.storage.problem
                var_view = \
                    current_node_problem.variable_view(current_node.variable)

            logger.info(
                run_id,
                'Visiting node {}: parent state={}',
                current_node.coordinate,
                current_node.parent.state,
            )

            node_can_not_improve_solution = is_close(
                current_node.parent.lower_bound,
                tree.upper_bound,
                atol=self._algo.tolerance,
                rtol=self._algo.relative_tolerance,
            ) or current_node.parent.lower_bound > tree.upper_bound

            if node_can_not_improve_solution:
                logger.info(
                    run_id,
                    "Fathom node because it won't improve bound: node.lower_bound={}, tree.upper_bound={}",
                    current_node.parent.lower_bound,
                    tree.upper_bound,
                )
                logger.log_prune_bab_node(run_id, current_node.coordinate)
                tree.fathom_node(current_node, update_nodes_visited=True)
                self._bac_telemetry.update_at_end_of_iteration(
                    tree, elapsed_time())
                self._telemetry.log_at_end_of_iteration(run_id, bab_iteration)
                bab_iteration += 1
                continue

            solution = self._algo.solve_problem_at_node(
                run_id, current_node.storage.problem, tree, current_node)

            tree.update_node(current_node, solution)
            logger.log_add_bab_node(
                run_id,
                coordinate=current_node.coordinate,
                lower_bound=solution.lower_bound,
                upper_bound=solution.upper_bound,
            )
            current_node_converged = is_close(
                solution.lower_bound,
                solution.upper_bound,
                atol=self._algo.tolerance,
                rtol=self._algo.relative_tolerance,
            )

            if not current_node_converged and solution.upper_bound_solution is not None:
                node_children, branching_point = tree.branch_at_node(
                    current_node)
                logger.info(run_id, 'Branched at point {}', branching_point)
            else:
                # We won't explore this part of the tree anymore.
                # Add to fathomed nodes.
                logger.info(
                    run_id,
                    'Fathom node {}, converged? {}, upper_bound_solution {}',
                    current_node.coordinate, current_node_converged,
                    solution.upper_bound_solution)
                logger.log_prune_bab_node(run_id, current_node.coordinate)
                tree.fathom_node(current_node, update_nodes_visited=False)

            self._log_problem_information_at_node(run_id,
                                                  current_node.storage.problem,
                                                  solution, current_node)
            logger.info(run_id, 'New tree state at {}: {}',
                        current_node.coordinate, tree.state)
            logger.update_variable(run_id, 'z_l', tree.nodes_visited,
                                   tree.lower_bound)
            logger.update_variable(run_id, 'z_u', tree.nodes_visited,
                                   tree.upper_bound)
            logger.info(
                run_id,
                'Child {} has solutions: LB={} UB={}',
                current_node.coordinate,
                solution.lower_bound_solution,
                solution.upper_bound_solution,
            )
            self._bac_telemetry.update_at_end_of_iteration(
                tree, elapsed_time())
            self._telemetry.log_at_end_of_iteration(run_id, bab_iteration)
            bab_iteration += 1

        logger.info(run_id, 'Branch & Bound Finished: {}', tree.state)
        logger.info(run_id, 'Branch & Bound Converged?: {}',
                    self._algo._has_converged(tree.state))
        logger.info(run_id, 'Branch & Bound Timeout?: {}',
                    self._algo._timeout())
        logger.info(run_id, 'Branch & Bound Node Limit Exceeded?: {}',
                    self._algo._node_limit_exceeded(tree.state))
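Written out, the pruning test in the loop above fathoms a node whenever its parent's lower bound cannot improve on the incumbent; assuming is_close follows the usual numpy-style tolerance test, the condition is

$$\underline{z}_{\text{parent}} > \bar{z}_{\text{tree}} \quad\text{or}\quad |\underline{z}_{\text{parent}} - \bar{z}_{\text{tree}}| \le \text{atol} + \text{rtol} \cdot |\bar{z}_{\text{tree}}|.$$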
Example #25
def perform_obbt_on_model(model, problem, upper_bound, timelimit,
                          simplex_maxiter):
    """Perform OBBT on Pyomo model using Coramin.

    Parameters
    ----------
    model : ConcreteModel
        the pyomo concrete model
    problem : Problem
        the GALINI problem
    upper_bound : float or None
        the objective value upper bound, if known
    timelimit : int
        a timelimit, in seconds
    simplex_maxiter : int
        the maximum number of simplex iterations

    """
    obbt_start_time = current_time()

    for var in model.component_data_objects(ctype=pe.Var):
        var.domain = pe.Reals

        if not (var.lb is None or np.isfinite(var.lb)):
            var.setlb(None)

        if not (var.ub is None or np.isfinite(var.ub)):
            var.setub(None)

    relaxed_model = relax(model)

    solver = pe.SolverFactory('cplex_persistent')
    solver.set_instance(relaxed_model)
    # TODO(fra): make this non-cplex specific
    simplex_limits = solver._solver_model.parameters.simplex.limits  # pylint: disable=protected-access
    simplex_limits.iterations.set(simplex_maxiter)
    # collect variables in nonlinear constraints
    nonlinear_variables = ComponentSet()
    for constraint in model.component_data_objects(ctype=pe.Constraint):
        # skip linear constraint
        if constraint.body.polynomial_degree() == 1:
            continue

        for var in identify_variables(constraint.body, include_fixed=False):
            # Coramin will complain about variables that are fixed
            # Note: Coramin uses a hard-coded 1e-6 tolerance
            if not var.has_lb() or not var.has_ub():
                nonlinear_variables.add(var)
            else:
                if not np.abs(var.ub - var.lb) < 1e-6:
                    nonlinear_variables.add(var)

    relaxed_vars = [
        getattr(relaxed_model, v.name) for v in nonlinear_variables
    ]

    logger.info(0, 'Performing OBBT on {} variables', len(relaxed_vars))

    # Avoid Coramin raising an exception if the problem has no objective
    # value but we set an upper bound.
    objectives = model.component_data_objects(pe.Objective,
                                              active=True,
                                              sort=True,
                                              descend_into=True)
    if len(list(objectives)) == 0:
        upper_bound = None

    time_left = timelimit - seconds_elapsed_since(obbt_start_time)
    with timeout(time_left, 'Timeout in OBBT'):
        result = coramin_obbt.perform_obbt(relaxed_model,
                                           solver,
                                           varlist=relaxed_vars,
                                           objective_bound=upper_bound)

    if result is None:
        return

    logger.debug(0, 'New Bounds')
    for v, new_lb, new_ub in zip(relaxed_vars, *result):
        vv = problem.variable_view(v.name)
        if new_lb is None or new_ub is None:
            logger.warning(0, 'Could not tighten variable {}', v.name)
        old_lb = vv.lower_bound()
        old_ub = vv.upper_bound()
        new_lb = best_lower_bound(vv.domain, new_lb, old_lb)
        new_ub = best_upper_bound(vv.domain, new_ub, old_ub)
        if new_lb is not None and new_ub is not None:
            if is_close(new_lb, new_ub, atol=mc.epsilon):
                if old_lb is not None and \
                        is_close(new_lb, old_lb, atol=mc.epsilon):
                    new_ub = new_lb
                else:
                    new_lb = new_ub
        vv.set_lower_bound(new_lb)
        vv.set_upper_bound(new_ub)
        logger.debug(0, '  {}: [{}, {}]', v.name, vv.lower_bound(),
                     vv.upper_bound())
Example #26
    def generate(self, problem, relaxed_problem, mip_solution, tree, node):
        self._cut_round += 1
        self._cut_outer_iteration += 1

        clique_with_rank = self._get_triangle_violations()
        # Remove non-violated constraints and sort by density first and then violation second as in manuscript
        clique_with_rank = [
            clique for clique in clique_with_rank
            if clique[2] >= self._thres_tri_viol
        ]
        clique_with_rank.sort(key=lambda clique: clique[2], reverse=True)

        # Determine number of triangle cuts to add (proportion/absolute with upper & lower thresholds)
        if self._sel_size <= 1:
            nb_cuts = int(np.floor(self._sel_size * len(clique_with_rank)))
        else:
            nb_cuts = int(np.floor(self._sel_size))

        max_tri_cuts = min(max(self._min_tri_cuts, nb_cuts),
                           min(self._max_tri_cuts, len(clique_with_rank)))
        max_tri_cuts = int(max_tri_cuts)

        lb = self._lower_bounds
        ub = self._upper_bounds
        dom = self._domains
        mc = self.galini.mc

        # Add all triangle cuts (ranked by violation) within selection size
        self.logger.debug('Adding {} cuts', max_tri_cuts)

        cuts = []
        for ix, (clique, ineq_type,
                 viol) in enumerate(clique_with_rank[:max_tri_cuts]):
            xi, xj, xk = clique
            # Generate constraints for the 4 different triangle inequality types
            self.logger.debug('Cut {} is of type {}', ix, ineq_type)
            self.logger.debug('d[i] = {}, d[j] = {}, d[k] = {}', dom[xi],
                              dom[xj], dom[xk])
            self.logger.debug('l[i] = {}, l[j] = {}, l[k] = {}', lb[xi],
                              lb[xj], lb[xk])
            self.logger.debug('u[i] = {}, u[j] = {}, u[k] = {}', ub[xi],
                              ub[xj], ub[xk])

            if is_close(dom[xi], 0.0, atol=mc.epsilon):
                self.logger.warning('Skip Cut {}, d[i] is zero', ix)
                continue

            if is_close(dom[xj], 0.0, atol=mc.epsilon):
                self.logger.warning('Skip Cut {}, d[j] is zero', ix)
                continue

            if is_close(dom[xk], 0.0, atol=mc.epsilon):
                self.logger.warning('Skip Cut {}, d[k] is zero', ix)
                continue

            xi_xj = self._aux_vars[id(xi), id(xj)]
            xi_xk = self._aux_vars[id(xi), id(xk)]
            xj_xk = self._aux_vars[id(xj), id(xk)]

            if ineq_type == 0:
                cut_expr = (
                    xi_xj * (-1.0 / dom[xi] / dom[xj]) + xj_xk *
                    (+1.0 / dom[xj] / dom[xk]) + xi_xk *
                    (-1.0 / dom[xk] / dom[xi])
                ) + (
                    xi * (1.0 / dom[xi] + lb[xj] / dom[xi] / dom[xj] +
                          lb[xk] / dom[xi] / dom[xk]) + xj *
                    (lb[xi] / dom[xj] / dom[xi] - lb[xk] / dom[xj] / dom[xk]) +
                    xk *
                    (lb[xi] / dom[xi] / dom[xk] - lb[xj] / dom[xj] / dom[xk])
                ) + (-lb[xi] * lb[xj] / dom[xi] / dom[xj] +
                     -lb[xi] * lb[xk] / dom[xi] / dom[xk] +
                     +lb[xj] * lb[xk] / dom[xj] / dom[xk] + -lb[xi] / dom[xi])
                cuts.append(cut_expr >= 0)
            elif ineq_type == 1:
                cut_expr = (
                    xi_xj * (-1.0 / dom[xi] / dom[xj]) + xj_xk *
                    (-1.0 / dom[xj] / dom[xk]) + xi_xk *
                    (+1.0 / dom[xk] / dom[xi])
                ) + (
                    xi *
                    (lb[xj] / dom[xi] / dom[xj] - lb[xk] / dom[xi] / dom[xk]) +
                    xj * (1.0 / dom[xj] + lb[xi] / dom[xj] / dom[xi] +
                          lb[xk] / dom[xj] / dom[xk]) + xk *
                    (-lb[xi] / dom[xi] / dom[xk] + lb[xj] / dom[xj] / dom[xk])
                ) + (-lb[xi] * lb[xj] / dom[xi] / dom[xj] +
                     +lb[xi] * lb[xk] / dom[xi] / dom[xk] +
                     -lb[xj] * lb[xk] / dom[xj] / dom[xk] + -lb[xj] / dom[xj])
                cuts.append(cut_expr >= 0)
            elif ineq_type == 2:
                cut_expr = (
                    xi_xj * (1.0 / dom[xi] / dom[xj]) + xj_xk *
                    (-1.0 / dom[xj] / dom[xk]) + xi_xk *
                    (-1.0 / dom[xk] / dom[xi])
                ) + (xi *
                     (-lb[xj] / dom[xi] / dom[xj] + lb[xk] / dom[xi] / dom[xk])
                     + xj *
                     (-lb[xi] / dom[xj] / dom[xi] + lb[xk] / dom[xj] / dom[xk])
                     + xk * (1.0 / dom[xk] + lb[xi] / dom[xi] / dom[xk] +
                             lb[xj] / dom[xj] / dom[xk])) + (
                                 +lb[xi] * lb[xj] / dom[xi] / dom[xj] +
                                 -lb[xi] * lb[xk] / dom[xi] / dom[xk] +
                                 -lb[xj] * lb[xk] / dom[xj] / dom[xk] +
                                 -lb[xk] / dom[xk])
                cuts.append(cut_expr >= 0)
            elif ineq_type == 3:
                cut_expr = (xi_xj * (1.0 / dom[xi] / dom[xj]) + xj_xk *
                            (1.0 / dom[xj] / dom[xk]) + xi_xk *
                            (1.0 / dom[xk] / dom[xi])) + (
                                xi *
                                (-1.0 / dom[xi] - lb[xj] / dom[xi] / dom[xj] -
                                 lb[xk] / dom[xi] / dom[xk]) + xj *
                                (-1.0 / dom[xj] - lb[xi] / dom[xj] / dom[xi] -
                                 lb[xk] / dom[xj] / dom[xk]) + xk *
                                (-1.0 / dom[xk] - lb[xi] / dom[xi] / dom[xk] -
                                 lb[xj] / dom[xj] / dom[xk])) + (
                                     lb[xi] * lb[xj] / dom[xi] / dom[xj] +
                                     lb[xi] * lb[xk] / dom[xi] / dom[xk] +
                                     lb[xj] * lb[xk] / dom[xj] / dom[xk] +
                                     lb[xi] / dom[xi] + lb[xj] / dom[xj] +
                                     lb[xk] / dom[xk])
                cuts.append(cut_expr + 1 >= 0)
            else:
                raise RuntimeError(
                    'Invalid inequality type {}'.format(ineq_type))

        return cuts
Example #27
    def _bab_loop(self, model, **kwargs):
        known_optimal_objective = kwargs.get('known_optimal_objective', None)
        if known_optimal_objective is not None:
            if not model._objective.is_originally_minimizing:
                known_optimal_objective = -known_optimal_objective

        branching_strategy = self.branching_strategy
        node_selection_strategy = self.node_selection_strategy

        root_node_storage = self.init_node_storage(model)
        tree = BabTree(root_node_storage, branching_strategy,
                       node_selection_strategy)
        self._tree = tree

        self.logger.info('Finding initial feasible solution')

        with self._telemetry.timespan(
                'branch_and_bound.find_initial_solution'):
            initial_solution = self.find_initial_solution(
                model, tree, tree.root)

        prev_elapsed_time = None

        if initial_solution is not None:
            tree.add_initial_solution(initial_solution, self.galini.mc)
            if self.should_terminate(tree.state):
                delta_t, prev_elapsed_time = _compute_delta_t(
                    self.galini, prev_elapsed_time)
                update_at_end_of_iteration(self.galini, tree, delta_t)
                return True

        self.logger.info('Solving root problem')
        with self._telemetry.timespan(
                'branch_and_bound.solve_problem_at_root'):
            root_solution = self.solve_problem_at_root(tree, tree.root)
        tree.update_root(root_solution)

        delta_t, prev_elapsed_time = _compute_delta_t(self.galini,
                                                      prev_elapsed_time)
        update_at_end_of_iteration(self.galini, tree, delta_t)

        self.logger.info('Root problem solved, tree state {}', tree.state)
        self.logger.info('Root problem solved, root solution {}',
                         root_solution)
        self.logger.log_add_bab_node(
            coordinate=[0],
            lower_bound=root_solution.lower_bound,
            upper_bound=root_solution.upper_bound,
        )

        if not root_solution.lower_bound_success:
            if not root_solution.upper_bound_success:
                return False

        mc = self.galini.mc

        while not self.should_terminate(tree.state):
            self.logger.info('Tree state at beginning of iteration: {}',
                             tree.state)
            if not tree.has_nodes():
                self.logger.info('No more nodes to visit.')
                break

            current_node = tree.next_node()
            if current_node.parent is None:
                # This is the root node.
                node_children, branching_point = tree.branch_at_node(
                    current_node, mc)
                self.logger.info('Branched at point {}', branching_point)
                continue

            self.logger.info(
                'Visiting node {}: parent state={}',
                current_node.coordinate,
                current_node.parent.state,
            )

            node_can_not_improve_solution = is_close(
                current_node.parent.lower_bound,
                tree.upper_bound,
                atol=self.bab_config['absolute_gap'],
                rtol=self.bab_config['relative_gap'],
            ) or current_node.parent.lower_bound > tree.upper_bound

            if node_can_not_improve_solution:
                self.logger.info(
                    "Fathom node because it won't improve bound: node.lower_bound={}, tree.upper_bound={}",
                    current_node.parent.lower_bound,
                    tree.upper_bound,
                )
                self.logger.log_prune_bab_node(current_node.coordinate)
                tree.fathom_node(current_node, update_nodes_visited=True)

                delta_t, prev_elapsed_time = _compute_delta_t(
                    self.galini, prev_elapsed_time)
                update_at_end_of_iteration(self.galini, tree, delta_t)

                continue

            with self._telemetry.timespan(
                    'branch_and_bound.solve_problem_at_node'):
                solution = self.solve_problem_at_node(tree, current_node)
            assert solution is not None

            tree.update_node(current_node, solution)
            self.logger.info('Node {} solution: {}', current_node.coordinate,
                             solution)
            self.logger.log_add_bab_node(
                coordinate=current_node.coordinate,
                lower_bound=solution.lower_bound,
                upper_bound=solution.upper_bound,
            )
            current_node_converged = is_close(
                solution.lower_bound,
                tree.upper_bound,
                atol=self.bab_config['absolute_gap'],
                rtol=self.bab_config['relative_gap'],
            ) or solution.lower_bound > tree.upper_bound

            node_relaxation_is_feasible_or_unbounded = (
                solution.lower_bound_solution is not None
                and (solution.lower_bound_solution.status.is_success()
                     or solution.lower_bound_solution.status.is_unbounded()))

            if not current_node_converged and node_relaxation_is_feasible_or_unbounded:
                node_children, branching_point = tree.branch_at_node(
                    current_node, mc)
                self.logger.info('Branched at point {}', branching_point)
            else:
                # We won't explore this part of the tree anymore.
                # Add to fathomed nodes.
                self.logger.info(
                    'Fathom node {}, lower_bound_solution: {}, tree upper bound: {}',
                    current_node.coordinate, solution.lower_bound,
                    tree.upper_bound)
                self.logger.log_prune_bab_node(current_node.coordinate)
                tree.fathom_node(current_node, update_nodes_visited=False)

            self.logger.info('New tree state at {}: {}',
                             current_node.coordinate, tree.state)
            self.logger.update_variable('z_l', tree.nodes_visited,
                                        tree.lower_bound)
            self.logger.update_variable('z_u', tree.nodes_visited,
                                        tree.upper_bound)
            self.logger.info(
                'Child {} has solutions: LB={} UB={}',
                current_node.coordinate,
                solution.lower_bound_solution,
                solution.upper_bound_solution,
            )

            delta_t, prev_elapsed_time = _compute_delta_t(
                self.galini, prev_elapsed_time)
            update_at_end_of_iteration(self.galini, tree, delta_t)

        self.logger.info('Branch & Bound Finished: {}', tree.state)
        self.logger.info('Branch & Bound Converged?: {}',
                         self.has_converged(tree.state))
        self.logger.info('Branch & Bound Timeout?: {}',
                         self.galini.timelimit.timeout())
        self.logger.info('Branch & Bound Node Limit Exceeded?: {}',
                         self.node_limit_exceeded(tree.state))

        return True