Code example #1
File: master_initialize.py  Project: pazochoa/pyomo
def init_fixed_disjuncts(solve_data, config):
    """Initialize by solving the problem with the current disjunct values."""
    # TODO error checking to make sure that the user gave proper disjuncts

    # fix the disjuncts in the linear GDP and send for solution.
    solve_data.mip_iteration += 1
    linear_GDP = solve_data.linear_GDP.clone()
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving subproblem with original user-specified disjunct values.")
    TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP)
    mip_result = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_result:
        _, mip_var_values = mip_result
        # use the mip_var_values to create the NLP subproblem
        nlp_model = solve_data.working_model.clone()
        # copy in the discrete variable values
        copy_and_fix_mip_values_to_nlp(nlp_model.GDPopt_utils.working_var_list,
                                       mip_var_values, config)
        TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
        solve_data.nlp_iteration += 1
        nlp_result = solve_NLP(nlp_model, solve_data, config)
        nlp_feasible, nlp_var_values, nlp_duals = nlp_result
        if nlp_feasible:
            update_nlp_progress_indicators(nlp_model, solve_data, config)
            add_outer_approximation_cuts(nlp_var_values, nlp_duals, solve_data,
                                         config)
        add_integer_cut(mip_var_values,
                        solve_data,
                        config,
                        feasible=nlp_feasible)
    else:
        config.logger.error('Linear GDP infeasible for initial user-specified '
                            'disjunct values. '
                            'Skipping initialization.')
Code example #2
def init_custom_disjuncts(solve_data, config):
    """Initialize by using user-specified custom disjuncts."""
    # TODO error checking to make sure that the user gave proper disjuncts
    for active_disjunct_set in config.custom_init_disjuncts:
        # custom_init_disjuncts contains a list of sets, giving the disjuncts
        # active at each initialization iteration

        # fix the disjuncts in the linear GDP and send for solution.
        solve_data.mip_iteration += 1
        linear_GDP = solve_data.linear_GDP.clone()
        config.logger.info(
            "Generating initial linear GDP approximation by "
            "solving subproblems with user-specified active disjuncts.")
        for orig_disj, clone_disj in zip(
                solve_data.original_model.GDPopt_utils.disjunct_list,
                linear_GDP.GDPopt_utils.disjunct_list):
            if orig_disj in active_disjunct_set:
                clone_disj.indicator_var.fix(True)
        mip_result = solve_linear_GDP(linear_GDP, solve_data, config)
        if mip_result.feasible:
            nlp_result = solve_disjunctive_subproblem(mip_result, solve_data,
                                                      config)
            if nlp_result.feasible:
                add_subproblem_cuts(nlp_result, solve_data, config)
            add_integer_cut(mip_result.var_values,
                            solve_data.linear_GDP,
                            solve_data,
                            config,
                            feasible=nlp_result.feasible)
        else:
            config.logger.error('Linear GDP infeasible for user-specified '
                                'custom initialization disjunct set %s. '
                                'Skipping that set and continuing on.' %
                                list(disj.name
                                     for disj in active_disjunct_set))
Code example #3
def init_fixed_disjuncts(solve_data, config):
    """Initialize by solving the problem with the current disjunct values."""
    # TODO error checking to make sure that the user gave proper disjuncts

    # fix the disjuncts in the linear GDP and send for solution.
    solve_data.mip_iteration += 1
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving subproblem with original user-specified disjunct values.")
    linear_GDP = solve_data.linear_GDP.clone()
    TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP)
    mip_result = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_result.feasible:
        nlp_result = solve_disjunctive_subproblem(mip_result, solve_data,
                                                  config)
        if nlp_result.feasible:
            add_subproblem_cuts(nlp_result, solve_data, config)
        add_integer_cut(mip_result.var_values,
                        solve_data.linear_GDP,
                        solve_data,
                        config,
                        feasible=nlp_result.feasible)
    else:
        config.logger.error('Linear GDP infeasible for initial user-specified '
                            'disjunct values. '
                            'Skipping initialization.')
Code example #4
File: master_initialize.py  Project: Pyomo/pyomo
def init_custom_disjuncts(solve_data, config):
    """Initialize by using user-specified custom disjuncts."""
    # TODO error checking to make sure that the user gave proper disjuncts
    for active_disjunct_set in config.custom_init_disjuncts:
        # custom_init_disjuncts contains a list of sets, giving the disjuncts
        # active at each initialization iteration

        # fix the disjuncts in the linear GDP and send for solution.
        solve_data.mip_iteration += 1
        linear_GDP = solve_data.linear_GDP.clone()
        config.logger.info(
            "Generating initial linear GDP approximation by "
            "solving subproblems with user-specified active disjuncts.")
        for orig_disj, clone_disj in zip(
                solve_data.original_model.GDPopt_utils.disjunct_list,
                linear_GDP.GDPopt_utils.disjunct_list
        ):
            if orig_disj in active_disjunct_set:
                clone_disj.indicator_var.fix(1)
        mip_result = solve_linear_GDP(linear_GDP, solve_data, config)
        if mip_result.feasible:
            nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_subproblem_cuts(nlp_result, solve_data, config)
            add_integer_cut(
                mip_result.var_values, solve_data.linear_GDP, solve_data,
                config, feasible=nlp_result.feasible)
        else:
            config.logger.error(
                'Linear GDP infeasible for user-specified '
                'custom initialization disjunct set %s. '
                'Skipping that set and continuing on.'
                % list(disj.name for disj in active_disjunct_set))
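As the loop over config.custom_init_disjuncts shows, that option is an iterable of disjunct sets: each set names the disjuncts of the original model to force active in one initialization pass. A minimal sketch of how such a value might be supplied through the GDPopt solver interface; the model m, its disjunct names, and the subsolver names are placeholders, not taken from the examples above:

from pyomo.environ import SolverFactory

# Hypothetical model `m` with disjuncts m.unit_a.use / m.unit_a.skip, etc.
SolverFactory('gdpopt').solve(
    m,
    strategy='LOA',
    init_strategy='custom_disjuncts',
    custom_init_disjuncts=[
        [m.unit_a.use, m.unit_b.skip],   # disjuncts active in the first pass
        [m.unit_a.skip, m.unit_b.use],   # disjuncts active in the second pass
    ],
    mip_solver='glpk',
    nlp_solver='ipopt')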
Code example #5
File: master_initialize.py  Project: CanLi1/pyomo-1
def init_max_binaries(solve_data, config):
    """Initialize by maximizing binary variables and disjuncts.

    This function activates as many binary variables and disjuncts as
    feasible.

    """
    solve_data.mip_iteration += 1
    linear_GDP = solve_data.linear_GDP.clone()
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving a subproblem that maximizes "
        "the sum of all binary and logical variables.")
    # Set up binary maximization objective
    next(linear_GDP.component_data_objects(Objective, active=True)).deactivate()
    binary_vars = (
        v for v in linear_GDP.component_data_objects(
            ctype=Var, descend_into=(Block, Disjunct))
        if v.is_binary() and not v.fixed)
    linear_GDP.GDPopt_utils.max_binary_obj = Objective(
        expr=sum(binary_vars), sense=maximize)

    # Solve
    mip_results = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_results.feasible:
        nlp_result = solve_disjunctive_subproblem(mip_results, solve_data, config)
        if nlp_result.feasible:
            add_subproblem_cuts(nlp_result, solve_data, config)
        add_integer_cut(mip_results.var_values, solve_data.linear_GDP, solve_data, config,
                        feasible=nlp_result.feasible)
    else:
        config.logger.info(
            "Linear relaxation for initialization was infeasible. "
            "Problem is infeasible.")
        return False
Code example #6
def GDPopt_iteration_loop(solve_data, config):
    """Algorithm main loop.

    Returns True if successful convergence is obtained. False otherwise.

    """
    while solve_data.master_iteration < config.iterlim:
        # Set iteration counters for new master iteration.
        solve_data.master_iteration += 1
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # print line for visual display
        config.logger.info(
            '---GDPopt Master Iteration %s---'
            % solve_data.master_iteration)

        # solve linear master problem
        with time_code(solve_data.timing, 'mip'):
            mip_result = solve_LOA_master(solve_data, config)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break

        # Solve NLP subproblem
        if solve_data.active_strategy == 'LOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_local_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_outer_approximation_cuts(nlp_result, solve_data, config)
        elif solve_data.active_strategy == 'GLOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_global_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_affine_cuts(nlp_result, solve_data, config)
        elif solve_data.active_strategy == 'RIC':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_local_subproblem(mip_result, solve_data, config)
        else:
            raise ValueError('Unrecognized strategy: ' + solve_data.active_strategy)

        # Add integer cut
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=nlp_result.feasible)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break
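The LOA / GLOA / RIC branches dispatched in this loop correspond to the strategy option of the GDPopt solver. A minimal sketch of a typical call, assuming Pyomo with GDPopt plus a MIP and an NLP subsolver are installed; the model m and the subsolver names are placeholders:

from pyomo.environ import SolverFactory

# `m` is a hypothetical GDP model built with pyomo.gdp Disjunct/Disjunction components.
results = SolverFactory('gdpopt').solve(
    m,
    strategy='LOA',      # the loop above also dispatches 'GLOA' and 'RIC'
    mip_solver='glpk',   # used for the linear master problems
    nlp_solver='ipopt',  # used for the disjunctive subproblems
    iterlim=30)          # caps solve_data.master_iteration in the while condition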
Code example #7
File: master_initialize.py  Project: pazochoa/pyomo
def init_max_binaries(solve_data, config):
    """Initialize by maximizing binary variables and disjuncts.

    This function activates as many binary variables and disjuncts as
    feasible.

    """
    solve_data.mip_iteration += 1
    linear_GDP = solve_data.linear_GDP.clone()
    config.logger.info("Generating initial linear GDP approximation by "
                       "solving a subproblem that maximizes "
                       "the sum of all binary and logical variables.")
    # Set up binary maximization objective
    linear_GDP.GDPopt_utils.objective.deactivate()
    binary_vars = (v for v in linear_GDP.component_data_objects(
        ctype=Var, descend_into=(Block, Disjunct))
                   if v.is_binary() and not v.fixed)
    linear_GDP.GDPopt_utils.max_binary_obj = Objective(expr=sum(binary_vars),
                                                       sense=maximize)

    # Solve
    mip_results = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_results:
        _, mip_var_values = mip_results
        # use the mip_var_values to create the NLP subproblem
        nlp_model = solve_data.working_model.clone()
        # copy in the discrete variable values
        copy_and_fix_mip_values_to_nlp(nlp_model.GDPopt_utils.working_var_list,
                                       mip_var_values, config)
        TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
        solve_data.nlp_iteration += 1
        nlp_result = solve_NLP(nlp_model, solve_data, config)
        nlp_feasible, nlp_var_values, nlp_duals = nlp_result
        if nlp_feasible:
            update_nlp_progress_indicators(nlp_model, solve_data, config)
            add_outer_approximation_cuts(nlp_var_values, nlp_duals, solve_data,
                                         config)
        add_integer_cut(mip_var_values,
                        solve_data,
                        config,
                        feasible=nlp_feasible)
    else:
        config.logger.info(
            "Linear relaxation for initialization was infeasible. "
            "Problem is infeasible.")
        return False
Code example #8
def GDPopt_iteration_loop(solve_data, config):
    """Algorithm main loop.

    Returns True if successful convergence is obtained. False otherwise.

    """
    while solve_data.master_iteration < config.iterlim:
        # Set iteration counters for new master iteration.
        solve_data.master_iteration += 1
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # print line for visual display
        config.logger.info('---GDPopt Master Iteration %s---' %
                           solve_data.master_iteration)

        # solve linear master problem
        if solve_data.current_strategy == 'LOA':
            mip_result = solve_LOA_master(solve_data, config)
        elif solve_data.current_strategy == 'GLOA':
            mip_result = solve_GLOA_master(solve_data, config)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break

        # Solve NLP subproblem
        if solve_data.current_strategy == 'LOA':
            nlp_result = solve_LOA_subproblem(mip_result.var_values,
                                              solve_data, config)
            if nlp_result.feasible:
                add_outer_approximation_cuts(nlp_result, solve_data, config)
        elif solve_data.current_strategy == 'GLOA':
            nlp_result = solve_global_NLP(mip_result.var_values, solve_data,
                                          config)
            # TODO add affine cuts

        # Add integer cut
        add_integer_cut(mip_result.var_values,
                        solve_data,
                        config,
                        feasible=nlp_result.feasible)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break
Code example #9
File: master_initialize.py  Project: pazochoa/pyomo
def init_custom_disjuncts(solve_data, config):
    """Initialize by using user-specified custom disjuncts."""
    # TODO error checking to make sure that the user gave proper disjuncts
    for active_disjunct_set in config.custom_init_disjuncts:
        # custom_init_disjuncts contains a list of sets, giving the disjuncts
        # active at each initialization iteration

        # fix the disjuncts in the linear GDP and send for solution.
        solve_data.mip_iteration += 1
        linear_GDP = solve_data.linear_GDP.clone()
        config.logger.info(
            "Generating initial linear GDP approximation by "
            "solving subproblems with user-specified active disjuncts.")
        for orig_disj, clone_disj in zip(
                solve_data.original_model.GDPopt_utils.orig_disjuncts_list,
                linear_GDP.GDPopt_utils.orig_disjuncts_list):
            if orig_disj in active_disjunct_set:
                clone_disj.indicator_var.fix(1)
        mip_result = solve_linear_GDP(linear_GDP, solve_data, config)
        if mip_result:
            _, mip_var_values = mip_result
            # use the mip_var_values to create the NLP subproblem
            nlp_model = solve_data.working_model.clone()
            # copy in the discrete variable values
            copy_and_fix_mip_values_to_nlp(
                nlp_model.GDPopt_utils.working_var_list, mip_var_values,
                config)
            TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
            solve_data.nlp_iteration += 1
            nlp_result = solve_NLP(nlp_model, solve_data, config)
            nlp_feasible, nlp_var_values, nlp_duals = nlp_result
            if nlp_feasible:
                update_nlp_progress_indicators(nlp_model, solve_data, config)
                add_outer_approximation_cuts(nlp_var_values, nlp_duals,
                                             solve_data, config)
            add_integer_cut(mip_var_values,
                            solve_data,
                            config,
                            feasible=nlp_feasible)
        else:
            config.logger.error('Linear GDP infeasible for user-specified '
                                'custom initialization disjunct set %s. '
                                'Skipping that set and continuing on.' %
                                list(disj.name
                                     for disj in active_disjunct_set))
Code example #10
File: iterate.py  Project: mskarha/pyomo
def GDPopt_iteration_loop(solve_data, config):
    """Algorithm main loop.

    Returns True if successful convergence is obtained. False otherwise.

    """
    while solve_data.master_iteration < config.iterlim:
        # Set iteration counters for new master iteration.
        solve_data.master_iteration += 1
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # print line for visual display
        config.logger.info(
            '---GDPopt Master Iteration %s---'
            % solve_data.master_iteration)

        # solve linear master problem
        with time_code(solve_data.timing, 'mip'):
            mip_result = solve_LOA_master(solve_data, config)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break

        # Solve NLP subproblem
        if solve_data.current_strategy == 'LOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_local_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_outer_approximation_cuts(nlp_result, solve_data, config)
        elif solve_data.current_strategy == 'GLOA':
            with time_code(solve_data.timing, 'nlp'):
                nlp_result = solve_global_subproblem(mip_result, solve_data, config)
            if nlp_result.feasible:
                add_affine_cuts(nlp_result, solve_data, config)

        # Add integer cut
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=nlp_result.feasible)

        # Check termination conditions
        if algorithm_should_terminate(solve_data, config):
            break
Code example #11
File: master_initialize.py  Project: Pyomo/pyomo
def init_fixed_disjuncts(solve_data, config):
    """Initialize by solving the problem with the current disjunct values."""
    # TODO error checking to make sure that the user gave proper disjuncts

    # fix the disjuncts in the linear GDP and send for solution.
    solve_data.mip_iteration += 1
    config.logger.info(
        "Generating initial linear GDP approximation by "
        "solving subproblem with original user-specified disjunct values.")
    linear_GDP = solve_data.linear_GDP.clone()
    TransformationFactory('gdp.fix_disjuncts').apply_to(linear_GDP)
    mip_result = solve_linear_GDP(linear_GDP, solve_data, config)
    if mip_result.feasible:
        nlp_result = solve_disjunctive_subproblem(mip_result, solve_data, config)
        if nlp_result.feasible:
            add_subproblem_cuts(nlp_result, solve_data, config)
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=nlp_result.feasible)
    else:
        config.logger.error(
            'Linear GDP infeasible for initial user-specified '
            'disjunct values. '
            'Skipping initialization.')
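Because 'gdp.fix_disjuncts' fixes each disjunct according to the indicator_var value already on the model, this initialization expects the user to pre-select the starting disjuncts before invoking the solver. A hypothetical sketch (model and disjunct names are placeholders; in older Pyomo versions indicator_var is a binary Var, so 1/0 would be used instead of True/False):

from pyomo.environ import SolverFactory

# Pre-select the disjuncts to start from, then let GDPopt initialize from them.
m.unit_a.use.indicator_var.set_value(True)
m.unit_a.skip.indicator_var.set_value(False)
SolverFactory('gdpopt').solve(
    m, strategy='LOA', init_strategy='fix_disjuncts',
    mip_solver='glpk', nlp_solver='ipopt')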
Code example #12
File: master_initialize.py  Project: pazochoa/pyomo
def init_set_covering(solve_data, config):
    """Initialize by solving problems to cover the set of all disjuncts.

    The purpose of this initialization is to generate linearizations
    corresponding to each of the disjuncts.

    This work is based upon prototyping work done by Eloy Fernandez at
    Carnegie Mellon University.

    """
    config.logger.info(
        "Generating initial linear GDP approximation by solving subproblems "
        "to cover all nonlinear disjuncts.")
    disjunct_needs_cover = list(
        any(constr.body.polynomial_degree() not in (0, 1)
            for constr in disj.component_data_objects(
                ctype=Constraint, active=True, descend_into=True))
        for disj in solve_data.working_model.GDPopt_utils.working_disjuncts_list)
    iter_count = 1
    while (any(disjunct_needs_cover)
           and iter_count <= config.set_cover_iterlim):
        solve_data.mip_iteration += 1
        linear_GDP = solve_data.linear_GDP.clone()
        linear_GDP.GDPopt_utils.no_backtracking.activate()
        # Solve set covering MIP
        mip_results = solve_set_cover_MIP(linear_GDP, disjunct_needs_cover,
                                          solve_data, config)
        if not mip_results:
            # problem is infeasible; abort initialization
            return False
        # solve local NLP
        _, mip_var_values, mip_disjunct_values = mip_results
        nlp_model = solve_data.working_model.clone()
        copy_and_fix_mip_values_to_nlp(nlp_model.GDPopt_utils.working_var_list,
                                       mip_var_values, config)
        TransformationFactory('gdp.fix_disjuncts').apply_to(nlp_model)
        solve_data.nlp_iteration += 1
        nlp_result = solve_NLP(nlp_model, solve_data, config)
        nlp_feasible, nlp_var_values, nlp_duals = nlp_result
        if nlp_feasible:
            # if successful, update sets
            active_disjuncts = list(
                fabs(val - 1) <= config.integer_tolerance
                for val in mip_disjunct_values)
            disjunct_needs_cover = list(
                (needed_cover and not was_active)
                for (needed_cover, was_active)
                in zip(disjunct_needs_cover, active_disjuncts))
            update_nlp_progress_indicators(nlp_model, solve_data, config)
            add_outer_approximation_cuts(nlp_var_values, nlp_duals, solve_data,
                                         config)
        add_integer_cut(mip_var_values,
                        solve_data,
                        config,
                        feasible=nlp_feasible)

        iter_count += 1

    if any(disjunct_needs_cover):
        # Iteration limit was hit without a full covering of all nonlinear
        # disjuncts
        config.logger.warning(
            'Iteration limit reached for set covering initialization '
            'without covering all disjuncts.')
        return False
    return True
Code example #13
def init_set_covering(solve_data, config):
    """Initialize by solving problems to cover the set of all disjuncts.

    The purpose of this initialization is to generate linearizations
    corresponding to each of the disjuncts.

    This work is based upon prototyping work done by Eloy Fernandez at
    Carnegie Mellon University.

    """
    config.logger.info("Starting set covering initialization.")
    # List of True/False flags indicating whether the corresponding disjunct
    # in disjunct_list still needs to be covered by the initialization
    disjunct_needs_cover = list(
        any(constr.body.polynomial_degree() not in (0, 1)
            for constr in disj.component_data_objects(
                ctype=Constraint, active=True, descend_into=True))
        for disj in solve_data.working_model.GDPopt_utils.disjunct_list)
    # Set up set covering mip
    set_cover_mip = solve_data.linear_GDP.clone()
    # Deactivate any active objectives on the clone
    for obj in set_cover_mip.component_data_objects(Objective, active=True):
        obj.deactivate()
    iter_count = 1
    while (any(disjunct_needs_cover)
           and iter_count <= config.set_cover_iterlim):
        config.logger.info("%s disjuncts need to be covered." %
                           disjunct_needs_cover.count(True))
        # Solve set covering MIP
        mip_result = solve_set_cover_mip(set_cover_mip, disjunct_needs_cover,
                                         solve_data, config)
        if not mip_result.feasible:
            # problem is infeasible; abort initialization
            return False
        # solve local NLP
        subprob_result = solve_disjunctive_subproblem(mip_result, solve_data,
                                                      config)
        if subprob_result.feasible:
            # if successful, update sets
            active_disjuncts = list(
                fabs(val - 1) <= config.integer_tolerance
                for val in mip_result.disjunct_values)
            # Update the disjunct needs cover list
            disjunct_needs_cover = list(
                (needed_cover and not was_active)
                for (needed_cover, was_active)
                in zip(disjunct_needs_cover, active_disjuncts))
            add_subproblem_cuts(subprob_result, solve_data, config)
        add_integer_cut(mip_result.var_values,
                        solve_data.linear_GDP,
                        solve_data,
                        config,
                        feasible=subprob_result.feasible)
        add_integer_cut(mip_result.var_values,
                        set_cover_mip,
                        solve_data,
                        config,
                        feasible=subprob_result.feasible)

        iter_count += 1

    if any(disjunct_needs_cover):
        # Iteration limit was hit without a full covering of all nonlinear
        # disjuncts
        config.logger.warning(
            'Iteration limit reached for set covering initialization '
            'without covering all disjuncts.')
        return False

    config.logger.info("Initialization complete.")
    return True
Code example #14
File: master_initialize.py  Project: Pyomo/pyomo
def init_set_covering(solve_data, config):
    """Initialize by solving problems to cover the set of all disjuncts.

    The purpose of this initialization is to generate linearizations
    corresponding to each of the disjuncts.

    This work is based upon prototyping work done by Eloy Fernandez at
    Carnegie Mellon University.

    """
    config.logger.info("Starting set covering initialization.")
    # List of True/False flags indicating whether the corresponding disjunct
    # in disjunct_list still needs to be covered by the initialization
    disjunct_needs_cover = list(
        any(constr.body.polynomial_degree() not in (0, 1)
            for constr in disj.component_data_objects(
                ctype=Constraint, active=True, descend_into=True))
        for disj in solve_data.working_model.GDPopt_utils.disjunct_list)
    # Set up set covering mip
    set_cover_mip = solve_data.linear_GDP.clone()
    # Deactivate any active objectives on the clone
    for obj in set_cover_mip.component_data_objects(Objective, active=True):
        obj.deactivate()
    iter_count = 1
    while (any(disjunct_needs_cover) and
           iter_count <= config.set_cover_iterlim):
        config.logger.info(
            "%s disjuncts need to be covered." %
            disjunct_needs_cover.count(True)
        )
        # Solve set covering MIP
        mip_result = solve_set_cover_mip(
            set_cover_mip, disjunct_needs_cover, solve_data, config)
        if not mip_result.feasible:
            # problem is infeasible; abort initialization
            return False
        # solve local NLP
        subprob_result = solve_disjunctive_subproblem(mip_result, solve_data, config)
        if subprob_result.feasible:
            # if successful, update sets
            active_disjuncts = list(
                fabs(val - 1) <= config.integer_tolerance
                for val in mip_result.disjunct_values)
            # Update the disjunct needs cover list
            disjunct_needs_cover = list(
                (needed_cover and not was_active)
                for (needed_cover, was_active) in zip(disjunct_needs_cover,
                                                      active_disjuncts))
            add_subproblem_cuts(subprob_result, solve_data, config)
        add_integer_cut(
            mip_result.var_values, solve_data.linear_GDP, solve_data, config,
            feasible=subprob_result.feasible)
        add_integer_cut(
            mip_result.var_values, set_cover_mip, solve_data, config,
            feasible=subprob_result.feasible)

        iter_count += 1

    if any(disjunct_needs_cover):
        # Iteration limit was hit without a full covering of all nonlinear
        # disjuncts
        config.logger.warning(
            'Iteration limit reached for set covering initialization '
            'without covering all disjuncts.')
        return False

    config.logger.info("Initialization complete.")
    return True
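The coverage bookkeeping in this loop is plain boolean list manipulation: a disjunct stops needing coverage once it has been active in a feasible subproblem. A self-contained illustration of that update rule with made-up values:

# Made-up values illustrating one pass of the cover update used above.
disjunct_needs_cover = [True, True, False]
active_disjuncts = [True, False, False]    # which disjuncts the set covering MIP activated
disjunct_needs_cover = [
    needed_cover and not was_active
    for needed_cover, was_active in zip(disjunct_needs_cover, active_disjuncts)]
print(disjunct_needs_cover)                # [False, True, False]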