Example #1
File: commands.py  Project: Pyomo/pyomo
def results_schema():
    if len(sys.argv) > 1:
        print("results_schema  - Print the predefined schema in a SolverResults object")
    options = Options(schema=True)
    r=SolverResults()
    repn = r._repn_(options)
    r.pprint(sys.stdout, options, repn=repn)
Example #2
def results_schema():
    if len(sys.argv) > 1:
        print(
            "results_schema  - Print the predefined schema in a SolverResults object"
        )
    options = Options(schema=True)
    r = SolverResults()
    repn = r._repn_(options)
    r.pprint(sys.stdout, options, repn=repn)
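For context: the schema printed by the two snippets above is the same structure that Pyomo's solver interfaces return from solve(). Below is a minimal consumer-side sketch, assuming a locally installed GLPK solver; the tiny model and its names are illustrative only and not part of the example above.

from pyomo.environ import ConcreteModel, NonNegativeReals, Objective, SolverFactory, Var
from pyomo.opt import SolverStatus, TerminationCondition

# Trivial LP whose only purpose is to produce a SolverResults object.
model = ConcreteModel()
model.x = Var(domain=NonNegativeReals)
model.obj = Objective(expr=model.x + 1)

results = SolverFactory('glpk').solve(model)  # returns a SolverResults instance
if (results.solver.status == SolverStatus.ok
        and results.solver.termination_condition == TerminationCondition.optimal):
    print('optimal; reported lower bound:', results.problem.lower_bound)
else:
    print('not solved to optimality:', results.solver.termination_condition)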
Example #3
File: res.py  Project: wannasmile/pyomo
    def __call__(self, filename, res=None, soln=None, suffixes=[]):
        """
        Parse a *.results file
        """
        if res is None:
            res = SolverResults()
        #
        res.read(filename, using_yaml=False)
        return res
Example #4
File: res.py  Project: Juanlu001/pyomo
    def __call__(self, filename, res=None, soln=None, suffixes=[]):
        """
        Parse a *.results file
        """
        if res is None:
            res = SolverResults()
        #
        res.read(filename, using_yaml=False)
        return res
Example #5
    def subproblem_solve(gdp, config):
        subproblem = gdp.clone()
        TransformationFactory('gdp.bigm').apply_to(subproblem)
        main_obj = next(
            subproblem.component_data_objects(Objective, active=True))
        obj_sign = 1 if main_obj.sense == minimize else -1

        try:
            result = SolverFactory(config.solver).solve(
                subproblem, **config.solver_args)
        except RuntimeError as e:
            config.logger.warning(
                "Solver encountered RuntimeError. Treating as infeasible. "
                "Msg: %s\n%s" % (str(e), traceback.format_exc()))
            var_values = [
                v.value for v in subproblem.GDPbb_utils.variable_list
            ]
            return obj_sign * float('inf'), SolverResults(), var_values

        var_values = [v.value for v in subproblem.GDPbb_utils.variable_list]
        term_cond = result.solver.termination_condition
        if result.solver.status is SolverStatus.ok and any(
                term_cond == valid_cond
                for valid_cond in (tc.optimal, tc.locallyOptimal,
                                   tc.feasible)):
            return value(main_obj.expr), result, var_values
        elif term_cond == tc.unbounded:
            return obj_sign * float('-inf'), result, var_values
        elif term_cond == tc.infeasible:
            return obj_sign * float('inf'), result, var_values
        else:
            config.logger.warning("Unknown termination condition of %s" %
                                  term_cond)
            return obj_sign * float('inf'), result, var_values
Example #6
def setup_solver_environment(model, config):
    solve_data = GDPoptSolveData()  # data object for storing solver state
    solve_data.config = config
    solve_data.results = SolverResults()
    solve_data.timing = Container()
    min_logging_level = logging.INFO if config.tee else None
    with time_code(solve_data.timing, 'total', is_main_timer=True), \
            lower_logger_level_to(config.logger, min_logging_level), \
            create_utility_block(model, 'GDPopt_utils', solve_data):

        # Create a working copy of the original model
        solve_data.original_model = model
        solve_data.working_model = model.clone()
        setup_results_object(solve_data, config)
        solve_data.active_strategy = config.strategy
        util_block = solve_data.working_model.GDPopt_utils

        # Save model initial values.
        # These can be used later to initialize NLP subproblems.
        solve_data.initial_var_values = list(v.value
                                             for v in util_block.variable_list)
        solve_data.best_solution_found = None

        # Integer cuts exclude particular discrete decisions
        util_block.integer_cuts = ConstraintList(doc='integer cuts')

        # Set up iteration counters
        solve_data.master_iteration = 0
        solve_data.mip_iteration = 0
        solve_data.nlp_iteration = 0

        # set up bounds
        solve_data.LB = float('-inf')
        solve_data.UB = float('inf')
        solve_data.iteration_log = {}

        # Flag indicating whether the solution improved in the past
        # iteration or not
        solve_data.feasible_solution_improved = False

        yield solve_data  # yield setup solver environment

        if (solve_data.best_solution_found is not None
                and solve_data.best_solution_found
                is not solve_data.original_model):
            # Update values on the original model
            copy_var_list_values(
                from_list=solve_data.best_solution_found.GDPopt_utils.
                variable_list,
                to_list=solve_data.original_model.GDPopt_utils.variable_list,
                config=config)

    # Finalize results object
    solve_data.results.problem.lower_bound = solve_data.LB
    solve_data.results.problem.upper_bound = solve_data.UB
    solve_data.results.solver.iterations = solve_data.master_iteration
    solve_data.results.solver.timing = solve_data.timing
    solve_data.results.solver.user_time = solve_data.timing.total
    solve_data.results.solver.wallclock_time = solve_data.timing.total
Example #7
File: nlp_solve.py  Project: CanLi1/pyomo-1
def get_infeasible_result_object(model, message=""):
    infeas_result = SubproblemResult()
    infeas_result.feasible = False
    infeas_result.var_values = list(v.value for v in model.GDPopt_utils.variable_list)
    infeas_result.pyomo_results = SolverResults()
    infeas_result.pyomo_results.solver.termination_condition = tc.infeasible
    infeas_result.pyomo_results.message = message
    infeas_result.dual_values = list(None for _ in model.GDPopt_utils.constraint_list)
    return infeas_result
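Downstream branch-and-bound code typically only needs the termination condition of such a hand-built result. A small illustrative sketch, independent of the GDPopt helpers above (the variable names here are made up for the illustration):

from pyomo.opt import SolverResults, TerminationCondition as tc

# Sentinel result for an infeasible subproblem, mirroring the helper above.
infeasible_results = SolverResults()
infeasible_results.solver.termination_condition = tc.infeasible
infeasible_results.solver.message = 'subproblem reported infeasible'

if infeasible_results.solver.termination_condition == tc.infeasible:
    print('pruning this node:', infeasible_results.solver.message)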
Example #8
File: util.py  Project: jsiirola/pyomo
def setup_solve_data(model, config):
    solve_data = MindtPySolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Bunch()
    solve_data.curr_int_sol = []
    solve_data.should_terminate = False
    solve_data.integer_list = []

    # if the objective function is a constant, dual bound constraint is not added.
    obj = next(model.component_data_objects(ctype=Objective, active=True))
    if obj.expr.polynomial_degree() == 0:
        config.use_dual_bound = False

    if config.use_fbbt:
        fbbt(model)
        # TODO: logging_level is not logging.INFO here
        config.logger.info('Use the fbbt to tighten the bounds of variables')

    solve_data.original_model = model
    solve_data.working_model = model.clone()

    # Set up iteration counters
    solve_data.nlp_iter = 0
    solve_data.mip_iter = 0
    solve_data.mip_subiter = 0
    solve_data.nlp_infeasible_counter = 0
    if config.init_strategy == 'FP':
        solve_data.fp_iter = 1

    # set up bounds
    solve_data.LB = float('-inf')
    solve_data.UB = float('inf')
    solve_data.LB_progress = [solve_data.LB]
    solve_data.UB_progress = [solve_data.UB]
    if config.single_tree and (config.add_no_good_cuts
                               or config.use_tabu_list):
        solve_data.stored_bound = {}
    if config.strategy == 'GOA' and (config.add_no_good_cuts
                                     or config.use_tabu_list):
        solve_data.num_no_good_cuts_added = {}

    # Flag indicating whether the solution improved in the past
    # iteration or not
    solve_data.solution_improved = False
    solve_data.bound_improved = False

    if config.nlp_solver == 'ipopt':
        if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
            solve_data.working_model.ipopt_zL_out = Suffix(
                direction=Suffix.IMPORT)
        if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
            solve_data.working_model.ipopt_zU_out = Suffix(
                direction=Suffix.IMPORT)

    return solve_data
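The ipopt_zL_out/ipopt_zU_out components above are import suffixes through which Ipopt reports bound multipliers back to the model. A minimal sketch of the same Suffix pattern for ordinary constraint duals, assuming a working Ipopt installation (the model itself is illustrative only):

from pyomo.environ import ConcreteModel, Constraint, Objective, Suffix, SolverFactory, Var

model = ConcreteModel()
model.x = Var(bounds=(0, 10), initialize=1.0)
model.c = Constraint(expr=model.x >= 2)
model.obj = Objective(expr=(model.x - 1) ** 2)
model.dual = Suffix(direction=Suffix.IMPORT)  # ask the solver to export constraint duals

results = SolverFactory('ipopt').solve(model)
print(model.dual[model.c])  # multiplier on the binding constraint x >= 2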
Example #9
    def process_logfile(self):
        """
        Process logfile
        """
        results = SolverResults()

        # For the lazy programmer, handle long variable names
        prob = results.problem
        solv = results.solver
        solv.termination_condition = TerminationCondition.unknown
        stats = results.solver.statistics
        bbound = stats.branch_and_bound

        prob.upper_bound = float('inf')
        prob.lower_bound = float('-inf')
        bbound.number_of_created_subproblems = 0
        bbound.number_of_bounded_subproblems = 0

        with open(self._log_file, 'r') as output:
            for line in output:
                toks = line.split()
                if 'tree is empty' in line:
                    bbound.number_of_created_subproblems = toks[-1][:-1]
                    bbound.number_of_bounded_subproblems = toks[-1][:-1]
                elif len(toks) == 2 and toks[0] == "sys":
                    solv.system_time = toks[1]
                elif len(toks) == 2 and toks[0] == "user":
                    solv.user_time = toks[1]
                elif len(toks) > 2 and (toks[0], toks[2]) == ("TIME",
                                                              "EXCEEDED;"):
                    solv.termination_condition = TerminationCondition.maxTimeLimit
                elif len(toks) > 5 and (toks[:6] == [
                        'PROBLEM', 'HAS', 'NO', 'DUAL', 'FEASIBLE', 'SOLUTION'
                ]):
                    solv.termination_condition = TerminationCondition.unbounded
                elif len(toks) > 5 and (toks[:6] == [
                        'PROBLEM', 'HAS', 'NO', 'PRIMAL', 'FEASIBLE',
                        'SOLUTION'
                ]):
                    solv.termination_condition = TerminationCondition.infeasible
                elif len(toks) > 4 and (toks[:5] == [
                        'PROBLEM', 'HAS', 'NO', 'FEASIBLE', 'SOLUTION'
                ]):
                    solv.termination_condition = TerminationCondition.infeasible
                elif len(toks) > 6 and (toks[:7] == [
                        'LP', 'RELAXATION', 'HAS', 'NO', 'DUAL', 'FEASIBLE',
                        'SOLUTION'
                ]):
                    solv.termination_condition = TerminationCondition.unbounded

        return results
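Because the parser above writes into the standard slots of the results object, callers can read the values back uniformly regardless of which solver produced the log. Illustrative only, assuming results is the object returned by process_logfile():

stats = results.solver.statistics.branch_and_bound
print('termination:', results.solver.termination_condition)
print('bounds:', results.problem.lower_bound, results.problem.upper_bound)
print('subproblems created / bounded:',
      stats.number_of_created_subproblems,
      stats.number_of_bounded_subproblems)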
Example #10
    def _setup_results_obj(self):
        #
        # Create a results object
        #
        results = SolverResults()
        #
        # SOLVER
        #
        solv = results.solver
        solv.name = self.options.subsolver
        solv.wallclock_time = self.wall_time
        cpu_ = []
        for res in self.results:
            if getattr(res.solver, 'cpu_time', None) is not None:
                cpu_.append(res.solver.cpu_time)
        if cpu_:
            solv.cpu_time = sum(cpu_)
        #
        # TODO: detect infeasibilities, etc
        solv.termination_condition = self.termination_condition
        #
        # PROBLEM
        #
        prob = results.problem
        prob.name = self._instance.name
        prob.number_of_constraints = self._instance.statistics.number_of_constraints
        prob.number_of_variables = self._instance.statistics.number_of_variables
        prob.number_of_binary_variables = self._instance.statistics.number_of_binary_variables
        prob.number_of_integer_variables = \
            self._instance.statistics.number_of_integer_variables
        prob.number_of_continuous_variables = \
            self._instance.statistics.number_of_continuous_variables
        prob.number_of_objectives = self._instance.statistics.number_of_objectives
        #
        # SOLUTION(S)
        #
        self._instance.solutions.store_to(results)
        return results
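The store_to() call at the end snapshots the model's current solution(s) into the results object; load_from() is the inverse direction. A hedged sketch of that round trip, assuming model is an already-solved ConcreteModel (not part of the solver class above):

from pyomo.opt import SolverResults

snapshot = SolverResults()
model.solutions.store_to(snapshot)   # copy current variable values into the results object
# ... the model's variables may be changed here, e.g. by another subproblem solve ...
model.solutions.load_from(snapshot)  # restore the stored solution onto the model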
Example #11
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set solver as an MINLP
        solver = SolverFactory(config.solver)
        solve_data = GDPbbSolveData()
        solve_data.timing = Container()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total'), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver" %
                (".".join(map(str, self.version())), config.solver))

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(
                config.solver))
            setup_results_object(solve_data, config)
            # Initialize a list of indicator vars for updating the model after solving
            indicator_list_name = unique_component_name(
                model, "_indicator_list")
            indicator_vars = []
            for disjunction in model.component_data_objects(ctype=Disjunction,
                                                            active=True):
                for disjunct in disjunction.disjuncts:
                    indicator_vars.append(disjunct.indicator_var)
            setattr(model, indicator_list_name, indicator_vars)

            # get objective sense
            objectives = model.component_data_objects(Objective, active=True)
            obj = next(objectives, None)
            obj_sign = 1 if obj.sense == minimize else -1
            solve_data.results.problem.sense = obj.sense
            # clone original model for root node of branch and bound
            root = model.clone()

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the original disjunctions that were active and are soon to be inactive
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction
                for disjunction in root.GDPbb_utils.disjunction_list
                if disjunction.active)

            # this list keeps track of the disjunctions that have been activated by the branch and bound
            root.GDPbb_utils.curr_active_disjunctions = []

            # deactivate all disjunctions in the model
            # self.indicate(root)
            for djn in root.GDPbb_utils.unenforced_disjunctions:
                djn.deactivate()
            # Deactivate all disjuncts in model. To be reactivated when disjunction
            # is reactivated.
            for disj in root.component_data_objects(Disjunct, active=True):
                disj._deactivate_without_fixing_indicator()

            # Satisfiability check would go here

            # solve the root node
            config.logger.info("Solving the root node.")
            obj_value, result, _ = self.subproblem_solve(root, solver, config)

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model)
            # Ordering tuple: (objective value, disjunctions_left, -counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(
                heap, ((obj_sign * obj_value, disjunctions_left, -counter),
                       root, result, root.GDPbb_utils.variable_list))
            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tup, mdl, mdl_results, vars = heapq.heappop(heap)
                old_obj_val, disjunctions_left, _ = sort_tup
                config.logger.info(
                    "Exploring node with LB %.10g and %s inactive disjunctions."
                    % (old_obj_val, disjunctions_left))

                # if all the originally active disjunctions are active, solve and
                # return solution
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    for orig_var, soln_var in zip(
                            model.GDPbb_utils.variable_list, vars):
                        orig_var.value = soln_var.value

                    solve_data.results.problem.lower_bound = mdl_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = mdl_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.termination_condition = mdl_results.solver.termination_condition
                    return solve_data.results

                next_disjunction = mdl.GDPbb_utils.unenforced_disjunctions.pop(
                    0)
                config.logger.info("Activating disjunction %s" %
                                   next_disjunction.name)
                next_disjunction.activate()
                mdl.GDPbb_utils.curr_active_disjunctions.append(
                    next_disjunction)
                djn_left = len(mdl.GDPbb_utils.unenforced_disjunctions)
                for disj in next_disjunction.disjuncts:
                    disj._activate_without_unfixing_indicator()
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 0  # initially set all indicator vars to zero
                added_disj_counter = 0
                for disj in next_disjunction.disjuncts:
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 1
                    mnew = mdl.clone()
                    if not disj.indicator_var.fixed:
                        disj.indicator_var = 0

                    # Check feasibility
                    if config.check_sat and satisfiable(
                            mnew, config.logger) is False:
                        # problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, vars = self.subproblem_solve(
                        mnew, solver, config)
                    counter += 1
                    ordering_tuple = (obj_sign * obj_value, djn_left, -counter)
                    heapq.heappush(heap, (ordering_tuple, mnew, result, vars))
                    added_disj_counter = added_disj_counter + 1
                config.logger.info(
                    "Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s."
                    % (added_disj_counter, djn_left, len(heap)))
Example #12
    def solve(self, model, **kwds):
        """Solve the model.

        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.

        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Bunch()
        solve_data.curr_int_sol = []
        solve_data.should_terminate = False
        solve_data.integer_list = []

        check_config(config)

        # if the objective function is a constant, dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        if config.use_fbbt:
            fbbt(model)
            # TODO: logging_level is not logging.INFO here
            config.logger.info(
                'Use the fbbt to tighten the bounds of variables')

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info('---Starting MindtPy---')

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(
                solve_data,
                config,
                move_linear_objective=(config.init_strategy == 'FP' or
                                       config.add_regularization is not None),
                use_mcpp=config.use_mcpp,
                updata_var_con_list=config.add_regularization is None)
            # The epigraph constraint is very "flat" for branching rules,
            # we want to use to original model for the main mip.
            if MindtPy.objective_list[0].expr.polynomial_degree() in {
                    1, 0
            } and config.add_regularization is not None:
                MindtPy.objective_list[0].activate()
                MindtPy.objective_constr.deactivate()
                MindtPy.objective.deactivate()

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.feas_opt = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.cuts = Block()
            lin.deactivate()

            # no-good cuts exclude particular discrete decisions
            lin.no_good_cuts = ConstraintList(doc='no-good cuts')
            # Feasible no-good cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary no_good_cuts ConstraintList.
            lin.feasible_no_good_cuts = ConstraintList(
                doc='explored no-good cuts')
            lin.feasible_no_good_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0
            solve_data.nlp_infeasible_counter = 0
            if config.init_strategy == 'FP':
                solve_data.fp_iter = 1

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and (config.add_no_good_cuts
                                       or config.use_tabu_list):
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and (config.add_no_good_cuts
                                             or config.use_tabu_list):
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = RangeSet(
                    len(MindtPy.nonlinear_constraint_list),
                    doc='Integer index set over the nonlinear constraints.')
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False
            solve_data.bound_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the main problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_main(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)
            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                copy_var_list_values(MindtPy.variable_list, [
                    i
                    for i in solve_data.original_model.component_data_objects(
                        Var) if not i.fixed
                ], config)
                # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total
        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
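The LB/UB fields maintained above are what MindtPy's convergence checks operate on elsewhere. As a generic illustration of a relative/absolute gap test over such bounds (this is not MindtPy's actual stopping rule):

def bounds_converged(LB, UB, rel_tol=1e-4, abs_tol=1e-6):
    # Generic gap check; never converged while either bound is still infinite.
    if LB == float('-inf') or UB == float('inf'):
        return False
    return (UB - LB) <= max(abs_tol, rel_tol * max(abs(LB), abs(UB), 1.0))

print(bounds_converged(99.99, 100.0))           # True under the default tolerances
print(bounds_converged(float('-inf'), 100.0))   # False: no finite lower bound yet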
Example #13
    def _load(self, fin, res, soln, suffixes):

        if res is None:
            res = SolverResults()
        #
        msg = ""
        line = fin.readline()
        if line.strip() == "":
            line = fin.readline()
        while line:
            if line[0] == '\n' or (line[0] == '\r' and line[1] == '\n'):
                break
            msg += line
            line = fin.readline()
        z = []
        line = fin.readline()
        if line[:7] == "Options":
            line = fin.readline()
            nopts = int(line)
            need_vbtol = False
            if nopts > 4:  # WEH - when is this true?
                nopts -= 2
                need_vbtol = True
            for i in xrange(nopts + 4):
                line = fin.readline()
                z += [int(line)]
            if need_vbtol:  # WEH - when is this true?
                line = fin.readline()
                z += [float(line)]
        else:
            raise ValueError("no Options line found")
        n = z[nopts + 3]  # variables
        m = z[nopts + 1]  # constraints
        x = []
        y = []
        i = 0
        while i < m:
            line = fin.readline()
            y.append(float(line))
            i += 1
        i = 0
        while i < n:
            line = fin.readline()
            x.append(float(line))
            i += 1
        objno = [0, 0]
        line = fin.readline()
        if line:  # WEH - when is this true?
            if line[:5] != "objno":  #pragma:nocover
                raise ValueError("expected 'objno', found '%s'" % (line))
            t = line.split()
            if len(t) != 3:
                raise ValueError("expected two numbers in objno line, "
                                 "but found '%s'" % (line))
            objno = [int(t[1]), int(t[2])]
        res.solver.message = msg.strip()
        res.solver.message = res.solver.message.replace("\n", "; ")
        res.solver.message = pyutilib.misc.yaml_fix(res.solver.message)
        ##res.solver.instanceName = osrl.header.instanceName
        ##res.solver.systime = osrl.header.time
        res.solver.status = SolverStatus.ok
        soln_status = SolutionStatus.unknown
        if (objno[1] >= 0) and (objno[1] <= 99):
            objno_message = "OPTIMAL SOLUTION FOUND!"
            res.solver.termination_condition = TerminationCondition.optimal
            res.solver.status = SolverStatus.ok
            soln_status = SolutionStatus.optimal
        elif (objno[1] >= 100) and (objno[1] <= 199):
            objno_message = "Optimal solution indicated, but ERROR LIKELY!"
            res.solver.termination_condition = TerminationCondition.optimal
            res.solver.status = SolverStatus.warning
            soln_status = SolutionStatus.optimal
        elif (objno[1] >= 200) and (objno[1] <= 299):
            objno_message = "INFEASIBLE SOLUTION: constraints cannot be satisfied!"
            res.solver.termination_condition = TerminationCondition.infeasible
            res.solver.status = SolverStatus.warning
            soln_status = SolutionStatus.infeasible
        elif (objno[1] >= 300) and (objno[1] <= 399):
            objno_message = "UNBOUNDED PROBLEM: the objective can be improved without limit!"
            res.solver.termination_condition = TerminationCondition.unbounded
            res.solver.status = SolverStatus.warning
            soln_status = SolutionStatus.unbounded
        elif (objno[1] >= 400) and (objno[1] <= 499):
            objno_message = (
                "EXCEEDED MAXIMUM NUMBER OF ITERATIONS: the solver "
                "was stopped by a limit that you set!")
            res.solver.termination_condition = TerminationCondition.maxIterations
            res.solver.status = SolverStatus.warning
            soln_status = SolutionStatus.stoppedByLimit
        elif (objno[1] >= 500) and (objno[1] <= 599):
            objno_message = (
                "FAILURE: the solver stopped by an error condition "
                "in the solver routines!")
            res.solver.termination_condition = TerminationCondition.internalSolverError
            res.solver.status = SolverStatus.error
            soln_status = SolutionStatus.error
        res.solver.id = objno[1]
        ##res.problem.name = osrl.header.instanceName
        if res.solver.termination_condition in [
                TerminationCondition.unknown,
                TerminationCondition.maxIterations,
                TerminationCondition.minFunctionValue,
                TerminationCondition.minStepLength,
                TerminationCondition.globallyOptimal,
                TerminationCondition.locallyOptimal,
                TerminationCondition.optimal,
                TerminationCondition.maxEvaluations,
                TerminationCondition.other, TerminationCondition.infeasible
        ]:

            if soln is None:
                soln = res.solution.add()
            res.solution.status = soln_status
            soln.status_description = objno_message
            soln.message = msg.strip()
            soln.message = res.solver.message.replace("\n", "; ")
            soln_variable = soln.variable
            i = 0
            for var_value in x:
                soln_variable["v" + str(i)] = {"Value": var_value}
                i = i + 1
            soln_constraint = soln.constraint
            if any(re.match(suf, "dual") for suf in suffixes):
                for i in xrange(0, len(y)):
                    soln_constraint["c" + str(i)] = {"Dual": y[i]}

            ### Read suffixes ###
            line = fin.readline()
            while line:
                line = line.strip()
                if line == "":
                    continue
                line = line.split()
                if line[0] != 'suffix':
                    # We assume this is the start of a
                    # section like kestrel_option, which
                    # comes after all suffixes.
                    remaining = ""
                    line = fin.readline()
                    while line:
                        remaining += line.strip() + "; "
                        line = fin.readline()
                    res.solver.message += remaining
                    break
                unmasked_kind = int(line[1])
                kind = unmasked_kind & 3  # 0-var, 1-con, 2-obj, 3-prob
                convert_function = int
                if (unmasked_kind & 4) == 4:
                    convert_function = float
                nvalues = int(line[2])
                namelen = int(line[3])
                tablen = int(line[4])
                tabline = int(line[5])
                suffix_name = fin.readline().strip()
                if any(re.match(suf, suffix_name) for suf in suffixes):
                    # ignore translation of the table number to string value for now,
                    # this information can be obtained from the solver documentation
                    for n in xrange(tabline):
                        fin.readline()
                    if kind == 0:  # Var
                        for cnt in xrange(nvalues):
                            suf_line = fin.readline().split()
                            soln_variable["v"+suf_line[0]][suffix_name] = \
                                convert_function(suf_line[1])
                    elif kind == 1:  # Con
                        for cnt in xrange(nvalues):
                            suf_line = fin.readline().split()
                            key = "c" + suf_line[0]
                            if key not in soln_constraint:
                                soln_constraint[key] = {}
                            # convert the first letter of the suffix name to upper case,
                            # mainly for pretty-print / output purposes. these are lower-cased
                            # when loaded into real suffixes, so it is largely redundant.
                            translated_suffix_name = suffix_name[0].upper(
                            ) + suffix_name[1:]
                            soln_constraint[key][translated_suffix_name] = \
                                convert_function(suf_line[1])
                    elif kind == 2:  # Obj
                        for cnt in xrange(nvalues):
                            suf_line = fin.readline().split()
                            soln.objective.setdefault("o"+suf_line[0],{})[suffix_name] = \
                                convert_function(suf_line[1])
                    elif kind == 3:  # Prob
                        # Skip problem kind suffixes for now. Not sure the
                        # best place to put them in the results object
                        for cnt in xrange(nvalues):
                            suf_line = fin.readline().split()
                            soln.problem[suffix_name] = convert_function(
                                suf_line[1])
                else:
                    # do not store the suffix in the solution object
                    for cnt in xrange(nvalues):
                        fin.readline()
                line = fin.readline()

        #
        # This is a bit of a hack to accommodate PICO.  If
        # the PICO parser has parsed the # of constraints, then
        # don't try to read it in from the *.sol file.  The reason
        # is that these may be inconsistent values!
        #
        if res.problem.number_of_constraints == 0:
            res.problem.number_of_constraints = m
        res.problem.number_of_variables = n
        res.problem.number_of_objectives = 1
        return res
Example #14
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)

        # configuration confirmation
        if config.single_tree:
            config.iteration_limit = 1
            config.add_slack = False
            config.add_nogood_cuts = False
            config.mip_solver = 'cplex_persistent'
            config.logger.info(
                "Single tree implementation is activated. The defalt MIP solver is 'cplex_persistent'"
            )
        # if the slacks fix to zero, just don't add them
        if config.max_slack == 0.0:
            config.add_slack = False

        if config.strategy == "GOA":
            config.add_nogood_cuts = True
            config.add_slack = True
            config.use_mcpp = True
            config.integer_to_binary = True
            config.use_dual = False
            config.use_fbbt = True

        if config.nlp_solver == "baron":
            config.use_dual = False
        # if ecp tolerance is not provided use bound tolerance
        if config.ecp_tolerance is None:
            config.ecp_tolerance = config.bound_tolerance

        # if the objective function is a constant, dual bound constraint is not added.
        obj = next(model.component_data_objects(ctype=Objective, active=True))
        if obj.expr.polynomial_degree() == 0:
            config.use_dual_bound = False

        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()
        solve_data.curr_int_sol = []
        solve_data.prev_int_sol = []

        if config.use_fbbt:
            fbbt(model)
            config.logger.info(
                "Use the fbbt to tighten the bounds of variables")

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                lower_logger_level_to(config.logger, new_logging_level), \
                create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config, use_mcpp=config.use_mcpp)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None
            solve_data.best_solution_found_time = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]
            if config.single_tree and config.add_nogood_cuts:
                solve_data.stored_bound = {}
            if config.strategy == 'GOA' and config.add_nogood_cuts:
                solve_data.num_no_good_cuts_added = {}

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2':
                feas.nl_constraint_set = Set(
                    initialize=[
                        i
                        for i, constr in enumerate(MindtPy.constraint_list, 1)
                        if constr.body.polynomial_degree() not in (1, 0)
                    ],
                    doc="Integer index set over the nonlinear constraints."
                    "The set corresponds to the index of nonlinear constraint in constraint_set"
                )
                # Create slack variables for feasibility problem
                feas.slack_var = Var(feas.nl_constraint_set,
                                     domain=NonNegativeReals,
                                     initialize=1)
            else:
                feas.slack_var = Var(domain=NonNegativeReals, initialize=1)

            # Create slack variables for OA cuts
            if config.add_slack:
                lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                         initialize=0,
                                         domain=NonNegativeReals)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if config.nlp_solver == 'ipopt':
                if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                    solve_data.working_model.ipopt_zL_out = Suffix(
                        direction=Suffix.IMPORT)
                if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                    solve_data.working_model.ipopt_zU_out = Suffix(
                        direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter
        solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time

        if config.single_tree:
            solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
                (1 if config.init_strategy == 'rNLP' else 0)

        return solve_data.results
Example #15
    def solve(self, model, **kwds):
        config = self.config(kwds, preserve_implicit=True)

        if not isinstance(model, Block):
            raise ValueError("PyomoCyIpoptSolver.solve(model): model "
                             "must be a Pyomo Block")

        # If this is a Pyomo model / block, then we need to create
        # the appropriate PyomoNLP, then wrap it in a CyIpoptNLP
        grey_box_blocks = list(model.component_data_objects(
            egb.ExternalGreyBoxBlock, active=True))
        if grey_box_blocks:
            # nlp = pyomo_nlp.PyomoGreyBoxNLP(model)
            nlp = pyomo_grey_box.PyomoNLPWithGreyBoxBlocks(model)
        else:
            nlp = pyomo_nlp.PyomoNLP(model)

        problem = CyIpoptNLP(nlp, intermediate_callback=config.intermediate_callback)

        xl = problem.x_lb()
        xu = problem.x_ub()
        gl = problem.g_lb()
        gu = problem.g_ub()

        nx = len(xl)
        ng = len(gl)

        cyipopt_solver = cyipopt.Problem(
            n=nx,
            m=ng,
            problem_obj=problem,
            lb=xl,
            ub=xu,
            cl=gl,
            cu=gu
        )

        # check if we need scaling
        obj_scaling, x_scaling, g_scaling = problem.scaling_factors()
        if any(_ is not None for _ in (obj_scaling, x_scaling, g_scaling)):
            # need to set scaling factors
            if obj_scaling is None:
                obj_scaling = 1.0
            if x_scaling is None:
                x_scaling = np.ones(nx)
            if g_scaling is None:
                g_scaling = np.ones(ng)
            try:
                set_scaling = cyipopt_solver.set_problem_scaling
            except AttributeError:
                # Fall back to pre-1.0.0 API
                set_scaling = cyipopt_solver.setProblemScaling
            set_scaling(obj_scaling, x_scaling, g_scaling)

        # add options
        try:
            add_option = cyipopt_solver.add_option
        except AttributeError:
            # Fall back to pre-1.0.0 API
            add_option = cyipopt_solver.addOption
        for k, v in config.options.items():
            add_option(k, v)

        timer = TicTocTimer()
        try:
            # We preemptively set up the TeeStream, even if we aren't
            # going to use it: the implementation is such that the
            # context manager does nothing (i.e., doesn't start up any
            # processing threads) until after a client accesses
            # STDOUT/STDERR
            with TeeStream(sys.stdout) as _teeStream:
                if config.tee:
                    try:
                        fd = sys.stdout.fileno()
                    except (io.UnsupportedOperation, AttributeError):
                        # If sys.stdout doesn't have a valid fileno,
                        # then create one using the TeeStream
                        fd = _teeStream.STDOUT.fileno()
                else:
                    fd = None
                with redirect_fd(fd=1, output=fd, synchronize=False):
                    x, info = cyipopt_solver.solve(problem.x_init())
            solverStatus = SolverStatus.ok
        except:
            msg = "Exception encountered during cyipopt solve:"
            logger.error(msg, exc_info=sys.exc_info())
            solverStatus = SolverStatus.unknown
            raise

        wall_time = timer.toc(None)

        results = SolverResults()

        if config.load_solutions:
            nlp.set_primals(x)
            nlp.set_duals(info['mult_g'])
            nlp.load_state_into_pyomo(
                bound_multipliers=(info['mult_x_L'], info['mult_x_U']))
        else:
            soln = results.solution.add()
            soln.variable.update(
                (i, {'Value':j, 'ipopt_zL_out': zl, 'ipopt_zU_out': zu})
                for i,j,zl,zu in zip( nlp.variable_names(),
                                      x,
                                      info['mult_x_L'],
                                      info['mult_x_U'] )
            )
            soln.constraint.update(
                (i, {'Dual':j}) for i,j in zip(
                    nlp.constraint_names(), info['mult_g']))


        results.problem.name = model.name
        obj = next(model.component_data_objects(Objective, active=True))
        if obj.sense == minimize:
            results.problem.sense = ProblemSense.minimize
            results.problem.upper_bound = info['obj_val']
        else:
            results.problem.sense = ProblemSense.maximize
            results.problem.lower_bound = info['obj_val']
        results.problem.number_of_objectives = 1
        results.problem.number_of_constraints = ng
        results.problem.number_of_variables = nx
        results.problem.number_of_binary_variables = 0
        results.problem.number_of_integer_variables = 0
        results.problem.number_of_continuous_variables = nx
        # TODO: results.problem.number_of_nonzeros

        results.solver.name = 'cyipopt'
        results.solver.return_code = info['status']
        results.solver.message = info['status_msg']
        results.solver.wallclock_time = wall_time
        status_enum = _cyipopt_status_enum[info['status_msg']]
        results.solver.termination_condition = _ipopt_term_cond[status_enum]
        results.solver.status = TerminationCondition.to_solver_status(
            results.solver.termination_condition)

        if config.return_nlp:
            return results, nlp

        return results
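The final status assignment above goes through TerminationCondition.to_solver_status(), which maps a termination condition onto the coarser SolverStatus enum. A tiny illustrative check of that mapping:

from pyomo.opt import SolverStatus, TerminationCondition

status = TerminationCondition.to_solver_status(TerminationCondition.optimal)
print(status == SolverStatus.ok)  # expected: True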
Example #16
    def solve(self, model, **kwds):
        config = self.config(kwds, preserve_implicit=True)

        if not isinstance(model, Block):
            raise ValueError("PyomoCyIpoptSolver.solve(model): model "
                             "must be a Pyomo Block")

        # If this is a Pyomo model / block, then we need to create
        # the appropriate PyomoNLP, then wrap it in a CyIpoptNLP
        grey_box_blocks = list(
            model.component_data_objects(egb.ExternalGreyBoxBlock,
                                         active=True))
        if grey_box_blocks:
            nlp = pyomo_nlp.PyomoGreyBoxNLP(model)
        else:
            nlp = pyomo_nlp.PyomoNLP(model)
        problem = CyIpoptNLP(nlp)

        xl = problem.x_lb()
        xu = problem.x_ub()
        gl = problem.g_lb()
        gu = problem.g_ub()

        nx = len(xl)
        ng = len(gl)

        cyipopt_solver = ipopt.problem(n=nx,
                                       m=ng,
                                       problem_obj=problem,
                                       lb=xl,
                                       ub=xu,
                                       cl=gl,
                                       cu=gu)

        # check if we need scaling
        obj_scaling, x_scaling, g_scaling = problem.scaling_factors()
        if any(_ is not None for _ in (obj_scaling, x_scaling, g_scaling)):
            # need to set scaling factors
            if obj_scaling is None:
                obj_scaling = 1.0
            if x_scaling is None:
                x_scaling = np.ones(nx)
            if g_scaling is None:
                g_scaling = np.ones(ng)
            cyipopt_solver.setProblemScaling(obj_scaling, x_scaling, g_scaling)

        # add options
        for k, v in config.options.items():
            cyipopt_solver.addOption(k, v)

        timer = TicTocTimer()
        try:
            if config.tee:
                x, info = cyipopt_solver.solve(problem.x_init())
            else:
                newstdout = _redirect_stdout()
                x, info = cyipopt_solver.solve(problem.x_init())
                os.dup2(newstdout, 1)
            solverStatus = SolverStatus.ok
        except:
            msg = "Exception encountered during cyipopt solve:"
            logger.error(msg, exc_info=sys.exc_info())
            solverStatus = SolverStatus.unknown
            raise

        wall_time = timer.toc(None)

        results = SolverResults()

        if config.load_solutions:
            nlp.set_primals(x)
            nlp.set_duals(info['mult_g'])
            nlp.load_state_into_pyomo(bound_multipliers=(info['mult_x_L'],
                                                         info['mult_x_U']))
        else:
            soln = results.solution.add()
            soln.variable.update((i, {
                'Value': j,
                'ipopt_zL_out': zl,
                'ipopt_zU_out': zu
            }) for i, j, zl, zu in zip(nlp.variable_names(), x,
                                       info['mult_x_L'], info['mult_x_U']))
            soln.constraint.update((i, {
                'Dual': j
            }) for i, j in zip(nlp.constraint_names(), info['mult_g']))

        results.problem.name = model.name
        obj = next(model.component_data_objects(Objective, active=True))
        if obj.sense == minimize:
            results.problem.sense = ProblemSense.minimize
            results.problem.upper_bound = info['obj_val']
        else:
            results.problem.sense = ProblemSense.maximize
            results.problem.lower_bound = info['obj_val']
        results.problem.number_of_objectives = 1
        results.problem.number_of_constraints = ng
        results.problem.number_of_variables = nx
        results.problem.number_of_binary_variables = 0
        results.problem.number_of_integer_variables = 0
        results.problem.number_of_continuous_variables = nx
        # TODO: results.problem.number_of_nonzeros

        results.solver.name = 'cyipopt'
        results.solver.return_code = info['status']
        results.solver.message = info['status_msg']
        results.solver.wallclock_time = wall_time
        status_enum = _cyipopt_status_enum[info['status_msg']]
        results.solver.termination_condition = _ipopt_term_cond[status_enum]
        results.solver.status = TerminationCondition.to_solver_status(
            results.solver.termination_condition)
        return results
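A hedged usage sketch of the interface above: recent Pyomo releases register a 'cyipopt' solver with the SolverFactory when pynumero and cyipopt are installed, but the registration details and option names vary by version, so treat the snippet as illustrative only.

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=1.0, bounds=(-10, 10))
m.y = pyo.Var(initialize=1.0, bounds=(-10, 10))
m.c = pyo.Constraint(expr=m.x + m.y >= 1)
m.obj = pyo.Objective(expr=(m.x - 2) ** 2 + (m.y + 1) ** 2)

opt = pyo.SolverFactory('cyipopt')  # assumed to be registered; availability depends on the install
if opt.available(exception_flag=False):
    results = opt.solve(m, tee=True)
    print(results.solver.termination_condition, pyo.value(m.obj))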
예제 #17
0
    def _apply_solver(self):
        """ The routine in a derived class that performs the solve """
        x = self.x
        best = self.evaluate(x)
        ndim = len(x)
        curr_iter = 0
        tstatus = TerminationCondition.maxIterations
        tmsg = None
        while curr_iter < self.max_iterations:
            #
            # TODO: use logging here
            #
            if self.debug:  #pragma:nocover
                print("PatternSearch", curr_iter, x, best, self.delta)
            #
            # TODO: add a mechanism to archive the reason for termination
            #
            if best <= self.min_function_value:
                tstatus = TerminationCondition.minFunctionValue
                tmsg = "%s < %s" % (best, self.min_function_value)
                break
            if self.delta <= self.min_delta:
                tstatus = TerminationCondition.minStepLength
                tmsg = "%s < %s" % (self.delta, self.min_delta)
                break
            #
            # Iterate, and keep the best point
            #
            best_new_step = None
            best_new_value = best
            for j in range(ndim):
                orig = x[j]
                #
                # x[j]-Delta
                #
                x[j] = orig - self.delta * self.scale[j]
                #
                # TODO: use logging here
                #
                if self.debug:  #pragma:nocover
                    print("  Trial Point: ", x)
                if self.lower[j] is None or x[j] >= self.lower[j]:
                    tmp = self.evaluate(x)
                    if tmp < best_new_value:
                        best_new_step = (j, -1)
                        best_new_value = tmp
                #
                # x[j]+Delta
                #
                x[j] = orig + self.delta * self.scale[j]
                #
                # TODO: use logging here
                #
                if self.debug:  #pragma:nocover
                    print("  Trial Point: ", x)
                if self.upper[j] is None or x[j] <= self.upper[j]:
                    tmp = self.evaluate(x)
                    if tmp < best_new_value:
                        best_new_step = (j, 1)
                        best_new_value = tmp
                x[j] = orig
            if best_new_value < best:
                #
                # Move to an improving point
                #
                x[best_new_step[0]] += (best_new_step[1] * self.delta *
                                        self.scale[best_new_step[0]])
                best = best_new_value
                self.delta *= 2.0
            else:
                #
                # Contract about a non-improving point
                #
                self.delta /= 3.0
            curr_iter += 1
        #
        # Archive results in a SolverResults object
        #
        results = SolverResults()
        #
        results.solver.number_of_iterations = curr_iter
        results.solver.final_step_length = self.delta
        results.solver.status = SolverStatus.ok
        results.solver.termination_condition = tstatus
        results.solver.termination_message = tmsg
        #
        results.problem.number_of_objectives = 1
        results.problem.number_of_constraints = 0
        results.problem.number_of_variables = ndim
        results.problem.sense = ProblemSense.minimize
        results.problem.upper_bound = best
        #
        soln = results.solution.add()
        soln.objective['f'] = {'Value': best}
        soln.status = SolutionStatus.bestSoFar
        for i in range(ndim):
            soln.variable["x" + str(i)] = {"Value": x[i]}
        #
        # Return final results
        #
        self.results = results

        # the pattern search has neither a proper return code nor a log:
        return Bunch(rc=None, log=None)
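A stand-alone sketch of the expand/contract rule used in the loop above, reduced to one dimension. The constants 2.0 and 3.0 mirror the code; the function and variable names are illustrative.

def pattern_search_1d(f, x, delta=1.0, min_delta=1e-8, max_iter=1000):
    """Toy 1-D pattern search mirroring the expand/contract rule above."""
    best = f(x)
    for _ in range(max_iter):
        if delta <= min_delta:
            break
        # Probe both directions and keep the better point, if it improves.
        cand_val, cand_x = min((f(x - delta), x - delta), (f(x + delta), x + delta))
        if cand_val < best:
            x, best = cand_x, cand_val
            delta *= 2.0   # expand the step after an improving move
        else:
            delta /= 3.0   # contract about a non-improving point
    return x, best

# Example: minimize (x - 3)^2 starting from x = 0
x_star, f_star = pattern_search_1d(lambda x: (x - 3.0) ** 2, 0.0)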
예제 #18
0
    def solve(self, model, **kwds):
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        return SolverFactory('gdpopt').solve(
            model,
            strategy='LBB',
            minlp_solver=config.solver,
            minlp_solver_args=config.solver_args,
            tee=config.tee,
            check_sat=config.check_sat,
            logger=config.logger,
            time_limit=config.time_limit)
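        # NOTE: the `return` above delegates the solve to GDPopt's LBB strategy,
        # so the legacy GDPbb implementation below it is unreachable.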

        # Validate model to be used with gdpbb
        self.validate_model(model)
        # Set solver as an MINLP
        solve_data = GDPbbSolveData()
        solve_data.timing = Container()
        solve_data.original_model = model
        solve_data.results = SolverResults()

        old_logger_level = config.logger.getEffectiveLevel()
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
                restore_logger_level(config.logger), \
                create_utility_block(model, 'GDPbb_utils', solve_data):
            if config.tee and old_logger_level > logging.INFO:
                # If the logger does not already include INFO, include it.
                config.logger.setLevel(logging.INFO)
            config.logger.info(
                "Starting GDPbb version %s using %s as subsolver" %
                (".".join(map(str, self.version())), config.solver))

            # Setup results
            solve_data.results.solver.name = 'GDPbb - %s' % (str(
                config.solver))
            setup_results_object(solve_data, config)

            # clone original model for root node of branch and bound
            root = solve_data.working_model = solve_data.original_model.clone()

            # get objective sense
            process_objective(solve_data, config)
            objectives = solve_data.original_model.component_data_objects(
                Objective, active=True)
            obj = next(objectives, None)
            obj_sign = 1 if obj.sense == minimize else -1
            solve_data.results.problem.sense = obj.sense

            # set up lists to keep track of which disjunctions have been covered.

            # this list keeps track of the relaxed disjunctions
            root.GDPbb_utils.unenforced_disjunctions = list(
                disjunction
                for disjunction in root.GDPbb_utils.disjunction_list
                if disjunction.active)

            root.GDPbb_utils.deactivated_constraints = ComponentSet([
                constr
                for disjunction in root.GDPbb_utils.unenforced_disjunctions
                for disjunct in disjunction.disjuncts
                for constr in disjunct.component_data_objects(ctype=Constraint,
                                                              active=True)
                if constr.body.polynomial_degree() not in (1, 0)
            ])
            # Deactivate nonlinear constraints in unenforced disjunctions
            for constr in root.GDPbb_utils.deactivated_constraints:
                constr.deactivate()

            # Add the BigM suffix if it does not already exist. Used later during nonlinear constraint activation.
            if not hasattr(root, 'BigM'):
                root.BigM = Suffix()

            # Pre-screen that none of the disjunctions are already predetermined due to the disjuncts being fixed
            # to True/False values.
            # TODO this should also be done within the loop, but we aren't handling it right now.
            # Should affect efficiency, but not correctness.
            root.GDPbb_utils.disjuncts_fixed_True = ComponentSet()
            # Only find top-level (non-nested) disjunctions
            for disjunction in root.component_data_objects(Disjunction,
                                                           active=True):
                fixed_true_disjuncts = [
                    disjunct for disjunct in disjunction.disjuncts
                    if disjunct.indicator_var.fixed
                    and disjunct.indicator_var.value == 1
                ]
                fixed_false_disjuncts = [
                    disjunct for disjunct in disjunction.disjuncts
                    if disjunct.indicator_var.fixed
                    and disjunct.indicator_var.value == 0
                ]
                for disjunct in fixed_false_disjuncts:
                    disjunct.deactivate()
                if len(fixed_false_disjuncts) == len(
                        disjunction.disjuncts) - 1:
                    # all but one disjunct in the disjunction is fixed to False. Remaining one must be true.
                    if not fixed_true_disjuncts:
                        fixed_true_disjuncts = [
                            disjunct for disjunct in disjunction.disjuncts
                            if disjunct not in fixed_false_disjuncts
                        ]
                # Reactivate the fixed-true disjuncts
                for disjunct in fixed_true_disjuncts:
                    newly_activated = ComponentSet()
                    for constr in disjunct.component_data_objects(Constraint):
                        if constr in root.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            root.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    root.GDPbb_utils.deactivated_constraints -= newly_activated
                    root.GDPbb_utils.disjuncts_fixed_True.add(disjunct)

                if fixed_true_disjuncts:
                    assert disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                        "%s violates this assumption." % (disjunction.name, )
                    root.GDPbb_utils.unenforced_disjunctions.remove(
                        disjunction)

            # Check satisfiability
            if config.check_sat and satisfiable(root, config.logger) is False:
                # Problem is not satisfiable. Problem is infeasible.
                obj_value = obj_sign * float('inf')
            else:
                # solve the root node
                config.logger.info("Solving the root node.")
                obj_value, result, var_values = self.subproblem_solve(
                    root, config)

            if obj_sign * obj_value == float('inf'):
                config.logger.info(
                    "Model was found to be infeasible at the root node. Elapsed %.2f seconds."
                    % get_main_elapsed_time(solve_data.timing))
                if solve_data.results.problem.sense == minimize:
                    solve_data.results.problem.lower_bound = float('inf')
                    solve_data.results.problem.upper_bound = None
                else:
                    solve_data.results.problem.lower_bound = None
                    solve_data.results.problem.upper_bound = float('-inf')
                solve_data.results.solver.timing = solve_data.timing
                solve_data.results.solver.iterations = 0
                solve_data.results.solver.termination_condition = tc.infeasible
                return solve_data.results

            # initialize minheap for Branch and Bound algorithm
            # Heap structure: (ordering tuple, model)
            # Ordering tuple: (objective value, disjunctions_left, -total_nodes_counter)
            #  - select solutions with lower objective value,
            #    then fewer disjunctions left to explore (depth first),
            #    then more recently encountered (tiebreaker)
            heap = []
            total_nodes_counter = 0
            disjunctions_left = len(root.GDPbb_utils.unenforced_disjunctions)
            heapq.heappush(heap,
                           ((obj_sign * obj_value, disjunctions_left,
                             -total_nodes_counter), root, result, var_values))

            # loop to branch through the tree
            while len(heap) > 0:
                # pop best model off of heap
                sort_tuple, incumbent_model, incumbent_results, incumbent_var_values = heapq.heappop(
                    heap)
                incumbent_obj_value, disjunctions_left, _ = sort_tuple

                config.logger.info(
                    "Exploring node with LB %.10g and %s inactive disjunctions."
                    % (incumbent_obj_value, disjunctions_left))

                # if all the originally active disjunctions are active, solve and
                # return solution
                if disjunctions_left == 0:
                    config.logger.info("Model solved.")
                    # Model is solved. Copy over solution values.
                    original_model = solve_data.original_model
                    for orig_var, val in zip(
                            original_model.GDPbb_utils.variable_list,
                            incumbent_var_values):
                        orig_var.value = val

                    solve_data.results.problem.lower_bound = incumbent_results.problem.lower_bound
                    solve_data.results.problem.upper_bound = incumbent_results.problem.upper_bound
                    solve_data.results.solver.timing = solve_data.timing
                    solve_data.results.solver.iterations = total_nodes_counter
                    solve_data.results.solver.termination_condition = incumbent_results.solver.termination_condition
                    return solve_data.results

                # Pick the next disjunction to branch on
                next_disjunction = incumbent_model.GDPbb_utils.unenforced_disjunctions[
                    0]
                config.logger.info("Branching on disjunction %s" %
                                   next_disjunction.name)
                assert next_disjunction.xor, "GDPbb only handles disjunctions in which one term can be selected. " \
                    "%s violates this assumption." % (next_disjunction.name, )

                new_nodes_counter = 0

                for i, disjunct in enumerate(next_disjunction.disjuncts):
                    # Create one branch for each of the disjuncts on the disjunction

                    if any(disj.indicator_var.fixed
                           and disj.indicator_var.value == 1
                           for disj in next_disjunction.disjuncts
                           if disj is not disjunct):
                        # If any other disjunct is fixed to 1 and an xor relationship applies,
                        # then this disjunct cannot be activated.
                        continue

                    # Check time limit
                    if get_main_elapsed_time(
                            solve_data.timing) >= config.time_limit:
                        if solve_data.results.problem.sense == minimize:
                            solve_data.results.problem.lower_bound = incumbent_obj_value
                            solve_data.results.problem.upper_bound = float(
                                'inf')
                        else:
                            solve_data.results.problem.lower_bound = float(
                                '-inf')
                            solve_data.results.problem.upper_bound = incumbent_obj_value
                        config.logger.info('GDPopt unable to converge bounds '
                                           'before time limit of {} seconds. '
                                           'Elapsed: {} seconds'.format(
                                               config.time_limit,
                                               get_main_elapsed_time(
                                                   solve_data.timing)))
                        config.logger.info(
                            'Final bound values: LB: {}  UB: {}'.format(
                                solve_data.results.problem.lower_bound,
                                solve_data.results.problem.upper_bound))
                        solve_data.results.solver.timing = solve_data.timing
                        solve_data.results.solver.iterations = total_nodes_counter
                        solve_data.results.solver.termination_condition = tc.maxTimeLimit
                        return solve_data.results

                    # Branch on the disjunct
                    child = incumbent_model.clone()
                    # TODO I am leaving the old branching system in place, but there should be
                    # something better, ideally that deals with nested disjunctions as well.
                    disjunction_to_branch = child.GDPbb_utils.unenforced_disjunctions.pop(
                        0)
                    child_disjunct = disjunction_to_branch.disjuncts[i]
                    child_disjunct.indicator_var.fix(1)
                    # Deactivate (and fix to 0) other disjuncts on the disjunction
                    for disj in disjunction_to_branch.disjuncts:
                        if disj is not child_disjunct:
                            disj.deactivate()
                    # Activate nonlinear constraints on the newly fixed child disjunct
                    newly_activated = ComponentSet()
                    for constr in child_disjunct.component_data_objects(
                            Constraint):
                        if constr in child.GDPbb_utils.deactivated_constraints:
                            newly_activated.add(constr)
                            constr.activate()
                            # Set the big M value for the constraint
                            child.BigM[constr] = 1
                            # Note: we use a default big M value of 1
                            # because all non-selected disjuncts should be deactivated.
                            # Therefore, none of the big M transformed nonlinear constraints will need to be relaxed.
                            # The default M value should therefore be irrelevant.
                    child.GDPbb_utils.deactivated_constraints -= newly_activated
                    child.GDPbb_utils.disjuncts_fixed_True.add(child_disjunct)

                    if disjunct in incumbent_model.GDPbb_utils.disjuncts_fixed_True:
                        # If the disjunct was already branched to True from a parent disjunct branching, just pass
                        # through the incumbent value without resolving. The solution should be the same as the parent.
                        total_nodes_counter += 1
                        ordering_tuple = (obj_sign * incumbent_obj_value,
                                          disjunctions_left - 1,
                                          -total_nodes_counter)
                        heapq.heappush(heap, (ordering_tuple, child,
                                              incumbent_results,
                                              incumbent_var_values))
                        new_nodes_counter += 1
                        continue

                    if config.check_sat and satisfiable(
                            child, config.logger) is False:
                        # Problem is not satisfiable. Skip this disjunct.
                        continue

                    obj_value, result, var_values = self.subproblem_solve(
                        child, config)
                    total_nodes_counter += 1
                    ordering_tuple = (obj_sign * obj_value,
                                      disjunctions_left - 1,
                                      -total_nodes_counter)
                    heapq.heappush(heap,
                                   (ordering_tuple, child, result, var_values))
                    new_nodes_counter += 1

                config.logger.info(
                    "Added %s new nodes with %s relaxed disjunctions to the heap. Size now %s."
                    % (new_nodes_counter, disjunctions_left - 1, len(heap)))
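A self-contained sketch of the node ordering described by the heap comments above: heapq pops the smallest tuple, so nodes with the lowest bound come first, ties are broken by fewer remaining disjunctions and then by recency. The node labels and values are illustrative.

import heapq

heap = []
#                     (objective value, disjunctions_left, -node_counter)
heapq.heappush(heap, ((12.0, 3, -1), "node A"))
heapq.heappush(heap, ((10.5, 2, -2), "node B"))
heapq.heappush(heap, ((10.5, 2, -3), "node C"))

order = [heapq.heappop(heap)[1] for _ in range(len(heap))]
print(order)  # ['node C', 'node B', 'node A']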
예제 #19
0
    def _postsolve(self):
        results = SolverResults()

        #print 'ANS', dir(self._ans),
        #print self._ans.evals
        #print self._ans.ff
        #print self._ans.rf
        #print self._ans.xf

        solv = results.solver
        solv.name = self.options.subsolver
        #solv.status = self._glpk_get_solver_status()
        #solv.memory_used = "%d bytes, (%d KiB)" % (peak_mem, peak_mem/1024)
        solv.wallclock_time = self._ans.elapsed['solver_time']
        solv.cpu_time = self._ans.elapsed['solver_cputime']

        solv.termination_message = self._ans.msg
        istop = self._ans.istop
        if istop == openopt.kernel.setDefaultIterFuncs.SMALL_DF:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.locallyOptimal

        elif istop == openopt.kernel.setDefaultIterFuncs.SMALL_DELTA_X:
            solv.termination_condition = TerminationCondition.minStepLength
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.SMALL_DELTA_F:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.FVAL_IS_ENOUGH:
            solv.termination_condition = TerminationCondition.minFunctionValue
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.MAX_NON_SUCCESS:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.unsure

        elif istop == openopt.kernel.setDefaultIterFuncs.USER_DEMAND_STOP:
            solv.termination_condition = TerminationCondition.userInterrupt
            sstatus = SolutionStatus.bestSoFar

        elif istop == openopt.kernel.setDefaultIterFuncs.BUTTON_ENOUGH_HAS_BEEN_PRESSED:
            solv.termination_condition = TerminationCondition.userInterrupt
            sstatus = SolutionStatus.bestSoFar

        elif istop == openopt.kernel.setDefaultIterFuncs.SOLVED_WITH_UNIMPLEMENTED_OR_UNKNOWN_REASON:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.unsure

        elif istop == openopt.kernel.setDefaultIterFuncs.UNDEFINED:
            solv.termination_condition = TerminationCondition.unknown
            sstatus = SolutionStatus.unsure

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_NAN_IN_X:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.unknown

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_LINE_SEARCH_FAILED:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.error

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_MAX_ITER_REACHED:
            solv.termination_condition = TerminationCondition.maxIterations
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_MAX_CPU_TIME_REACHED:
            solv.termination_condition = TerminationCondition.maxTimeLimit
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_MAX_TIME_REACHED:
            solv.termination_condition = TerminationCondition.maxTimeLimit
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_MAX_FUN_EVALS_REACHED:
            solv.termination_condition = TerminationCondition.maxEvaluations
            sstatus = SolutionStatus.stoppedByLimit

        elif istop == openopt.kernel.setDefaultIterFuncs.IS_ALL_VARS_FIXED:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.unknown

        elif istop == openopt.kernel.setDefaultIterFuncs.FAILED_TO_OBTAIN_MOVE_DIRECTION:
            solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.error

        elif istop == openopt.kernel.setDefaultIterFuncs.USER_DEMAND_EXIT:
            solv.termination_condition = TerminationCondition.userInterrupt
            sstatus = SolutionStatus.bestSoFar

        elif istop == -100:
            #solv.termination_condition = TerminationCondition.other
            sstatus = SolutionStatus.error

        else:
            raise ApplicationError(
                "Unexpected OpenOpt termination code: '%d'" % istop)

        prob = results.problem
        prob.name = self._instance.name
        prob.number_of_constraints = self._instance.statistics.number_of_constraints
        prob.number_of_variables = self._instance.statistics.number_of_variables
        prob.number_of_binary_variables = self._instance.statistics.number_of_binary_variables
        prob.number_of_integer_variables = self._instance.statistics.number_of_integer_variables
        prob.number_of_continuous_variables = self._instance.statistics.number_of_continuous_variables
        prob.number_of_objectives = self._instance.statistics.number_of_objectives

        from pyomo.core import maximize
        if self._problem.sense == maximize:
            prob.sense = ProblemSense.maximize
        else:
            prob.sense = ProblemSense.minimize

        if sstatus not in (SolutionStatus.error, ):
            soln = Solution()
            soln.status = sstatus

            if type(self._ans.ff) in (list, tuple):
                oval = float(self._ans.ff[0])
            else:
                oval = float(self._ans.ff)
            if self._problem.sense == maximize:
                soln.objective[self._problem._f_name[0]] = {'Value': -oval}
            else:
                soln.objective[self._problem._f_name[0]] = {'Value': oval}

            for var_label in self._ans.xf.keys():
                if self._ans.xf[var_label].is_integer():
                    soln.variable[var_label.name] = {
                        'Value': int(self._ans.xf[var_label])
                    }
                else:
                    soln.variable[var_label.name] = {
                        'Value': float(self._ans.xf[var_label])
                    }

            results.solution.insert(soln)

        self._instance.solutions.add_symbol_map(self._symbol_map)
        self._smap_id = id(self._symbol_map)

        self._instance = None
        self._symbol_map = None
        self._problem = None
        return results
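The long elif chain above is a fixed mapping from OpenOpt stop codes to Pyomo statuses; below is a table-driven sketch of the same idea. The dictionary keys are placeholders standing in for the openopt istop constants, and the imports reflect current Pyomo (older releases may expose these enums elsewhere).

from pyomo.opt import TerminationCondition, SolutionStatus

# Placeholder keys; a real table would be keyed by the openopt istop constants.
ISTOP_MAP = {
    'IS_MAX_ITER_REACHED': (TerminationCondition.maxIterations,
                            SolutionStatus.stoppedByLimit),
    'IS_MAX_CPU_TIME_REACHED': (TerminationCondition.maxTimeLimit,
                                SolutionStatus.stoppedByLimit),
    'USER_DEMAND_STOP': (TerminationCondition.userInterrupt,
                         SolutionStatus.bestSoFar),
}

def map_istop(istop):
    """Return (termination_condition, solution_status) for a stop code."""
    if istop not in ISTOP_MAP:
        raise ValueError("Unexpected termination code: %r" % istop)
    return ISTOP_MAP[istop]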
예제 #20
0
def set_up_solve_data(model, config):
    """Set up the solve data.

    Parameters
    ----------
    model : Pyomo model
        The original model to be solved in MindtPy.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    """
    solve_data = MindtPySolveData()
    solve_data.results = SolverResults()
    solve_data.timing = Bunch()
    solve_data.curr_int_sol = []
    solve_data.should_terminate = False
    solve_data.integer_list = []

    # if the objective function is a constant, dual bound constraint is not added.
    obj = next(model.component_data_objects(ctype=Objective, active=True))
    if obj.expr.polynomial_degree() == 0:
        config.use_dual_bound = False

    if config.use_fbbt:
        fbbt(model)
        # TODO: logging_level is not logging.INFO here
        config.logger.info('Using FBBT to tighten the bounds of variables')

    solve_data.original_model = model
    solve_data.working_model = model.clone()

    # Set up iteration counters
    solve_data.nlp_iter = 0
    solve_data.mip_iter = 0
    solve_data.mip_subiter = 0
    solve_data.nlp_infeasible_counter = 0
    if config.init_strategy == 'FP':
        solve_data.fp_iter = 1

    # set up bounds
    if obj.sense == minimize:
        solve_data.primal_bound = float('inf')
        solve_data.dual_bound = float('-inf')
    else:
        solve_data.primal_bound = float('-inf')
        solve_data.dual_bound = float('inf')
    solve_data.primal_bound_progress = [solve_data.primal_bound]
    solve_data.dual_bound_progress = [solve_data.dual_bound]
    solve_data.primal_bound_progress_time = [0]
    solve_data.dual_bound_progress_time = [0]
    solve_data.abs_gap = float('inf')
    solve_data.rel_gap = float('inf')
    solve_data.log_formatter = ' {:>9}   {:>15}   {:>15g}   {:>12g}   {:>12g}   {:>7.2%}   {:>7.2f}'
    solve_data.fixed_nlp_log_formatter = '{:1}{:>9}   {:>15}   {:>15g}   {:>12g}   {:>12g}   {:>7.2%}   {:>7.2f}'
    solve_data.log_note_formatter = ' {:>9}   {:>15}   {:>15}'
    if config.add_regularization is not None:
        if config.add_regularization in {
                'level_L1', 'level_L_infinity', 'grad_lag'
        }:
            solve_data.regularization_mip_type = 'MILP'
        elif config.add_regularization in {
                'level_L2', 'hess_lag', 'hess_only_lag', 'sqp_lag'
        }:
            solve_data.regularization_mip_type = 'MIQP'

    if config.single_tree and (config.add_no_good_cuts
                               or config.use_tabu_list):
        solve_data.stored_bound = {}
    if config.strategy == 'GOA' and (config.add_no_good_cuts
                                     or config.use_tabu_list):
        solve_data.num_no_good_cuts_added = {}

    # Flag indicating whether the solution improved in the past
    # iteration or not
    solve_data.primal_bound_improved = False
    solve_data.dual_bound_improved = False

    if config.nlp_solver == 'ipopt':
        if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
            solve_data.working_model.ipopt_zL_out = Suffix(
                direction=Suffix.IMPORT)
        if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
            solve_data.working_model.ipopt_zU_out = Suffix(
                direction=Suffix.IMPORT)

    if config.quadratic_strategy == 0:
        solve_data.mip_objective_polynomial_degree = {0, 1}
        solve_data.mip_constraint_polynomial_degree = {0, 1}
    elif config.quadratic_strategy == 1:
        solve_data.mip_objective_polynomial_degree = {0, 1, 2}
        solve_data.mip_constraint_polynomial_degree = {0, 1}
    elif config.quadratic_strategy == 2:
        solve_data.mip_objective_polynomial_degree = {0, 1, 2}
        solve_data.mip_constraint_polynomial_degree = {0, 1, 2}

    return solve_data
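The bounds and gaps initialized above drive MindtPy's convergence check; the helper below is an illustrative sketch (not MindtPy's actual routine) of how the absolute and relative gaps could be recomputed from the two bounds.

import math

def update_gaps(solve_data, zero_tol=1e-10):
    """Illustrative recomputation of abs/rel gap from the current bounds."""
    primal, dual = solve_data.primal_bound, solve_data.dual_bound
    if math.isinf(primal) or math.isinf(dual):
        solve_data.abs_gap = float('inf')
        solve_data.rel_gap = float('inf')
    else:
        solve_data.abs_gap = abs(primal - dual)
        solve_data.rel_gap = solve_data.abs_gap / max(abs(primal), zero_tol)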
예제 #21
0
def solve_NLP(nlp_model, solve_data, config):
    """Solve the NLP subproblem."""
    config.logger.info('Solving nonlinear subproblem for '
                       'fixed binaries and logical realizations.')

    # Error checking for unfixed discrete variables
    unfixed_discrete_vars = detect_unfixed_discrete_vars(nlp_model)
    assert len(unfixed_discrete_vars) == 0, \
        "Unfixed discrete variables exist on the NLP subproblem: {0}".format(
        list(v.name for v in unfixed_discrete_vars))

    GDPopt = nlp_model.GDPopt_utils

    initialize_subproblem(nlp_model, solve_data)

    # Callback immediately before solving NLP subproblem
    config.call_before_subproblem_solve(nlp_model, solve_data)

    nlp_solver = SolverFactory(config.nlp_solver)
    if not nlp_solver.available():
        raise RuntimeError("NLP solver %s is not available." %
                           config.nlp_solver)
    with SuppressInfeasibleWarning():
        try:
            results = nlp_solver.solve(nlp_model, **config.nlp_solver_args)
        except ValueError as err:
            if 'Cannot load SolverResults object with bad status: error' in str(
                    err):
                results = SolverResults()
                results.solver.termination_condition = tc.error
                results.solver.message = str(err)
            else:
                raise

    nlp_result = SubproblemResult()
    nlp_result.feasible = True
    nlp_result.var_values = list(v.value for v in GDPopt.variable_list)
    nlp_result.pyomo_results = results
    nlp_result.dual_values = list(
        nlp_model.dual.get(c, None) for c in GDPopt.constraint_list)

    term_cond = results.solver.termination_condition
    if any(term_cond == cond
           for cond in (tc.optimal, tc.locallyOptimal, tc.feasible)):
        pass
    elif term_cond == tc.infeasible:
        config.logger.info('NLP subproblem was infeasible.')
        nlp_result.feasible = False
    elif term_cond == tc.maxIterations:
        # TODO try something else? Reinitialize with different initial
        # value?
        config.logger.info(
            'NLP subproblem failed to converge within iteration limit.')
        if is_feasible(nlp_model, config):
            config.logger.info(
                'NLP solution is still feasible. '
                'Using potentially suboptimal feasible solution.')
        else:
            nlp_result.feasible = False
    elif term_cond == tc.internalSolverError:
        # Possible that IPOPT had a restoration failure
        config.logger.info("NLP solver had an internal failure: %s" %
                           results.solver.message)
        nlp_result.feasible = False
    elif (term_cond == tc.other
          and "Too few degrees of freedom" in str(results.solver.message)):
        # Possible IPOPT degrees of freedom error
        config.logger.info("IPOPT has too few degrees of freedom: %s" %
                           results.solver.message)
        nlp_result.feasible = False
    elif term_cond == tc.other:
        config.logger.info(
            "NLP solver had a termination condition of 'other': %s" %
            results.solver.message)
        nlp_result.feasible = False
    elif term_cond == tc.error:
        config.logger.info(
            "NLP solver had a termination condition of 'error': %s" %
            results.solver.message)
        nlp_result.feasible = False
    elif term_cond == tc.maxTimeLimit:
        config.logger.info(
            "NLP solver ran out of time. Assuming infeasible for now.")
        nlp_result.feasible = False
    else:
        raise ValueError('GDPopt unable to handle NLP subproblem termination '
                         'condition of %s. Results: %s' % (term_cond, results))

    # Call the NLP post-solve callback
    config.call_after_subproblem_solve(nlp_model, solve_data)

    # if feasible, call the NLP post-feasible callback
    if nlp_result.feasible:
        config.call_after_subproblem_feasible(nlp_model, solve_data)

    return nlp_result
예제 #22
0
def solve_fp_subproblem(solve_data, config):
    """
    Solves the feasibility pump NLP

    This function sets up the 'fp_nlp' by relax integer variables.
    precomputes dual values, deactivates trivial constraints, and then solves NLP model.

    Parameters
    ----------
    solve_data: MindtPy Data Container
        data container that holds solve-instance data
    config: ConfigBlock
        contains the specific configurations for the algorithm

    Returns
    -------
    fp_nlp: Pyomo model
        Fixed-NLP from the model
    results: Pyomo results object
        result from solving the Fixed-NLP
    """

    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils
    config.logger.info('FP-NLP %s: Solve feasibility pump NLP subproblem.'
                       % (solve_data.fp_iter,))

    # Set up NLP
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=fp_nlp.MindtPy_utils.objective_value >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence over all iterations
    # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP'
    # The norm type is consistent with the norm objective of the FP main problem.
    if config.fp_norm_constraint:
        if config.fp_main_norm == 'L1':
            # TODO: check if we can access the block defined in FP-main problem
            generate_norm1_norm_constraint(
                fp_nlp, solve_data.mip, config, discrete_only=True)
        elif config.fp_main_norm == 'L2':
            fp_nlp.norm_constraint = Constraint(expr=sum((nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef*(nlp_var.value - mip_var.value)**2
                                                         for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0)
        elif config.fp_main_norm == 'L_infinity':
            fp_nlp.norm_constraint = ConstraintList()
            rhs = config.fp_norm_constraint_coef * max(nlp_var.value - mip_var.value for nlp_var, mip_var in zip(
                fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list))
            for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list):
                fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs)

    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(
            fp_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance)
    except ValueError:
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(
                fp_nlp, tee=config.nlp_solver_tee, **nlp_args)
    return fp_nlp, results
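Written out for the L2 branch above (notation ours, not MindtPy's): let x be the relaxed discrete variables, xbar their values from the FP main problem, x^k the current working values, and gamma = fp_norm_constraint_coef. The constraint added is

    sum_i (x_i - xbar_i)^2  <=  gamma * sum_i (x_i^k - xbar_i)^2

so each FP-NLP iterate must end up at least as close (strictly closer when gamma < 1) to the latest MIP point as the previous iterate was, which is the monotonicity property the comment refers to.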
예제 #23
0
def device_scheduler(  # noqa C901
    device_constraints: List[pd.DataFrame],
    ems_constraints: pd.DataFrame,
    commitment_quantities: List[pd.Series],
    commitment_downwards_deviation_price: Union[List[pd.Series], List[float]],
    commitment_upwards_deviation_price: Union[List[pd.Series], List[float]],
) -> Tuple[List[pd.Series], float, SolverResults]:
    """This generic device scheduler is able to handle an EMS with multiple devices,
    with various types of constraints on the EMS level and on the device level,
    and with multiple market commitments on the EMS level.
    A typical example is a house with many devices.
    The commitments are assumed to be with regard to the flow of energy to the device (positive for consumption,
    negative for production). The solver minimises the costs of deviating from the commitments.

    Device constraints are on a device level. Handled constraints (listed by column name):
        max: maximum stock assuming an initial stock of zero (e.g. in MWh or boxes)
        min: minimum stock assuming an initial stock of zero
        equals: exact amount of stock (we do this by clamping min and max)
        derivative max: maximum flow (e.g. in MW or boxes/h)
        derivative min: minimum flow
        derivative equals: exact amount of flow (we do this by clamping derivative min and derivative max)
        derivative down efficiency: ratio of downwards flows (flow into EMS : flow out of device)
        derivative up efficiency: ratio of upwards flows (flow into device : flow out of EMS)
    EMS constraints are on an EMS level. Handled constraints (listed by column name):
        derivative max: maximum flow
        derivative min: minimum flow
    Commitments are on an EMS level. Parameter explanations:
        commitment_quantities: amounts of flow specified in commitments (both previously ordered and newly requested)
            - e.g. in MW or boxes/h
        commitment_downwards_deviation_price: penalty for downwards deviations of the flow
            - e.g. in EUR/MW or EUR/(boxes/h)
            - either a single value (same value for each flow value) or a Series (different value for each flow value)
        commitment_upwards_deviation_price: penalty for upwards deviations of the flow

    All Series and DataFrames should have the same resolution.

    For now, we pass in the various constraints and prices as separate variables, from which we make a MultiIndex
    DataFrame. Later we could pass in a MultiIndex DataFrame directly.
    """

    # If the EMS has no devices, don't bother
    if len(device_constraints) == 0:
        return [], 0, SolverResults()

    # Check if commitments have the same time window and resolution as the constraints
    start = device_constraints[0].index.to_pydatetime()[0]
    resolution = pd.to_timedelta(device_constraints[0].index.freq)
    end = device_constraints[0].index.to_pydatetime()[-1] + resolution
    if len(commitment_quantities) != 0:
        start_c = commitment_quantities[0].index.to_pydatetime()[0]
        resolution_c = pd.to_timedelta(commitment_quantities[0].index.freq)
        end_c = commitment_quantities[0].index.to_pydatetime()[-1] + resolution_c
        if not (start_c == start and end_c == end):
            raise Exception(
                "Not implemented for different time windows.\n(%s,%s)\n(%s,%s)"
                % (start, end, start_c, end_c))
        if resolution_c != resolution:
            raise Exception(
                "Not implemented for different resolutions.\n%s\n%s" %
                (resolution, resolution_c))

    # Turn prices per commitment into prices per commitment flow
    if len(commitment_downwards_deviation_price) != 0:
        if all(
                isinstance(price, float)
                for price in commitment_downwards_deviation_price):
            commitment_downwards_deviation_price = [
                initialize_series(price, start, end, resolution)
                for price in commitment_downwards_deviation_price
            ]
    if len(commitment_upwards_deviation_price) != 0:
        if all(
                isinstance(price, float)
                for price in commitment_upwards_deviation_price):
            commitment_upwards_deviation_price = [
                initialize_series(price, start, end, resolution)
                for price in commitment_upwards_deviation_price
            ]

    model = ConcreteModel()

    # Add indices for devices (d), datetimes (j) and commitments (c)
    model.d = RangeSet(0, len(device_constraints) - 1, doc="Set of devices")
    model.j = RangeSet(0,
                       len(device_constraints[0].index.to_pydatetime()) - 1,
                       doc="Set of datetimes")
    model.c = RangeSet(0,
                       len(commitment_quantities) - 1,
                       doc="Set of commitments")

    # Add parameters
    def price_down_select(m, c, j):
        return commitment_downwards_deviation_price[c].iloc[j]

    def price_up_select(m, c, j):
        return commitment_upwards_deviation_price[c].iloc[j]

    def commitment_quantity_select(m, c, j):
        return commitment_quantities[c].iloc[j]

    def device_max_select(m, d, j):
        max_v = device_constraints[d]["max"].iloc[j]
        equal_v = device_constraints[d]["equals"].iloc[j]
        if np.isnan(max_v) and np.isnan(equal_v):
            return infinity
        else:
            return np.nanmin([max_v, equal_v])

    def device_min_select(m, d, j):
        min_v = device_constraints[d]["min"].iloc[j]
        equal_v = device_constraints[d]["equals"].iloc[j]
        if np.isnan(min_v) and np.isnan(equal_v):
            return -infinity
        else:
            return np.nanmax([min_v, equal_v])

    def device_derivative_max_select(m, d, j):
        max_v = device_constraints[d]["derivative max"].iloc[j]
        equal_v = device_constraints[d]["derivative equals"].iloc[j]
        if np.isnan(max_v) and np.isnan(equal_v):
            return infinity
        else:
            return np.nanmin([max_v, equal_v])

    def device_derivative_min_select(m, d, j):
        min_v = device_constraints[d]["derivative min"].iloc[j]
        equal_v = device_constraints[d]["derivative equals"].iloc[j]
        if np.isnan(min_v) and np.isnan(equal_v):
            return -infinity
        else:
            return np.nanmax([min_v, equal_v])

    def ems_derivative_max_select(m, j):
        v = ems_constraints["derivative max"].iloc[j]
        if np.isnan(v):
            return infinity
        else:
            return v

    def ems_derivative_min_select(m, j):
        v = ems_constraints["derivative min"].iloc[j]
        if np.isnan(v):
            return -infinity
        else:
            return v

    def device_derivative_down_efficiency(m, d, j):
        try:
            return device_constraints[d]["derivative down efficiency"].iloc[j]
        except KeyError:
            return 1

    def device_derivative_up_efficiency(m, d, j):
        try:
            return device_constraints[d]["derivative up efficiency"].iloc[j]
        except KeyError:
            return 1

    model.up_price = Param(model.c, model.j, initialize=price_up_select)
    model.down_price = Param(model.c, model.j, initialize=price_down_select)
    model.commitment_quantity = Param(model.c,
                                      model.j,
                                      initialize=commitment_quantity_select)
    model.device_max = Param(model.d, model.j, initialize=device_max_select)
    model.device_min = Param(model.d, model.j, initialize=device_min_select)
    model.device_derivative_max = Param(
        model.d, model.j, initialize=device_derivative_max_select)
    model.device_derivative_min = Param(
        model.d, model.j, initialize=device_derivative_min_select)
    model.ems_derivative_max = Param(model.j,
                                     initialize=ems_derivative_max_select)
    model.ems_derivative_min = Param(model.j,
                                     initialize=ems_derivative_min_select)
    model.device_derivative_down_efficiency = Param(
        model.d, model.j, initialize=device_derivative_down_efficiency)
    model.device_derivative_up_efficiency = Param(
        model.d, model.j, initialize=device_derivative_up_efficiency)

    # Add variables
    model.ems_power = Var(model.d, model.j, domain=Reals, initialize=0)
    model.device_power_down = Var(model.d,
                                  model.j,
                                  domain=NonPositiveReals,
                                  initialize=0)
    model.device_power_up = Var(model.d,
                                model.j,
                                domain=NonNegativeReals,
                                initialize=0)
    model.commitment_downwards_deviation = Var(model.c,
                                               model.j,
                                               domain=NonPositiveReals,
                                               initialize=0)
    model.commitment_upwards_deviation = Var(model.c,
                                             model.j,
                                             domain=NonNegativeReals,
                                             initialize=0)

    # Add constraints as a tuple of (lower bound, value, upper bound)
    def device_bounds(m, d, j):
        return (
            m.device_min[d, j],
            sum(m.device_power_down[d, k] + m.device_power_up[d, k]
                for k in range(0, j + 1)),
            m.device_max[d, j],
        )

    def device_derivative_bounds(m, d, j):
        return (
            m.device_derivative_min[d, j],
            m.device_power_down[d, j] + m.device_power_up[d, j],
            m.device_derivative_max[d, j],
        )

    def device_down_derivative_bounds(m, d, j):
        return (
            m.device_derivative_min[d, j],
            m.device_power_down[d, j],
            0,
        )

    def device_up_derivative_bounds(m, d, j):
        return (
            0,
            m.device_power_up[d, j],
            m.device_derivative_max[d, j],
        )

    def ems_derivative_bounds(m, j):
        return m.ems_derivative_min[j], sum(
            m.ems_power[:, j]), m.ems_derivative_max[j]

    def ems_flow_commitment_equalities(m, j):
        """Couple EMS flows (sum over devices) to commitments."""
        return (
            0,
            sum(m.commitment_quantity[:, j]) +
            sum(m.commitment_downwards_deviation[:, j]) +
            sum(m.commitment_upwards_deviation[:, j]) - sum(m.ems_power[:, j]),
            0,
        )

    def device_derivative_equalities(m, d, j):
        """Couple device flows to EMS flows per device, applying efficiencies."""
        return (
            0,
            m.device_power_up[d, j] / m.device_derivative_up_efficiency[d, j] +
            m.device_power_down[d, j] *
            m.device_derivative_down_efficiency[d, j] - m.ems_power[d, j],
            0,
        )

    model.device_energy_bounds = Constraint(model.d,
                                            model.j,
                                            rule=device_bounds)
    model.device_power_bounds = Constraint(model.d,
                                           model.j,
                                           rule=device_derivative_bounds)
    model.device_power_down_bounds = Constraint(
        model.d, model.j, rule=device_down_derivative_bounds)
    model.device_power_up_bounds = Constraint(model.d,
                                              model.j,
                                              rule=device_up_derivative_bounds)
    model.ems_power_bounds = Constraint(model.j, rule=ems_derivative_bounds)
    model.ems_power_commitment_equalities = Constraint(
        model.j, rule=ems_flow_commitment_equalities)
    model.device_power_equalities = Constraint(
        model.d, model.j, rule=device_derivative_equalities)

    # Add objective
    def cost_function(m):
        costs = 0
        for c in m.c:
            for j in m.j:
                costs += m.commitment_downwards_deviation[c,
                                                          j] * m.down_price[c,
                                                                            j]
                costs += m.commitment_upwards_deviation[c, j] * m.up_price[c,
                                                                           j]
        return costs

    model.costs = Objective(rule=cost_function, sense=minimize)

    # Solve
    results = SolverFactory(
        current_app.config.get("FLEXMEASURES_LP_SOLVER")).solve(model)

    planned_costs = value(model.costs)
    planned_power_per_device = []
    for d in model.d:
        planned_device_power = [
            model.device_power_down[d, j].value +
            model.device_power_up[d, j].value for j in model.j
        ]
        planned_power_per_device.append(
            pd.Series(
                index=pd.date_range(start=start,
                                    end=end,
                                    freq=to_offset(resolution),
                                    closed="left"),
                data=planned_device_power,
            ))

    # model.pprint()
    # print(results.solver.termination_condition)
    # print(planned_costs)
    # model.display()
    return planned_power_per_device, planned_costs, results
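A hedged sketch of how the inputs described in the docstring might be assembled for a single battery-like device over four 15-minute periods. Column names follow the docstring above; actually calling device_scheduler also needs the surrounding FlexMeasures app context (the configured LP solver and initialize_series), so only the data shapes are shown and the call itself is left commented out.

import pandas as pd

index = pd.date_range("2021-01-01 00:00", periods=4, freq="15min")

device = pd.DataFrame(
    index=index,
    columns=["max", "min", "equals",
             "derivative max", "derivative min", "derivative equals"],
    dtype=float,                      # all NaN = unconstrained by default
)
device["max"] = 0.005                 # stock at most 5 kWh above the initial level (MWh)
device["min"] = -0.005                # stock at most 5 kWh below the initial level (MWh)
device["derivative max"] = 0.01       # charge at up to 10 kW (MW)
device["derivative min"] = -0.01      # discharge at up to 10 kW (MW)

ems = pd.DataFrame(index=index,
                   columns=["derivative max", "derivative min"], dtype=float)
ems["derivative max"] = 0.02
ems["derivative min"] = -0.02

commitment = pd.Series(0.0, index=index)   # previously committed flow per period (MW)
down_price, up_price = [5.0], [5.0]        # EUR/MW deviation penalties, scalar form

# schedule, costs, results = device_scheduler(
#     [device], ems, [commitment], down_price, up_price)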
예제 #24
0
def solve_fp_subproblem(solve_data, config):
    """Solves the feasibility pump NLP subproblem.

    This function sets up the 'fp_nlp' by relaxing integer variables,
    precomputes dual values, deactivates trivial constraints, and then solves the NLP model.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    fp_nlp : Pyomo model
        Fixed-NLP from the model.
    results : SolverResults
        Results from solving the fixed-NLP subproblem.
    """
    fp_nlp = solve_data.working_model.clone()
    MindtPy = fp_nlp.MindtPy_utils

    # Set up NLP
    fp_nlp.MindtPy_utils.objective_list[-1].deactivate()
    if solve_data.objective_sense == minimize:
        fp_nlp.improving_objective_cut = Constraint(
            expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) <= solve_data.UB)
    else:
        fp_nlp.improving_objective_cut = Constraint(
            expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) >= solve_data.LB)

    # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence over all iterations
    # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP'   https://doi.org/10.1007/s10107-012-0608-x
    # The norm type is consistent with the norm objective of the FP main problem.
    if config.fp_norm_constraint:
        generate_norm_constraint(fp_nlp, solve_data, config)

    MindtPy.fp_nlp_obj = generate_norm2sq_objective_function(
        fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only)

    MindtPy.cuts.deactivate()
    TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp)
    try:
        TransformationFactory(
            'contrib.deactivate_trivial_constraints').apply_to(
                fp_nlp,
                tmp=True,
                ignore_infeasible=False,
                tolerance=config.constraint_tolerance)
    except ValueError:
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fp_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fp subproblem'):
            results = nlpopt.solve(fp_nlp,
                                   tee=config.nlp_solver_tee,
                                   **nlp_args)
    return fp_nlp, results
예제 #25
0
파일: mip_solve.py 프로젝트: vova292/pyomo
def solve_linear_GDP(linear_GDP_model, solve_data, config):
    """Solves the linear GDP model and attempts to resolve solution issues."""
    m = linear_GDP_model
    GDPopt = m.GDPopt_utils
    # Transform disjunctions
    _bigm = TransformationFactory('gdp.bigm')
    _bigm.handlers[Port] = False
    _bigm.apply_to(m)

    preprocessing_transformations = [
        # # Propagate variable bounds
        # 'contrib.propagate_eq_var_bounds',
        # # Detect fixed variables
        # 'contrib.detect_fixed_vars',
        # # Propagate fixed variables
        # 'contrib.propagate_fixed_vars',
        # # Remove zero terms in linear expressions
        # 'contrib.remove_zero_terms',
        # # Remove terms in equal to zero summations
        # 'contrib.propagate_zero_sum',
        # # Transform bound constraints
        # 'contrib.constraints_to_var_bounds',
        # # Detect fixed variables
        # 'contrib.detect_fixed_vars',
        # # Remove terms in equal to zero summations
        # 'contrib.propagate_zero_sum',
        # Remove trivial constraints
        'contrib.deactivate_trivial_constraints',
    ]
    if config.mip_presolve:
        try:
            fbbt(m, integer_tol=config.integer_tolerance)
            for xfrm in preprocessing_transformations:
                TransformationFactory(xfrm).apply_to(m)
        except InfeasibleConstraintException:
            config.logger.debug("MIP preprocessing detected infeasibility.")
            mip_result = MasterProblemResult()
            mip_result.feasible = False
            mip_result.var_values = list(v.value for v in GDPopt.variable_list)
            mip_result.pyomo_results = SolverResults()
            mip_result.pyomo_results.solver.termination_condition = tc.error
            mip_result.disjunct_values = list(disj.indicator_var.value
                                              for disj in GDPopt.disjunct_list)
            return mip_result

    # Deactivate extraneous IMPORT/EXPORT suffixes
    getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
    getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()

    # Create solver, check availability
    if not SolverFactory(config.mip_solver).available():
        raise RuntimeError("MIP solver %s is not available." %
                           config.mip_solver)

    # Callback immediately before solving MIP master problem
    config.call_before_master_solve(m, solve_data)

    try:
        with SuppressInfeasibleWarning():
            mip_args = dict(config.mip_solver_args)
            elapsed = get_main_elapsed_time(solve_data.timing)
            remaining = max(config.time_limit - elapsed, 1)
            if config.mip_solver == 'gams':
                mip_args['add_options'] = mip_args.get('add_options', [])
                mip_args['add_options'].append('option reslim=%s;' % remaining)
            elif config.mip_solver == 'multisolve':
                mip_args['time_limit'] = min(
                    mip_args.get('time_limit', float('inf')), remaining)
            results = SolverFactory(config.mip_solver).solve(m, **mip_args)
    except RuntimeError as e:
        if 'GAMS encountered an error during solve.' in str(e):
            config.logger.warning(
                "GAMS encountered an error in solve. Treating as infeasible.")
            mip_result = MasterProblemResult()
            mip_result.feasible = False
            mip_result.var_values = list(v.value for v in GDPopt.variable_list)
            mip_result.pyomo_results = SolverResults()
            mip_result.pyomo_results.solver.termination_condition = tc.error
            mip_result.disjunct_values = list(disj.indicator_var.value
                                              for disj in GDPopt.disjunct_list)
            return mip_result
        else:
            raise
    terminate_cond = results.solver.termination_condition
    if terminate_cond is tc.infeasibleOrUnbounded:
        # Linear solvers will sometimes report infeasible or unbounded during
        # presolve but fail to distinguish between the two. We need to re-solve
        # with a solver option flag on.
        results, terminate_cond = distinguish_mip_infeasible_or_unbounded(
            m, config)
    if terminate_cond is tc.unbounded:
        # Solution is unbounded. Add an arbitrary bound to the objective and resolve.
        # This occurs when the objective is nonlinear. The nonlinear objective is moved
        # to the constraints, and deactivated for the linear master problem.
        obj_bound = 1E15
        config.logger.warning(
            'Linear GDP was unbounded. '
            'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. '
            'Check your initialization routine.'.format(obj_bound))
        main_objective = next(m.component_data_objects(Objective, active=True))
        GDPopt.objective_bound = Constraint(expr=(-obj_bound,
                                                  main_objective.expr,
                                                  obj_bound))
        with SuppressInfeasibleWarning():
            results = SolverFactory(config.mip_solver).solve(
                m, **config.mip_solver_args)
        terminate_cond = results.solver.termination_condition

    # Build and return results object
    mip_result = MasterProblemResult()
    mip_result.feasible = True
    mip_result.var_values = list(v.value for v in GDPopt.variable_list)
    mip_result.pyomo_results = results
    mip_result.disjunct_values = list(disj.indicator_var.value
                                      for disj in GDPopt.disjunct_list)

    if terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}:
        pass
    elif terminate_cond is tc.infeasible:
        config.logger.info(
            'Linear GDP is now infeasible. '
            'GDPopt has finished exploring feasible discrete configurations.')
        mip_result.feasible = False
    elif terminate_cond is tc.maxTimeLimit:
        # TODO check that status is actually ok and everything is feasible
        config.logger.info(
            'Unable to optimize linear GDP problem within time limit. '
            'Using current solver feasible solution.')
    elif (terminate_cond is tc.other
          and results.solution.status is SolutionStatus.feasible):
        # load the solution and suppress the warning message by setting
        # solver status to ok.
        config.logger.info('Linear GDP solver reported feasible solution, '
                           'but not guaranteed to be optimal.')
    else:
        raise ValueError('GDPopt unable to handle linear GDP '
                         'termination condition '
                         'of %s. Solver message: %s' %
                         (terminate_cond, results.solver.message))

    return mip_result
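A hedged sketch of how a caller might consume the MasterProblemResult built above. It only uses the fields set in this snippet (feasible, var_values, disjunct_values, pyomo_results); linear_gdp and working_model are placeholder names for the caller's models.

# Hypothetical usage sketch for the MasterProblemResult produced above.
mip_result = solve_linear_GDP(linear_gdp, solve_data, config)
if mip_result.feasible:
    # Copy master-problem variable values back onto a working model, pairing
    # them with the GDPopt_utils variable list they were read from.
    for var, val in zip(working_model.GDPopt_utils.variable_list,
                        mip_result.var_values):
        if val is not None:
            var.set_value(val, skip_validation=True)
else:
    config.logger.info(
        'Master problem not feasible: %s'
        % mip_result.pyomo_results.solver.termination_condition)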
Example #26
0
def solve_subproblem(solve_data, config):
    """Solves the Fixed-NLP (with fixed integers).

    This function sets up the 'fixed_nlp' by fixing binaries, sets continuous variables to their initial values,
    precomputes dual values, deactivates trivial constraints, and then solves the NLP model.

    Parameters
    ----------
    solve_data : MindtPySolveData
        Data container that holds solve-instance data.
    config : ConfigBlock
        The specific configurations for MindtPy.

    Returns
    -------
    fixed_nlp : Pyomo model
        Integer-variable-fixed NLP model.
    results : SolverResults
        Results from solving the Fixed-NLP.
    """
    fixed_nlp = solve_data.working_model.clone()
    MindtPy = fixed_nlp.MindtPy_utils
    solve_data.nlp_iter += 1

    # Set up NLP
    TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)

    MindtPy.cuts.deactivate()
    if config.calculate_dual:
        fixed_nlp.tmp_duals = ComponentMap()
        # tmp_duals are the values of the dual variables stored before deactivating trivial constraints
        # The values of the duals are computed as follows (complementary slackness):
        #
        # | constraint | c_geq | status at x1 | tmp_dual (violation) |
        # |------------|-------|--------------|----------------------|
        # | g(x) <= b  | -1    | g(x1) <= b   | 0                    |
        # | g(x) <= b  | -1    | g(x1) > b    | g(x1) - b            |
        # | g(x) >= b  | +1    | g(x1) >= b   | 0                    |
        # | g(x) >= b  | +1    | g(x1) < b    | b - g(x1)            |
        evaluation_error = False
        for c in fixed_nlp.MindtPy_utils.constraint_list:
            # We prefer to use the upper bound as the right-hand side since c is
            # assumed by default to be a (hopefully) convex function; c >= lb would
            # then be a nonconvex inequality, for which we would rather not add
            # linearizations unless we have to.
            rhs = value(c.upper) if c.has_ub() else value(c.lower)
            c_geq = -1 if c.has_ub() else 1
            try:
                fixed_nlp.tmp_duals[c] = c_geq * max(
                    0, c_geq * (rhs - value(c.body)))
            except (ValueError, OverflowError) as error:
                fixed_nlp.tmp_duals[c] = None
                evaluation_error = True
        if evaluation_error:
            for nlp_var, orig_val in zip(MindtPy.variable_list,
                                         solve_data.initial_var_values):
                if not nlp_var.fixed and not nlp_var.is_binary():
                    nlp_var.set_value(orig_val, skip_validation=True)
    try:
        TransformationFactory(
            'contrib.deactivate_trivial_constraints').apply_to(
                fixed_nlp,
                tmp=True,
                ignore_infeasible=False,
                tolerance=config.constraint_tolerance)
    except InfeasibleConstraintException:
        config.logger.warning(
            'infeasibility detected in deactivate_trivial_constraints')
        results = SolverResults()
        results.solver.termination_condition = tc.infeasible
        return fixed_nlp, results
    # Solve the NLP
    nlpopt = SolverFactory(config.nlp_solver)
    nlp_args = dict(config.nlp_solver_args)
    set_solver_options(nlpopt, solve_data, config, solver_type='nlp')
    with SuppressInfeasibleWarning():
        with time_code(solve_data.timing, 'fixed subproblem'):
            results = nlpopt.solve(fixed_nlp,
                                   tee=config.nlp_solver_tee,
                                   **nlp_args)
    return fixed_nlp, results
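A hypothetical caller sketch for the Fixed-NLP subproblem above. It assumes the copy_var_list_values helper used elsewhere in MindtPy and the tc alias for TerminationCondition; everything else comes from this snippet.

# Hypothetical caller sketch (copy_var_list_values and tc are assumed imports).
fixed_nlp, results = solve_subproblem(solve_data, config)
if results.solver.termination_condition in (tc.optimal, tc.locallyOptimal,
                                            tc.feasible):
    # Copy the feasible point back into the working model so OA cuts and
    # incumbent updates can be generated from it.
    copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
                         solve_data.working_model.MindtPy_utils.variable_list,
                         config)
elif results.solver.termination_condition is tc.infeasible:
    config.logger.info('Fixed-NLP subproblem infeasible; '
                       'a feasibility subproblem or cut would be used here.')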
Example #27
0
    def solve(self, model, **kwds):
        """Solve the model.
        Warning: this solver is still in beta. Keyword arguments subject to
        change. Undocumented keyword arguments definitely subject to change.
        Warning: at this point in time, if you try to use PSC or GBD with
        anything other than IPOPT as the NLP solver, bad things will happen.
        This is because the suffixes are not in place to extract dual values
        from the variable bounds for any other solver.
        TODO: fix needed with the GBD implementation.
        Args:
            model (Block): a Pyomo model or block to be solved
        """
        config = self.CONFIG(kwds.pop('options', {}))
        config.set_value(kwds)
        solve_data = MindtPySolveData()
        solve_data.results = SolverResults()
        solve_data.timing = Container()

        solve_data.original_model = model
        solve_data.working_model = model.clone()
        if config.integer_to_binary:
            TransformationFactory('contrib.integer_to_binary'). \
                apply_to(solve_data.working_model)

        new_logging_level = logging.INFO if config.tee else None
        with time_code(solve_data.timing, 'total', is_main_timer=True), \
             lower_logger_level_to(config.logger, new_logging_level), \
             create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
            config.logger.info("---Starting MindtPy---")

            MindtPy = solve_data.working_model.MindtPy_utils
            setup_results_object(solve_data, config)
            process_objective(solve_data, config)

            # Save model initial values.
            solve_data.initial_var_values = list(
                v.value for v in MindtPy.variable_list)

            # Store the initial model state as the best solution found. If we
            # find no better solution, then we will restore from this copy.
            solve_data.best_solution_found = None

            # Record solver name
            solve_data.results.solver.name = 'MindtPy' + str(config.strategy)

            # Validate the model to ensure that MindtPy is able to solve it.
            if not model_is_valid(solve_data, config):
                return

            # Create a model block in which to store the generated feasibility
            # slack constraints. Do not leave the constraints on by default.
            feas = MindtPy.MindtPy_feas = Block()
            feas.deactivate()
            feas.feas_constraints = ConstraintList(
                doc='Feasibility Problem Constraints')

            # Create a model block in which to store the generated linear
            # constraints. Do not leave the constraints on by default.
            lin = MindtPy.MindtPy_linear_cuts = Block()
            lin.deactivate()

            # Integer cuts exclude particular discrete decisions
            lin.integer_cuts = ConstraintList(doc='integer cuts')
            # Feasible integer cuts exclude discrete realizations that have
            # been explored via an NLP subproblem. Depending on model
            # characteristics, the user may wish to revisit NLP subproblems
            # (with a different initialization, for example). Therefore, these
            # cuts are not enabled by default.
            #
            # Note: these cuts will only exclude integer realizations that are
            # not already in the primary integer_cuts ConstraintList.
            lin.feasible_integer_cuts = ConstraintList(
                doc='explored integer cuts')
            lin.feasible_integer_cuts.deactivate()

            # Set up iteration counters
            solve_data.nlp_iter = 0
            solve_data.mip_iter = 0
            solve_data.mip_subiter = 0

            # set up bounds
            solve_data.LB = float('-inf')
            solve_data.UB = float('inf')
            solve_data.LB_progress = [solve_data.LB]
            solve_data.UB_progress = [solve_data.UB]

            # Set of NLP iterations for which cuts were generated
            lin.nlp_iters = Set(dimen=1)

            # Set of MIP iterations for which cuts were generated in ECP
            lin.mip_iters = Set(dimen=1)

            nonlinear_constraints = [
                c for c in MindtPy.constraint_list
                if c.body.polynomial_degree() not in (1, 0)
            ]
            lin.nl_constraint_set = RangeSet(
                len(nonlinear_constraints),
                doc="Integer index set over the nonlinear constraints")
            feas.constraint_set = RangeSet(
                len(MindtPy.constraint_list),
                doc="integer index set over the constraints")

            # # Mapping Constraint -> integer index
            # MindtPy.feas_map = {}
            # # Mapping integer index -> Constraint
            # MindtPy.feas_inverse_map = {}
            # # Generate the two maps. These maps may be helpful for later
            # # interpreting indices on the slack variables or generated cuts.
            # for c, n in zip(MindtPy.constraint_list, feas.constraint_set):
            #     MindtPy.feas_map[c] = n
            #     MindtPy.feas_inverse_map[n] = c

            # Create slack variables for OA cuts
            lin.slack_vars = VarList(bounds=(0, config.max_slack),
                                     initialize=0,
                                     domain=NonNegativeReals)
            # Create slack variables for feasibility problem
            feas.slack_var = Var(feas.constraint_set,
                                 domain=NonNegativeReals,
                                 initialize=1)

            # Flag indicating whether the solution improved in the past
            # iteration or not
            solve_data.solution_improved = False

            if not hasattr(solve_data.working_model, 'ipopt_zL_out'):
                solve_data.working_model.ipopt_zL_out = Suffix(
                    direction=Suffix.IMPORT)
            if not hasattr(solve_data.working_model, 'ipopt_zU_out'):
                solve_data.working_model.ipopt_zU_out = Suffix(
                    direction=Suffix.IMPORT)

            # Initialize the master problem
            with time_code(solve_data.timing, 'initialization'):
                MindtPy_initialize_master(solve_data, config)

            # Algorithm main loop
            with time_code(solve_data.timing, 'main loop'):
                MindtPy_iteration_loop(solve_data, config)

            if solve_data.best_solution_found is not None:
                # Update values in original model
                copy_var_list_values(from_list=solve_data.best_solution_found.
                                     MindtPy_utils.variable_list,
                                     to_list=MindtPy.variable_list,
                                     config=config)
                # MindtPy.objective_value.set_value(
                #     value(solve_data.working_objective_expr, exception=False))
                copy_var_list_values(
                    MindtPy.variable_list,
                    solve_data.original_model.component_data_objects(Var),
                    config)

            solve_data.results.problem.lower_bound = solve_data.LB
            solve_data.results.problem.upper_bound = solve_data.UB

        solve_data.results.solver.timing = solve_data.timing
        solve_data.results.solver.user_time = solve_data.timing.total
        solve_data.results.solver.wallclock_time = solve_data.timing.total

        solve_data.results.solver.iterations = solve_data.mip_iter

        return solve_data.results
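Since this solve method is exposed through Pyomo's solver plugin system, users normally reach it via SolverFactory rather than calling it directly. A minimal invocation sketch follows; the toy model and the mip_solver/nlp_solver choices ('glpk', 'ipopt') are assumptions about the local installation.

# Minimal usage sketch (solver names depend on what is installed locally).
from pyomo.environ import (ConcreteModel, Constraint, Integers, Objective,
                           SolverFactory, Var)

m = ConcreteModel()
m.x = Var(domain=Integers, bounds=(0, 10))
m.y = Var(bounds=(0, 10))
m.c = Constraint(expr=m.y >= (m.x - 3) ** 2)   # nonlinear constraint
m.obj = Objective(expr=m.x + m.y)

results = SolverFactory('mindtpy').solve(
    m, strategy='OA', mip_solver='glpk', nlp_solver='ipopt', tee=True)
print(results.solver.termination_condition)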
Example #28
0
def solve(instance, solver_path, definition_file):

    sol_file = definition_file.split('.')[0] + '.sol'
    # write the model to a file
    _, smap_id = instance.write(definition_file)

    # Solve the problem
    rc = subprocess.call(solver_path + ' ' + definition_file, shell=True)
    assert rc == 0

    # parse the solution into a result object
    result = SolverResults()
    result._smap_id = smap_id
    
    fin = open(sol_file)

    #parse the first line which contains the objective function value or solution status
    line = fin.readline()
    #line = line.replace('\n','')
    line_split = line.split(' ')
    if len(line_split) > 1:
        if line_split[0] == '=obj=':
            msg = "Optimal objective function value is " + line_split[1]
            objno_message = "OPTIMAL SOLUTION FOUND!"
            result.solver.termination_condition = TerminationCondition.optimal
            result.solver.status = SolverStatus.ok
            soln_status = SolutionStatus.optimal
        else:
            msg = "Solution status is " + line
            objno_message = "Solution status unkown, check output log for details."
            result.solver.termination_condition = TerminationCondition.unknown
            result.solver.status = SolverStatus.unknown
            soln_status = SolutionStatus.unknown
    else:
        msg = "Solution status is " + line
        objno_message = "Solution status unkown, check output log for details."
        result.solver.termination_condition = TerminationCondition.unknown
        result.solver.status = SolverStatus.unknown
        soln_status = SolutionStatus.unknown

    result.solver.message = msg

    #parse the rest of the file with variable values
    x_names = []
    x = []
    line = fin.readline()
    if line.strip() == "":
        line = fin.readline()
    while line:
        #if line[0] == '\n' or (line[0] == '\r' and line[1] == '\n'):
        #    break
        if line[0] == 'x':
            line = line.replace('\n','')
            line_split = line.split(' ')
            x_names.append(line_split[0])
            x.append(float(line_split[1]))      
        line = fin.readline()
    fin.close()

    if result.solver.termination_condition in [TerminationCondition.unknown,
                                            TerminationCondition.maxIterations,
                                            TerminationCondition.minFunctionValue,
                                            TerminationCondition.minStepLength,
                                            TerminationCondition.globallyOptimal,
                                            TerminationCondition.locallyOptimal,
                                            TerminationCondition.optimal,
                                            TerminationCondition.maxEvaluations,
                                            TerminationCondition.other,
                                            TerminationCondition.infeasible]:

        soln = result.solution.add()
        result.solution.status = soln_status
        soln.status_description = objno_message
        soln.message = result.solver.message.replace("\n", "; ")
        soln_variable = soln.variable
        i = 1
        for var_name, var_value in zip(x_names, x):
            #soln_variable["x"+str(i)] = {"Value" : var_value}
            soln_variable[var_name] = {"Value": var_value}
            i = i + 1
    
    return result
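A hypothetical caller sketch for the ad-hoc parser above. It only reads attributes the function actually sets (solver status, message, and the named variable values of the first solution); instance, solver_path and definition_file are assumed to come from the caller's setup.

# Hypothetical usage sketch; instance, solver_path and definition_file are
# whatever the caller already prepared for this workflow.
result = solve(instance, solver_path, definition_file)
print(result.solver.status, result.solver.termination_condition)
print(result.solver.message)
if len(result.solution) > 0:
    for var_name, data in result.solution(0).variable.items():
        print(var_name, data['Value'])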