Example #1
import sys

import cplex
from cplex.exceptions import CplexError

alpha_r, alpha_badr, alpha_itself, alpha_faction, alpha_bl, alpha_badbl = (
    0.7, -0.8, 0.4, 0.5, 0.1, -0.5)

# For computational efficiency and to avoid erroneous propagation, soft
# constraints associated with reciprocity and balance theory are introduced
# only on pairs for which a high-precision classifier assigned polarity.

F = cplex.Cplex()

# get all pos_i,j first
F.objective.set_sense(F.objective.sense.maximize)

######## pairwise weights ########
my_lin = []
my_rhs = []
my_sense = []
names = []
n = 3  # no_of_entities

#ψ_fact = faction inference
for i in range(1, n):
    for j in range(1, n):
        F.variables.add(obj=[alpha_itself],
Example #2
    nclist.remove(i)
'''Find maximal c choice'''

orig_stdout = sys.stdout
f = open(outfile, 'w')
sys.stdout = f
print('Case' + str(casenumber) +
      ': Linear Constraints using python code, no SDP and uptri vars.')
print("Brad", ",", "CRad", ",", "Time", ",", "NumVar", ",", "NumCon")
sys.stdout = orig_stdout
f.close()
'''Basic Model Setup'''
start = process_time()
'''Model parameters'''
Gaplim = 0.0
prob = cplex.Cplex()
# prob.parameters.timelimit.set(timelim);
# prob.parameters.mip.limits.solutions.set(Solim);
# prob.parameters.mip.tolerances.integrality.set(0);
# prob.parameters.mip.tolerances.mipgap.set(Gaplim);
prob.objective.set_sense(prob.objective.sense.maximize)
'''Model variables'''
newb = b.copy()
m = shape(A)[0]
n = shape(A)[1]
numvars = int(n * (n + 1) / 2.0 + n)
numuptri = numvars - n
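# (numvars counts the n*(n+1)/2 upper-triangular entries of a symmetric n-by-n
#  matrix plus the n original variables; numuptri is the upper-triangular part alone)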
constraintrowindex = 0
CIndex = {}
xindices = prob.variables.add(names=[
    "x" + str(i) for i in range(numvars)
Example #3
#José Eduardo González Barbosa
#Alexis Darien Zuníga Vera
#José Francisco Góngora Rangel

import cplex

# Create an instance of a linear problem to solve
problem = cplex.Cplex()

# We want to find a maximum of our objective function
problem.objective.set_sense(problem.objective.sense.maximize)

# The names of our variables
names = ["x1", "x2"]

# The objective function. More precisely, the coefficients of the objective
# function. Note that these are given as floats.
objective = [5.0, 4.0]

# Lower bounds. Since these are all zero, we could simply not pass them in as
# all zeroes is the default.
lower_bounds = [0.0, 0.0]

# Upper bounds. The default here would be cplex.infinity, or 1e+20.
upper_bounds = [cplex.infinity, 2]

problem.variables.add(obj=objective,
                      lb=lower_bounds,
                      ub=upper_bounds,
                      names=names)
#types=['I','I'])
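
# --- Continuation sketch (not part of the original snippet) ---
# A minimal, hypothetical way to finish the walk-through: add two illustrative
# constraints, x1 + 2*x2 <= 10 and 3*x1 - x2 >= 0 (made up for demonstration),
# then solve and read back the variable values.
problem.linear_constraints.add(
    lin_expr=[cplex.SparsePair(ind=names, val=[1.0, 2.0]),
              cplex.SparsePair(ind=names, val=[3.0, -1.0])],
    senses=["L", "G"],
    rhs=[10.0, 0.0],
    names=["c1", "c2"])
problem.solve()
print("Solution values:", problem.solution.get_values(names))
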
    def compile_instance(self,
                         pyomo_instance,
                         symbolic_solver_labels=False,
                         output_fixed_variable_bounds=False,
                         skip_trivial_constraints=False):

        from pyomo.core.base import Var, Constraint, SOSConstraint
        from pyomo.repn import canonical_is_constant, LinearCanonicalRepn, canonical_degree

        self._symbolic_solver_labels = symbolic_solver_labels
        self._output_fixed_variable_bounds = output_fixed_variable_bounds
        self._skip_trivial_constraints = skip_trivial_constraints

        self._has_quadratic_constraints = False
        self._has_quadratic_objective = False
        used_sos_constraints = False

        self._active_cplex_instance = cplex.Cplex()

        if self._symbolic_solver_labels:
            labeler = self._labeler = TextLabeler()
        else:
            labeler = self._labeler = NumericLabeler('x')

        self._symbol_map = SymbolMap()
        self._instance = pyomo_instance
        pyomo_instance.solutions.add_symbol_map(self._symbol_map)
        self._smap_id = id(self._symbol_map)

        # we use this when iterating over the constraints because it
        # will have a much smaller hash table, we also use this for
        # the warm start code after it is cleaned to only contain
        # variables referenced in the constraints
        self._variable_symbol_map = SymbolMap()

        # cplex wants the caller to set the problem type, which is (for
        # current purposes) strictly based on variable type counts.
        num_binary_variables = 0
        num_integer_variables = 0
        num_continuous_variables = 0

        #############################################
        # populate the variables in the cplex model #
        #############################################

        var_names = []
        var_lbs = []
        var_ubs = []
        var_types = []

        self._referenced_variable_ids.clear()

        # maps pyomo var data labels to the corresponding CPLEX variable id.
        self._cplex_variable_ids.clear()

        # cached in the loop below - used to update the symbol map
        # immediately following loop termination.
        var_label_pairs = []

        for var_data in pyomo_instance.component_data_objects(Var, active=True):

            if var_data.fixed and not self._output_fixed_variable_bounds:
                # if a variable is fixed, and we're preprocessing
                # fixed variables (as in not outputting them), there
                # is no need to add them to the compiled model.
                continue

            var_name = self._symbol_map.getSymbol(var_data, labeler)
            var_names.append(var_name)
            var_label_pairs.append((var_data, var_name))

            self._cplex_variable_ids[var_name] = len(self._cplex_variable_ids)

            if (var_data.lb is None) or (var_data.lb == -infinity):
                var_lbs.append(-cplex.infinity)
            else:
                var_lbs.append(value(var_data.lb))

            if (var_data.ub is None) or (var_data.ub == infinity):
                var_ubs.append(cplex.infinity)
            else:
                var_ubs.append(value(var_data.ub))

            if var_data.is_integer():
                var_types.append(self._active_cplex_instance.variables.type.integer)
                num_integer_variables += 1
            elif var_data.is_binary():
                var_types.append(self._active_cplex_instance.variables.type.binary)
                num_binary_variables += 1
            elif var_data.is_continuous():
                var_types.append(self._active_cplex_instance.variables.type.continuous)
                num_continuous_variables += 1
            else:
                raise TypeError("Invalid domain type for variable with name '%s'. "
                                "Variable is not continuous, integer, or binary."
                                % (var_name,))

        self._active_cplex_instance.variables.add(names=var_names,
                                                  lb=var_lbs,
                                                  ub=var_ubs,
                                                  types=var_types)

        self._active_cplex_instance.variables.add(lb=[1],
                                                  ub=[1],
                                                  names=["ONE_VAR_CONSTANT"])

        self._cplex_variable_ids["ONE_VAR_CONSTANT"] = len(self._cplex_variable_ids)

        self._variable_symbol_map.addSymbols(var_label_pairs)
        self._cplex_variable_names = self._active_cplex_instance.variables.get_names()

        ########################################################
        # populate the standard constraints in the cplex model #
        ########################################################

        expressions = []
        senses = []
        rhss = []
        range_values = []
        names = []

        qexpressions = []
        qlinears = []
        qsenses = []
        qrhss = []
        qnames = []

        for block in pyomo_instance.block_data_objects(active=True):

            gen_con_canonical_repn = \
                getattr(block, "_gen_con_canonical_repn", True)
            # Get/Create the ComponentMap for the repn
            if not hasattr(block,'_canonical_repn'):
                block._canonical_repn = ComponentMap()
            block_canonical_repn = block._canonical_repn

            for con in block.component_data_objects(Constraint,
                                                    active=True,
                                                    descend_into=False):

                if (con.lower is None) and \
                   (con.upper is None):
                    continue  # not binding at all, don't bother

                con_repn = None
                if isinstance(con, LinearCanonicalRepn):
                    con_repn = con
                else:
                    if gen_con_canonical_repn:
                        con_repn = generate_canonical_repn(con.body)
                        block_canonical_repn[con] = con_repn
                    else:
                        con_repn = block_canonical_repn[con]

                # There are conditions, e.g., when fixing variables, under which
                # a constraint block might be empty.  Ignore these, for both
                # practical reasons and the fact that the CPLEX LP format
                # requires a variable in the constraint body.  It is also
                # possible that the body of the constraint consists of only a
                # constant, in which case there is no variable to reference.
                if isinstance(con_repn, LinearCanonicalRepn):
                    if (con_repn.linear is None) and \
                       self._skip_trivial_constraints:
                        continue
                else:
                    # we shouldn't come across a constant canonical repn
                    # that is not LinearCanonicalRepn
                    assert not canonical_is_constant(con_repn)

                name = self._symbol_map.getSymbol(con, labeler)
                expr = None
                qexpr = None
                quadratic = False
                if isinstance(con_repn, LinearCanonicalRepn):
                    expr, offset = \
                        self._encode_constraint_body_linear_specialized(con_repn,
                                                                        labeler,
                                                                        use_variable_names=False,
                                                                        cplex_variable_name_index_map=self._cplex_variable_ids)
                else:
                    degree = canonical_degree(con_repn)
                    if degree == 2:
                        quadratic = True
                    elif (degree != 0) and (degree != 1):
                        raise ValueError(
                            "CPLEXPersistent plugin does not support general nonlinear "
                            "constraint expression (only linear or quadratic).\n"
                            "Constraint: %s" % (con.cname(True)))
                    expr, offset = self._encode_constraint_body_linear(con_repn,
                                                                       labeler)

                if quadratic:
                    if expr is None:
                        expr = cplex.SparsePair(ind=[0],val=[0.0])
                    self._has_quadratic_constraints = True

                    qexpr = self._encode_constraint_body_quadratic(con_repn,labeler)
                    qnames.append(name)

                    if con.equality:
                        # equality constraint.
                        qsenses.append('E')
                        qrhss.append(self._get_bound(con.lower) - offset)

                    elif (con.lower is not None) and (con.upper is not None):
                        raise RuntimeError(
                            "The CPLEXDirect plugin can not translate range "
                            "constraints containing quadratic expressions.")

                    elif con.lower is not None:
                        assert con.upper is None
                        qsenses.append('G')
                        qrhss.append(self._get_bound(con.lower) - offset)

                    else:
                        qsenses.append('L')
                        qrhss.append(self._get_bound(con.upper) - offset)

                    qlinears.append(expr)
                    qexpressions.append(qexpr)

                else:
                    names.append(name)
                    expressions.append(expr)

                    if con.equality:
                        # equality constraint.
                        senses.append('E')
                        rhss.append(self._get_bound(con.lower) - offset)
                        range_values.append(0.0)

                    elif (con.lower is not None) and (con.upper is not None):
                        # ranged constraint.
                        senses.append('R')
                        lower_bound = self._get_bound(con.lower) - offset
                        upper_bound = self._get_bound(con.upper) - offset
                        rhss.append(lower_bound)
                        range_values.append(upper_bound - lower_bound)

                    elif con.lower is not None:
                        senses.append('G')
                        rhss.append(self._get_bound(con.lower) - offset)
                        range_values.append(0.0)

                    else:
                        senses.append('L')
                        rhss.append(self._get_bound(con.upper) - offset)
                        range_values.append(0.0)

        ###################################################
        # populate the SOS constraints in the cplex model #
        ###################################################

        # SOS constraints - largely taken from cpxlp.py so updates there,
        # should be applied here
        # TODO: Allow users to specify the variables coefficients for custom
        # branching/set orders - refer to cpxlp.py
        sosn = self._capabilities.sosn
        sos1 = self._capabilities.sos1
        sos2 = self._capabilities.sos2
        modelSOS = ModelSOS()
        for soscondata in pyomo_instance.component_data_objects(SOSConstraint,
                                                                active=True):
            level = soscondata.level
            if (level == 1 and not sos1) or \
               (level == 2 and not sos2) or \
               (level > 2 and not sosn):
                raise Exception("Solver does not support SOS level %s constraints"
                                % (level,))
            modelSOS.count_constraint(self._symbol_map,
                                      labeler,
                                      self._variable_symbol_map,
                                      soscondata)

        if modelSOS.sosType:
            for key in modelSOS.sosType:
                self._active_cplex_instance.SOS.add(type = modelSOS.sosType[key],
                                       name = modelSOS.sosName[key],
                                       SOS = [modelSOS.varnames[key],
                                              modelSOS.weights[key]])
                self._referenced_variable_ids.update(modelSOS.varids[key])
            used_sos_constraints = True

        self._active_cplex_instance.linear_constraints.add(
            lin_expr=expressions,
            senses=senses,
            rhs=rhss,
            range_values=range_values,
            names=names)

        for index in range(len(qexpressions)):
            self._active_cplex_instance.quadratic_constraints.add(
                lin_expr=qlinears[index],
                quad_expr=qexpressions[index],
                sense=qsenses[index],
                rhs=qrhss[index],
                name=qnames[index])

        #############################################
        # populate the objective in the cplex model #
        #############################################

        self.compile_objective(pyomo_instance)

        ################################################
        # populate the problem type in the cplex model #
        ################################################

        # This gets rid of the annoying "Freeing MIP data." message.
        def _filter_freeing_mip_data(val):
            if val.strip() == 'Freeing MIP data.':
                return ""
            return val
        self._active_cplex_instance.set_warning_stream(sys.stderr,
                                                       fn=_filter_freeing_mip_data)

        if (self._has_quadratic_objective is True) or \
           (self._has_quadratic_constraints is True):
            if (num_integer_variables > 0) or \
               (num_binary_variables > 0) or \
               (used_sos_constraints):
                if self._has_quadratic_constraints is True:
                    self._active_cplex_instance.set_problem_type(
                        self._active_cplex_instance.problem_type.MIQCP)
                else:
                    self._active_cplex_instance.set_problem_type(
                        self._active_cplex_instance.problem_type.MIQP)
            else:
                if self._has_quadratic_constraints is True:
                    self._active_cplex_instance.set_problem_type(
                        self._active_cplex_instance.problem_type.QCP)
                else:
                    self._active_cplex_instance.set_problem_type(
                        self._active_cplex_instance.problem_type.QP)
        elif (num_integer_variables > 0) or \
             (num_binary_variables > 0) or \
             (used_sos_constraints):
            self._active_cplex_instance.set_problem_type(
                self._active_cplex_instance.problem_type.MILP)
        else:
            self._active_cplex_instance.set_problem_type(
                self._active_cplex_instance.problem_type.LP)

        # restore the warning stream without our filter function
        self._active_cplex_instance.set_warning_stream(sys.stderr)
Example #5
def optimize_length(cycles, vertices, dirname, coef=[], ilp=True):
    prob = cplex.Cplex()
    prob.set_problem_name("KIDNEY EXCHANGE")
    names = []

    # Set problem type as LP or ILP
    prob.set_problem_type(cplex.Cplex.problem_type.LP)
    obj = coef

    c = {}
    i = 0
    for cycle in cycles:
        names.append("c_%s" % str(cycle))
        i = i + 1

    # Adds variable and related data to problem
    # obj is a list of floats, specifying linear objective coefficient of variables.
    # lb:- lower bound, ub:- upper bound
    # types must be either list of single-character string or a string containing types of variables
    # names is a list of string
    # column may be a list of sparse vector or matrix in a list of list format
    # option = int((input("Choose option\n1.ILP\n2.LP")))

    if ilp:
        prob.variables.add(
            obj=obj,
            names=names,
            lb=[0] * len(names),
            ub=[1] * len(names),
            types=["B"] * len(names),
        )

    elif not ilp:
        prob.variables.add(
            obj=obj,
            names=names,
            lb=[0] * len(names),
            ub=[1] * len(names),
            types=["C"] * len(names),
        )

    constraints = []
    constraint_names = []
    for v in vertices:
        constraint = []
        constraint_names = []
        i = 0
        for cycle in cycles:
            if v in cycle:
                constraint.append(names[i])
            i = i + 1
        if constraint:
            constraint_names.append("v" + str(v))
            # Adds a linear constraint to the problem.
            # lin_expr may either be a list of sparse pair instances, or matrix in a list of a list format.
            # senses must be either a list of single-character string or a string containing the senses of linear constraint. Each entry must be one of ‘G’, ‘L’, ‘E’, ‘R’ ->greater than, less than, equality and ranged constraint.
            # rhs is a list of floats specifying right hand side of each linear constraint.
            # returns an iterator containing indices of added linear constraint
            prob.linear_constraints.add(
                lin_expr=[cplex.SparsePair(constraint, [1] * len(constraint))],
                senses=["L"],
                rhs=[1],
                names=constraint_names,
            )

    prob.objective.set_sense(prob.objective.sense.maximize)
    # dump the lp in file
    prob.write(dirname + "/" + "optimize_length.lp")

    start = prob.get_time()

    # Solving with local cplex
    prob.solve()

    end = prob.get_time()

    print("SUM IS", sum(prob.solution.get_values()))

    print("*****************************************************************")
    print(end - start)
    print("*****************************************************************")

    # return values of all variables from problem.
    return prob.solution.get_values()
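
# Hypothetical usage sketch (illustrative data only, not from the source); assumes
# the usual `import cplex` at module level as in the other examples. Each cycle is
# a tuple of vertex ids and its objective coefficient is the number of transplants.
example_cycles = [(1, 2), (2, 3, 4), (1, 4)]
example_vertices = [1, 2, 3, 4]
example_coef = [float(len(c)) for c in example_cycles]
chosen = optimize_length(example_cycles, example_vertices, ".",
                         coef=example_coef, ilp=True)
print("Chosen cycle indicators:", chosen)
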
def dual_of_Restrited(pathsK_nodes):
    """ Goal : Obtain the dual variables of the restricted master problem
    """
    #   ------------- DUAL OF THE RESTRICTED MASTER PROBLEM -------------

    model = cplex.Cplex()  # Initialize the model
    # Set the objective function to maximization
    model.objective.set_sense(model.objective.sense.maximize)

    #Add variables
    # Correspond to the flow conservation constraints
    for i in range(nCommodities):
        # obj contains the coefficients of the decision variables in the objective function
        model.variables.add(
            obj=[1],
            lb=[-cplex.infinity],
            ub=[cplex.infinity],
            types=[model.variables.type.continuous],
            names=['Y( K_' + str(i) + ')'])

    # Correspond to the node capacity constraints
    for i in range(nStations):
        model.variables.add(obj=[capacity_node[node[i][2] - 1]],
                            lb=[-cplex.infinity],
                            ub=[0],
                            types=[model.variables.type.continuous],
                            names=['Y( ' + str(node[i][0]) + ')'])

    #Add constraints
    for i in range(nCommodities):
        for j in range(len(pathsK_nodes[i])):
            ind = []  # indices of the non-zero variables of the current constraint
            val = []  # their coefficients
            row = []
            ind.append(i)
            val.append(1)
            for k in range(nStations):
                # Check which nodes are contained in the current path (do not include the starting node)
                if pathsK_nodes[i][j][k] > 0:
                    ind.append(nCommodities + k)
                    val.append(commodities[i][2])
            row.append([ind, val])
            model.linear_constraints.add(
                lin_expr=row,
                senses="L",  # less-than-or-equal constraint
                # right-hand side of the constraint
                rhs=[pathsK_nodes[i][j][nStations] * float(commodities[i][2])])

    try:
        print("\n\n-----------RESTRICTED DUAL SOLUTION : -----------\n")
        model.set_results_stream(None)
        model.set_warning_stream(None)
        model.solve()
        model.write('test_dual.lp')
    except CplexSolverError as e:
        print("Exception raised during dual of restricted problem: " + e)

    # Return the dual variables corresponding to the commodities, arcs and nodes constraints
    return model.solution.get_values(
    )[:nCommodities], model.solution.get_values()[nCommodities:]
def pricingProblem(dualCommodities, dualStations):
    """ Return the paths corresponding to the most negative reduced cost of each commodity
    """
    #   ------------- SOLVE THE PRICING PROBLEM-------------

    reducedCost = 0
    forCommodity = []  # ids of the commodities which will receive a new path, if any
    bestPath = []  # contains the paths which will be added (arc ids and cost)
    # Solve the shortest path problem (with updated lengths) for each commodity
    for i in range(nCommodities):
        model = cplex.Cplex()
        # Set the objective function to minimization
        model.objective.set_sense(model.objective.sense.minimize)

        #Create decision variables (array of nArcs size)
        for j in range(nArcs):
            model.variables.add(obj=[
                commodities[i][2] * (arc[j][2] - dualStations[arc[j][1] - 1])
            ],
                                lb=[0],
                                ub=[1],
                                types=[model.variables.type.integer],
                                names=['alpha ( ' + str(j) + ' )'])

        model.objective.set_offset(
            -dualCommodities[i])  # Check if does what we want

        # Add the consistency constraints (the decision variables form a path)
        #For each node, check if it is a starting node, ending node or in-between node
        for k in range(nStations):
            ind = []
            val = []
            row = []
            if (commodities[i][0] != k
                    and commodities[i][1] != k):  #If the node is in between
                rhs = [0]
                for j in range(nArcs):
                    if (
                            arc[j][0] - 1 == k
                    ):  # Compute its leaving flow (-1 because in the data we start with index 1 and not 0)
                        ind.append(j)
                        val.append(1)
                    elif (arc[j][1] - 1 == k):  # Minus what comes in
                        ind.append(j)
                        val.append(-1)
            elif (commodities[i][0] == k
                  ):  # If the node is the starting node of the commodity
                rhs = [1]
                for j in range(nArcs):
                    if (arc[j][0] - 1 == k):  # Compute its leaving flow
                        ind.append(j)
                        val.append(1)
                    elif (arc[j][1] - 1 == k):  # Minus what comes in
                        ind.append(j)
                        val.append(-1)
            elif (commodities[i][1] == k
                  ):  # If the node is the ending node of the commodity
                rhs = [-1]
                for j in range(nArcs):
                    if (arc[j][0] - 1 == k):  # Compute its leaving flow
                        ind.append(j)
                        val.append(1)
                    elif (arc[j][1] - 1 == k):  # Minus what comes in
                        ind.append(j)
                        val.append(-1)
            row.append([ind, val])
            model.linear_constraints.add(
                lin_expr=row,
                senses="E",  #Equality constraint
                rhs=rhs)

        try:
            print("\n\n-----------PRICING PROBLEM SOLUTION FOR COMMODITY n°",
                  i, ": -----------\n")
            model.set_results_stream(None)
            model.set_warning_stream(None)
            model.solve()
            model.write('test_princing.lp')
            #Print reduced cost for the current commodity
            print()
            print("\n\tREDUCED COST : ", model.solution.get_objective_value())
            #print("\n\tNEW PATH : ", model.solution.get_values())
        except CplexSolverError as e:
            print("Exception raised during pricing problem: " + e)

        # If we obtained a negative reduced cost, take it (round to avoid numerical error)
        if round(model.solution.get_objective_value()) < 0:
            reducedCost = model.solution.get_objective_value()
            toAdd = model.solution.get_values()
            forCommodity.append(i)

            tempCost = 0
            # use a separate index so the commodity index i is not shadowed
            for a in range(nArcs):
                if toAdd[a] == 1:
                    tempCost += arc[a][2]
            # Append the cost of the path after its arc description
            toAdd.append(tempCost)

            bestPath.append(toAdd)

    return reducedCost, bestPath, forCommodity
Example #8
def solve_conflicting_phylogeny(cf_graph,
                                no_muts,
                                pool_size,
                                no_plotted_solutions,
                                time_limit=None,
                                n_max_threads=0):
    """
    Translates the given conflict graph into an integer linear program and
    solves the ILP for the minimum number of mutation patterns (sets of identical mutation patterns)
    which need to be ignored
    :param cf_graph: Conflict graph: nodes correspond to mutation patterns and edges to their conflicts
    :param no_muts: Number of processed mutations
    :param pool_size: number of best solutions explored by the ILP solver to estimate confidence
    :param no_plotted_solutions: number of best solutions from the solution pool that will be plotted
    :param time_limit: time limit for the MILP solver in seconds
    :param n_max_threads: sets the default maximal number of parallel threads that will be invoked by CPLEX
      (0: default, let CPLEX decide; 1: single threaded; N: uses up to N threads)
      https://www.ibm.com/support/knowledgecenter/en/SS9UKU_12.5.0/com.ibm.cplex.zos.help/Parameters/topics/Threads.html
    :return: list of top solutions, dictionary with calculated node likelihoods based on the likelihood of
             each solution in the solution pool
    """

    logger.info(
        'Build linear program (cplex) for finding the minimal number of conflicting mutations.'
    )
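
    # Sketch of the ILP built below (not in the original source): one binary variable
    # x_p per mutation pattern (node) p with weight w_p, and for every conflict edge
    # (p, q) the constraint x_p + x_q >= 1; minimizing sum_p w_p * x_p makes this a
    # weighted minimum vertex cover of the conflict graph.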

    # the number of columns in the ILP is given by the number of nodes in the conflict graph;
    # the weighting of the mutation patterns corresponds to the number of mutations which are conflicting
    objective_function = [
        data['weight'] for _, data in cf_graph.nodes(data=True)
    ]

    logger.debug('Objective function: ' + ', '.join(
        '{}: {:.1e}'.format(var_idx, weight)
        for var_idx, weight in enumerate(objective_function, 1)))

    # look at roughly the fifth-highest reliability score value to get an impression of the magnitude of these values
    ex_rs = heapq.nlargest(min(5, cf_graph.order()), objective_function)[-1]
    # scale all values to avoid numerical issues with CPLEX
    Solution.SCALING_FACTOR = 1e20 / ex_rs
    scaled_obj_func = [Solution.SCALING_FACTOR * x for x in objective_function]

    # column types
    ctypes = ['B' for _ in range(len(objective_function))]

    # add column names to the ILP
    cnames = []
    ilp_col_mps = []
    for col_idx, node in enumerate(cf_graph.nodes(), 0):
        cnames.append(str(node))
        ilp_col_mps.append(node)

    lp = cp.Cplex()

    # set objective function which is the minimum number of positions with a sequencing error
    lp.objective.set_sense(lp.objective.sense.minimize)
    lp.parameters.threads.set(n_max_threads)
    # see more information at:
    # http://www.ibm.com/support/knowledgecenter/en/SS9UKU_12.4.0/com.ibm.cplex.zos.help/Parameters/topics/SolnPoolGap.html?view=embed
    # lp.parameters.mip.tolerances.absmipgap.set(1e-15)
    # lp.parameters.mip.tolerances.mipgap.set(1e-15)
    # lp.parameters.simplex.tolerances.optimality.set(1e-09)

    # set time limit for MILP solver
    if time_limit is not None:
        lp.parameters.timelimit.set(time_limit)

    lp.variables.add(obj=scaled_obj_func, types=ctypes, names=cnames)

    # add evolutionary constraints
    constraints = []  # LHS (left hand side) of the rows in the ILP
    row_names = []  # names of the rows (constraints)
    for constraint_idx, (source, sink) in enumerate(cf_graph.edges(), 1):
        constraint = [[str(source), str(sink)], [1, 1]]

        constraints.append(constraint)
        row_names.append(str(source) + '-' + str(sink))

        # logger.debug('Add constraint {}: {}'.format(constraint_idx, str(source)+'-'+str(sink)))

    row_rhss = [1 for _ in range(len(constraints))
                ]  # 1 is the RHS in all constraints
    row_senses = ['G' for _ in range(len(constraints))
                  ]  # greater equal is used in all constraints
    lp.linear_constraints.add(lin_expr=constraints,
                              senses=row_senses,
                              rhs=row_rhss,
                              names=row_names)

    logger.debug('Added {} constraints.'.format(len(constraints)))

    # # 3... Node file on disk and compressed
    # lp.parameters.mip.strategy.file.set(3)
    # lp.parameters.workmem.set(2048)

    # ################ explore the solution space by keeping a pool of the best solutions ###############
    # more information at:
    # https://www.ibm.com/support/knowledgecenter/SS9UKU_12.5.0/com.ibm.cplex.zos.help/Parameters/topics/PopulateLim.html
    # and populate.py an example within the CPLEX installation
    if pool_size > 1:
        lp.parameters.mip.limits.populate.set(pool_size)
        # strategy for replacing a solution in the solution pool when the solution pool has reached its capacity
        lp.parameters.mip.pool.replace.set(
            1)  # 1...replace the solution which has the worst objective

        # Controls the trade-off between the number of solutions generated for the solution pool and
        # the amount of time or memory consumed.
        lp.parameters.mip.pool.intensity.set(
            4)  # 4...very aggressive: enumerate all practical solutions
        # lp.parameters.mip.pool.capacity.set(pool_size)

        # set the solution pool relative gap parameter to obtain solutions
        # of objective value within 10% of the optimal
        # lp.parameters.mip.pool.relgap.set(5)
        try:
            lp.populate_solution_pool(
            )  # solve the Integer Linear Program (ILP)
        except cp.exceptions.CplexSolverError as e:
            logger.error("Exception raised during populate")
            raise e
    else:
        lp.solve()

    # assess obtained solutions
    solutions, weighted_node_lh = assess_solutions(lp.solution,
                                                   objective_function,
                                                   cf_graph, ilp_col_mps,
                                                   no_muts, pool_size,
                                                   no_plotted_solutions)

    return solutions, weighted_node_lh
Example #9
def bootstrapping_solving(cf_graph, mp_weights, idx_to_mp, no_samples):
    """
    Generate and solve the MILP of the bootstrap-resampled data set and track the identified
    mutation pattern occurrences
    :param cf_graph: Conflict graph: nodes correspond to mutation patterns and edges to their conflicts
    :param mp_weights: 2-dimensional array with log probability that this variant has this mutation pattern
    :param idx_to_mp: dictionary from mutation patterns to the column ids used in the mp_weights array
    :param no_samples: Number of samples with replacement for the bootstrapping
    :return: observed occurrences of mutation patterns
    """

    # record the chosen patterns in the down-sampled data set
    node_frequencies = Counter()

    logger.debug(
        'Build linear programs (cplex) for the robustness analysis through bootstrapping.'
    )

    # add column names to the ILP
    ilp_col_names = []
    ilp_col_mps = []
    ilp_cols = dict()
    for col_idx, node in enumerate(cf_graph.nodes(), 0):
        ilp_col_names.append(str(node))
        ilp_cols[node] = col_idx
        ilp_col_mps.append(node)

    # column types
    var_types = ['B' for _ in range(len(ilp_col_names))]

    # add evolutionary constraints
    constraints = []  # LHS (left hand side) of the rows in the ILP
    row_names = []  # names of the rows (constraints)
    for constraint_idx, (source, sink) in enumerate(cf_graph.edges(), 1):
        constraint = [[str(source), str(sink)], [1, 1]]
        constraints.append(constraint)
        row_names.append(str(source) + '-' + str(sink))

        # logger.debug('Add constraint {}: {}'.format(constraint_idx, str(source)+'-'+str(sink)))

    row_rhss = [1 for _ in range(len(constraints))
                ]  # 1 is the RHS in all constraints
    row_senses = ['G' for _ in range(len(constraints))
                  ]  # greater equal is used in all constraints
    logger.debug('Generated {} constraints.'.format(len(constraints)))
    logger.info('Do bootstrapping with {} samples.'.format(no_samples))

    m = len(mp_weights)  # number of variants
    for rep in range(no_samples):

        # obtain sample of used variants
        used_muts = np.random.choice(m, m, replace=True)
        # the number of columns in the ILP is given by the number of nodes in the conflict graph;
        # the weighting of the mutation patterns corresponds to the number of mutations which are conflicting
        objective_function = np.zeros(cf_graph.order())

        # update objective function (mutation pattern scores)
        # decrease objective function values according to the removed patterns
        for used_mut in used_muts:
            for col_idx, log_ml in mp_weights[used_mut].items():
                # add the (negative log probability) part of the reliability score of this mutation in this pattern
                # note we are in log space
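                # (concretely: log_ml = log p(pattern | variant), so -math.expm1(log_ml)
                #  equals 1 - p and the update adds -log(1 - p), this variant's
                #  contribution to the pattern's reliability score)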
                if idx_to_mp[col_idx] in ilp_cols.keys():
                    objective_function[ilp_cols[
                        idx_to_mp[col_idx]]] -= math.log(-math.expm1(log_ml))
                # else: for parsimony-uninformative mutation patterns nothing needs to be done

        # logger.debug('Update objective function: ' + ', '.join(
        #     '{}: {:.3f}'.format(var_idx, weight) for var_idx, weight in enumerate(objective_function, 1)))

        # generate new MILP
        lp = cp.Cplex()
        # lp.set_error_stream(None)
        # lp.set_warning_stream(None)
        lp.set_results_stream(None)
        lp.set_log_stream(None)
        lp.variables.add(obj=objective_function,
                         types=var_types,
                         names=ilp_col_names)
        lp.objective.set_sense(lp.objective.sense.minimize)
        lp.linear_constraints.add(lin_expr=constraints,
                                  senses=row_senses,
                                  rhs=row_rhss,
                                  names=row_names)

        # solve the Integer Linear Program (ILP)
        lp.solve()
        sol = lp.solution  # obtain solution
        # solve_stat = sol.get_status()
        # logger.debug('Solution status: {}'.format(sol.status[solve_stat]))
        #
        # # proportional to incompatible mutations (depending on the weight definition)
        # objective_value = sol.get_objective_value()
        # logger.debug('Minimum vertex cover is of weight (objective value) {:.4f} (original weight: {:4f}).'
        #               .format(objective_value, sum(val for val in objective_function)))

        # column solution values
        # solution_values = sol.get_values()
        # logger.debug('Column solution values: ' + ', '.join(
        #     '{}: {}'.format(var_idx, status) for var_idx, status in enumerate(solution_values, 1)))

        solution_values = sol.get_values()
        for ilp_col_idx, val in enumerate(solution_values):
            if round(val, 5) == 0:
                node_frequencies[ilp_col_mps[ilp_col_idx]] += 1

        if no_samples >= 100 and rep > 0 and rep % (no_samples / 100) == 0:
            logger.debug('{:.0%} of bootstrapping completed.'.format(
                1.0 * rep / no_samples))

    return node_frequencies
Example #10
    def __init__(self, problem=None, *args, **kwargs):

        super(Model, self).__init__(*args, **kwargs)

        if problem is None:
            self.problem = cplex.Cplex()

        elif isinstance(problem, cplex.Cplex):
            self.problem = problem
            zipped_var_args = zip(
                self.problem.variables.get_names(),
                self.problem.variables.get_lower_bounds(),
                self.problem.variables.get_upper_bounds(),
                # self.problem.variables.get_types(), # TODO uncomment when cplex is fixed
            )
            for name, lb, ub in zipped_var_args:
                var = Variable(name, lb=lb, ub=ub,
                               problem=self)  # Type should also be in there
                super(Model, self)._add_variables([
                    var
                ])  # This avoids adding the variable to the glpk problem
            zipped_constr_args = zip(
                self.problem.linear_constraints.get_names(),
                self.problem.linear_constraints.get_rows(),
                self.problem.linear_constraints.get_senses(),
                self.problem.linear_constraints.get_rhs())
            variables = self._variables
            for name, row, sense, rhs in zipped_constr_args:
                constraint_variables = [variables[i - 1] for i in row.ind]

                # Since constraint expressions are lazily retrieved from the solver they don't have to be built here
                # lhs = _unevaluated_Add(*[val * variables[i - 1] for i, val in zip(row.ind, row.val)])
                lhs = symbolics.Integer(0)
                if sense == 'E':
                    constr = Constraint(lhs,
                                        lb=rhs,
                                        ub=rhs,
                                        name=name,
                                        problem=self)
                elif sense == 'G':
                    constr = Constraint(lhs, lb=rhs, name=name, problem=self)
                elif sense == 'L':
                    constr = Constraint(lhs, ub=rhs, name=name, problem=self)
                elif sense == 'R':
                    range_val = self.problem.linear_constraints.get_range_values(
                        name)
                    if range_val > 0:
                        constr = Constraint(lhs,
                                            lb=rhs,
                                            ub=rhs + range_val,
                                            name=name,
                                            problem=self)
                    else:
                        constr = Constraint(lhs,
                                            lb=rhs + range_val,
                                            ub=rhs,
                                            name=name,
                                            problem=self)
                else:  # pragma: no cover
                    raise Exception(
                        '%s is not a recognized constraint sense.' % sense)

                for variable in constraint_variables:
                    try:
                        self._variables_to_constraints_mapping[
                            variable.name].add(name)
                    except KeyError:
                        self._variables_to_constraints_mapping[
                            variable.name] = set([name])

                super(Model, self)._add_constraints([constr], sloppy=True)
            try:
                objective_name = self.problem.objective.get_name()
            except CplexSolverError as e:
                if 'CPLEX Error  1219:' not in str(e):
                    raise e
            else:
                linear_expression = add([
                    mul(symbolics.Real(coeff), variables[index]) for index,
                    coeff in enumerate(self.problem.objective.get_linear())
                    if coeff != 0.
                ])
                try:
                    quadratic = self.problem.objective.get_quadratic()
                except IndexError:
                    quadratic_expression = Zero
                else:
                    quadratic_expression = self._get_quadratic_expression(
                        quadratic)

                self._objective = Objective(
                    linear_expression + quadratic_expression,
                    problem=self,
                    direction={
                        self.problem.objective.sense.minimize: 'min',
                        self.problem.objective.sense.maximize: 'max'
                    }[self.problem.objective.get_sense()],
                    name=objective_name)
        else:
            raise TypeError("Provided problem is not a valid CPLEX model.")
        self.configuration = Configuration(problem=self, verbosity=0)
Example #11
    @classmethod
    def from_lp(cls, lp_form):
        problem = cplex.Cplex()
        with TemporaryFilename(suffix=".lp", content=lp_form) as tmp_file_name:
            problem.read(tmp_file_name)
        model = cls(problem=problem)
        return model
Example #12
    'optimal_relaxed_inf': interface.SPECIAL,
    'optimal_relaxed_quad': interface.SPECIAL,
    'optimal_relaxed_sum': interface.SPECIAL,
    'optimal_tolerance': interface.OPTIMAL,
    'populate_solution_limit': interface.SPECIAL,
    'solution_limit': interface.SPECIAL,
    'unbounded': interface.UNBOUNDED,
    'relaxation_unbounded': interface.UNBOUNDED,
    'non-existing-status':
    'Here for testing that missing statuses are handled.'
    # 102: interface.OPTIMAL # The same as cplex.Cplex.solution.status.optimal_tolerance
}

# Check if each status is supported by the current cplex version
_CPLEX_STATUS_TO_STATUS = {}
_solution = cplex.Cplex().solution
for status_name, optlang_status in _STATUS_MAP.items():
    cplex_status = getattr(_solution.status, status_name, None)
    if cplex_status is not None:
        _CPLEX_STATUS_TO_STATUS[cplex_status] = optlang_status

_LP_METHODS = [
    "auto", "primal", "dual", "network", "barrier", "sifting", "concurrent"
]

_SOLUTION_TARGETS = ("auto", "convex", "local", "global")

_QP_METHODS = ("auto", "primal", "dual", "network", "barrier")

_CPLEX_VTYPE_TO_VTYPE = {'C': 'continuous', 'I': 'integer', 'B': 'binary'}
    for j in range(node_size):
        variables_constraint = ["y_{}_{}_{}".format(node_size, i+1,j+1) for i in range(node_size)]
        print(variables_constraint)
        variables_constraint += ["y_{}_{}_{}".format(1,j+1,i+1) for i in range(node_size)]
        constraint_5.append(cplex.SparsePair(ind=variables_constraint, val=[1] * node_size + [-1] * node_size))
    #add constraint #5
    solver.linear_constraints.add(lin_expr=constraint_5, senses=["E"] * len(constraint_5), rhs=[0] * len(constraint_5))
    return solver, variables_y

if __name__ == '__main__':
    _input = pd.read_csv('./input.txt', sep = '\t', index_col = False)
    #calculate distance
    dist_matrix = produce_dist_matrix(_input)
    _input_len = len(_input)
    #initialize solver
    solver = cplex.Cplex()
    #set solver requirements
    solver, variables_y = set_solver(solver, dist_matrix,_input_len)
    solver.solve()
    print("objective value = {}".format(solver.solution.get_objective_value()))
    #find tour sequence
    solution = solver.solution.get_values()
    index_list = np.nonzero(solution)[0]
    pairs = []
    for i in index_list:
        y_chosen=variables_y[i].split('_')
        pair = (int(y_chosen[1]),y_chosen[2])
        pairs.append(pair)
    pairs.sort(key=lambda x: x[0])
    seq = []
    for i in pairs:
Example #14
    # Input data. If no file is given on the command line then use a
    # default file name. The data read is
    # width  - the width of the roll,
    # size   - the size of each strip,
    # amount - the demand for each strip.
    datafile = "../../../examples/data/cutstock.dat"
    if len(sys.argv) < 2:
        print "Default data file : " + datafile
    else:
        datafile = sys.argv[1]
    width, size, amount = read_dat_file(datafile)

    # Setup cutting optimization (master) problem.
    # This is the problem to which columns will be added in the loop
    # below.
    cut = cplex.Cplex()
    cutcons = range(len(amount))  # constraint indices
    cutvars = range(len(size))  # variable indices
    cut.variables.add(obj=[1] * len(cutvars))
    # Add constraints. They have empty left-hand side initially. The
    # left-hand side is filled in the next loop.
    cut.linear_constraints.add(lin_expr=[SparsePair()] * len(cutcons),
                               senses=["G"] * len(cutcons),
                               rhs=amount)
    for v in cutvars:
        cut.linear_constraints.set_coefficients(v, v, int(width / size[v]))

    # Setup pattern generation (worker) problem.
    # The constraints and variables in this problem always stay the same
    # but the objective function will change during the column generation
    # loop.
Example #15
def generate_lift_and_project_cuts(master_prob, cut_var_indices=None, subproblem_label='lift_and_project_subproblem', save_subproblem_lp=False):        
    var_names = master_prob.variables.get_names()
    var_indices = master_prob.variables.get_indices(var_names)
    constr_names = master_prob.linear_constraints.get_names()
    constr_indices = master_prob.linear_constraints.get_indices(constr_names)
    curr_solution = master_prob.solution
    
    if cut_var_indices is None:
        values = curr_solution.get_values(var_indices)
        # all fractional variables
        cut_var_indices = [var_indices[i] for i, val in enumerate(values) if val-__EPS > 0.0 and val+__EPS < 1.0]
    
    subprob = cplex.Cplex()
    subprob.parameters.lpmethod.set(subprob.parameters.lpmethod.values.primal)
    subprob.objective.set_sense(subprob.objective.sense.maximize)    
    
    # create subproblem vars
    alpha_vars_dict = create_alpha_vars(subprob, master_prob, var_indices, var_names)
    beta_var_name, beta_var_index = create_beta_var(subprob)
    u_vars_dict = create_u_vars(subprob, master_prob, var_indices, var_names, constr_names, constr_indices)
    v_vars_dict = create_v_vars(subprob)
    
    # create subproblem constraints
    coef_constrs_dict = create_cut_coefficients_constraints(subprob, master_prob, alpha_vars_dict, u_vars_dict, v_vars_dict, var_indices)
    create_cut_rhs_constraints(subprob, master_prob, beta_var_index, u_vars_dict, v_vars_dict, var_indices, constr_indices)
    create_normalization_constraint(subprob, master_prob, u_vars_dict, v_vars_dict)    
    
    #random.shuffle(cut_var_indices)
    cuts = []
    print('Solving subproblems:', end=' ')
    for iteration, cut_var_index in enumerate(cut_var_indices):
        #if iteration >= 10:
        #    break
        print(cut_var_index, end=' ')
        subprob.linear_constraints.set_coefficients(coef_constrs_dict[(cut_var_index, 0)], v_vars_dict[0], -1.0)
        subprob.linear_constraints.set_coefficients(coef_constrs_dict[(cut_var_index, 1)], v_vars_dict[1], -1.0)
        
        if save_subproblem_lp:
            subprob.write('output/'+subproblem_label+str(cut_var_index)+'.lp')
        # solve subproblem        
        #subprob = cplex.Cplex(subprob)
        subprob.set_results_stream(None)        
        subprob.solve()
        solution = subprob.solution
        status = solution.get_status()
        obj = solution.get_objective_value()    
        if status == solution.status.optimal and obj > 0.0:                
            # generate cut
            vars = []
            coefs = []
            for var_index in var_indices:
                vars.append(var_index)
                coefs.append(solution.get_values(alpha_vars_dict[var_index]))
            rhs = solution.get_values(beta_var_index)
            cut = {'vars': vars, 'coefs': coefs, 'sense': 'G', 'rhs': rhs, 'violation': obj}
            cuts.append(cut)
            
        subprob.linear_constraints.set_coefficients(coef_constrs_dict[(cut_var_index, 0)], v_vars_dict[0], 0.0)
        subprob.linear_constraints.set_coefficients(coef_constrs_dict[(cut_var_index, 1)], v_vars_dict[1], 0.0)
    print()
    return cuts
Example #16
def solve_downsampled_nodes(cf_graph, mp_weights, col_ids_mp, no_replications):
    """
    Generate and solve MILP of the down-sampled data-set and track the identified
    mutation pattern occurrences
    :param cf_graph: Conflict graph: nodes correspond to mutation patterns and edges to their conflicts
    :param mp_weights: 2-dimensional array with log probability that this variant has this mutation pattern
    :param col_ids_mp: dictionary from mutation patterns to the column ids used in the mp_weights array
    :param no_replications: Number of replications per used fraction of variants
    :return: observed occurrences of mutation patterns per variant fraction
    """

    # record the chosen patterns in the down-sampled data set
    node_frequencies = defaultdict(Counter)

    logger.debug(
        'Build linear programs (cplex) for the robustness analysis through down-sampling.'
    )

    # the number of columns in the ILP is given by the number of nodes in the conflict graph;
    # the weighting of the mutation patterns corresponds to the number of mutations which are conflicting
    objective_function = []

    # add column names to the ILP
    var_names = []
    for col_idx, mp in sorted(col_ids_mp.items(), key=lambda k: k[0]):
        var_names.append(str(mp))
        objective_function.append(cf_graph.node[mp]['weight'])

    logger.debug('Objective function: ' + ', '.join(
        '{}: {:.3f}'.format(var_idx, weight)
        for var_idx, weight in enumerate(objective_function, 1)))

    # column types
    var_types = ['B' for _ in range(len(objective_function))]

    # add evolutionary constraints
    constraints = []  # LHS (left hand side) of the rows in the ILP
    row_names = []  # names of the rows (constraints)
    for constraint_idx, (source, sink) in enumerate(cf_graph.edges(), 1):
        constraint = [[str(source), str(sink)], [1, 1]]
        constraints.append(constraint)
        row_names.append(str(source) + '-' + str(sink))

        # logger.debug('Add constraint {}: {}'.format(constraint_idx, str(source)+'-'+str(sink)))

    row_rhss = [1 for _ in range(len(constraints))
                ]  # 1 is the RHS in all constraints
    row_senses = ['G' for _ in range(len(constraints))
                  ]  # greater equal is used in all constraints
    logger.debug('Generated {} constraints.'.format(len(constraints)))

    mut_ids = [i for i in range(len(mp_weights))]
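    # down-sample at 90%, 80%, ..., 10% of the variants removed (i.e. 10%-90% kept),
    # drawing no_replications independent samples at each fraction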
    for removed_fraction in range(90, 0, -10):
        for rep in range(no_replications):

            # obtain sample of used shared mutations
            removed_muts = sample(
                mut_ids, int(round(0.01 * removed_fraction * len(mut_ids))))

            # update objective function (mutation pattern scores)
            # decrease objective function values according to the removed patterns
            for col_idx, mp in sorted(col_ids_mp.items(), key=lambda k: k[0]):
                for removed_mut in removed_muts:
                    # detract the part of the reliability score of this mutation in this pattern
                    # note we are in log space
                    objective_function[col_idx] += math.log(
                        -math.expm1(mp_weights[removed_mut][col_idx]))

            # logger.debug('Update objective function: ' + ', '.join(
            #     '{}: {:.3f}'.format(var_idx, weight) for var_idx, weight in enumerate(objective_function, 1)))

            # generate new MILP
            lp = cp.Cplex()
            # lp.set_error_stream(None)
            # lp.set_warning_stream(None)
            lp.set_results_stream(None)
            lp.set_log_stream(None)
            lp.variables.add(obj=objective_function,
                             types=var_types,
                             names=var_names)
            lp.objective.set_sense(lp.objective.sense.minimize)
            lp.linear_constraints.add(lin_expr=constraints,
                                      senses=row_senses,
                                      rhs=row_rhss,
                                      names=row_names)

            # solve the Integer Linear Program (ILP)
            lp.solve()
            sol = lp.solution  # obtain solution
            # solve_stat = sol.get_status()
            # logger.debug('Solution status: {}'.format(sol.status[solve_stat]))

            # proportional to incompatible mutations (depending on the weight definition)
            # objective_value = sol.get_objective_value()
            # logger.debug('Minimum vertex cover is of weight (objective value) {:.4f} (original weight: {:4f}).'
            #               .format(objective_value, sum(val for val in objective_function)))

            # column solution values
            # solution_values = sol.get_values()
            # logger.debug('Column solution values: ' + ', '.join(
            #     '{}: {}'.format(var_idx, status) for var_idx, status in enumerate(solution_values, 1)))

            solution_values = sol.get_values()
            for col_idx, val in enumerate(solution_values):
                if round(val, 5) == 0:
                    node_frequencies[100 - removed_fraction][
                        col_ids_mp[col_idx]] += 1

            # restore the objective function to its original values by adding back the removed contributions
            for col_idx, mp in sorted(col_ids_mp.items(), key=lambda k: k[0]):
                for removed_mut in removed_muts:
                    # add back the part of the reliability score of this mutation in this pattern
                    # note we are in log space
                    objective_function[col_idx] -= math.log(
                        -math.expm1(mp_weights[removed_mut][col_idx]))

        logger.debug(
            'Finished sampling and solving {:.0%} used fraction of variants.'.format(
                0.01 * (100 - removed_fraction)))

    return node_frequencies
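# A minimal numerical check of the log-space identity used in the update
# above: for a log-probability w = log(p) with 0 < p < 1, log(-expm1(w))
# equals log(1 - p), and expm1 keeps this accurate when p is close to 1
# (i.e. when w is close to 0). Values below are illustrative only.
import math

p = 0.001
w = math.log(p)
assert abs(math.log(-math.expm1(w)) - math.log(1.0 - p)) < 1e-12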
Exemple #17
0
def cutstock(datafile):
    # Input data. If no file is given on the command line then use a
    # default file name. The data read is
    # width  - the width of the roll,
    # size   - the size of each strip,
    # amount - the demand for each strip.
    width, size, amount = read_dat_file(datafile)

    # Setup cutting optimization (master) problem.
    # This is the problem to which columns will be added in the loop
    # below.
    cut = cplex.Cplex()
    cutcons = list(range(len(amount)))  # constraint indices
    cutvars = list(range(len(size)))  # variable indices
    cut.variables.add(obj=[1] * len(cutvars))
    # Add constraints. They have empty left-hand side initially. The
    # left-hand side is filled in the next loop.
    cut.linear_constraints.add(lin_expr=[SparsePair()] * len(cutcons),
                               senses=["G"] * len(cutcons),
                               rhs=amount)
    for v in cutvars:
        cut.linear_constraints.set_coefficients(v, v, int(width / size[v]))

    # Setup pattern generation (worker) problem.
    # The constraints and variables in this problem always stay the same
    # but the objective function will change during the column generation
    # loop.
    pat = cplex.Cplex()
    use = list(range(len(size)))  # variable indices
    pat.variables.add(types=[pat.variables.type.integer] * len(use))
    # Add a constant 1 to the objective.
    pat.variables.add(obj=[1], lb=[1], ub=[1])
    # Single constraint: total size must not exceed the width.
    totalsize = SparsePair(ind=use, val=size)
    pat.linear_constraints.add(lin_expr=[totalsize], senses=["L"], rhs=[width])
    pat.objective.set_sense(pat.objective.sense.minimize)

    # Column generation procedure
    while True:

        # Optimize over current patterns
        cut.solve()
        report1(cut)

        # Find and add new pattern. The objective function of the
        # worker problem is constructed from the dual values of the
        # constraints of the master problem.
        price = [-d for d in cut.solution.get_dual_values(cutcons)]
        pat.objective.set_linear(list(zip(use, price)))
        pat.solve()
        report2(pat, use)

        # If reduced cost (worker problem objective function value) is
        # non-negative we are optimal. Otherwise we found a new column
        # to be added. Coefficients of the new column are given by the
        # optimal solution vector to the worker problem.
        if pat.solution.get_objective_value() > -RC_EPS:
            break
        newpat = pat.solution.get_values(use)

        # The new pattern constitutes a new variable in the cutting
        # optimization problem. Create that variable and add it to all
        # constraints with the coefficients read from the optimal solution
        # of the pattern generation problem.
        idx = cut.variables.get_num()
        cut.variables.add(obj=[1.0])
        cut.linear_constraints.set_coefficients(
            list(zip(cutcons, [idx] * len(use), newpat)))
        cutvars.append(idx)

    # Perform a final solve on the cutting optimization problem.
    # Turn all variables into integers before doing that.
    cut.variables.set_types(
        list(zip(cutvars, [cut.variables.type.integer] * len(cutvars))))
    cut.solve()
    report3(cut)
    print("Solution status = ", cut.solution.get_status())
Exemple #18
0
def solve_downsampled_binary_nodes(cf_graph, mut_pattern_scores,
                                   shared_mutations, no_replications,
                                   no_samples):
    """
    Generate and solve MILP of the down-sampled data-set and track the identified
    mutation pattern occurrences when each variant has exactly one mutation pattern
    :param cf_graph: Conflict graph: nodes correspond to mutation patterns and edges to their conflicts
    :param mut_pattern_scores: Mutation pattern score of each mutation (key: mut_idx)
    :param shared_mutations: List of the shared (parsimony-informative) mutations (mut_idx)
    :param no_replications: Number of replications per used fraction of variants
    :param no_samples: number of samples
    :return: observed occurrences of mutation patterns per variant fraction
    """

    # record the chosen patterns in the down-sampled data set
    node_frequencies = defaultdict(Counter)

    logger.debug(
        'Build linear programs (cplex) for the robustness analysis through down-sampling.'
    )

    # the number of columns in the ILP is given by the number of nodes in the conflict graph
    # weighting of the mutation patterns corresponds to the number of mutations which are conflicting
    objective_function = []

    # build index from mutations to patterns
    mutations = dict()

    # add column names to the ILP
    var_names = []
    node_indices = dict()  # map nodes (mutation patterns) to column id in the ILP
    for col_idx, (node, data) in enumerate(cf_graph.nodes(data=True), 0):

        var_names.append(str(node))
        objective_function.append(data['weight'])
        # nodes are given by a frozenset of samples (mutation patterns)
        node_indices[node] = col_idx
        for mut_idx in data['muts']:
            mutations[mut_idx] = node

    logger.debug('Objective function: ' + ', '.join(
        '{}: {:.3f}'.format(var_idx, weight)
        for var_idx, weight in enumerate(objective_function, 1)))

    # column types
    var_types = ['B' for _ in range(len(objective_function))]

    # add evolutionary constraints
    constraints = []  # LHS (left hand side) of the rows in the ILP
    row_names = []  # names of the rows (constraints)
    for constraint_idx, (source, sink) in enumerate(cf_graph.edges(), 1):
        constraint = [[str(source), str(sink)], [1, 1]]
        constraints.append(constraint)
        row_names.append(str(source) + '-' + str(sink))

        # logger.debug('Add constraint {}: {}'.format(constraint_idx, str(source)+'-'+str(sink)))

    row_rhss = [1 for _ in range(len(constraints))]  # 1 is the RHS in all constraints
    row_senses = ['G' for _ in range(len(constraints))]  # greater-or-equal is used in all constraints
    logger.debug('Generated {} constraints.'.format(len(constraints)))

    for removed_fraction in range(95, 0, -5):
        for rep in range(no_replications):

            # obtain sample of used shared mutations
            removed_muts = sample(
                shared_mutations,
                int(round(0.01 * removed_fraction * len(shared_mutations))))

            # update objective function (mutation pattern scores)
            # decrease objective function values according to the removed patterns
            updated_nodes = set()
            for removed_mut in removed_muts:
                updated_nodes.add(mutations[removed_mut])
                objective_function[node_indices[
                    mutations[removed_mut]]] -= mut_pattern_scores[removed_mut]

            # logger.debug('Update objective function: ' + ', '.join(
            #     '{}: {:.3f}'.format(var_idx, weight) for var_idx, weight in enumerate(objective_function, 1)))

            # generate new MILP
            lp = cp.Cplex()
            # lp.set_error_stream(None)
            # lp.set_warning_stream(None)
            lp.set_results_stream(None)
            lp.set_log_stream(None)
            lp.variables.add(obj=objective_function,
                             types=var_types,
                             names=var_names)
            lp.objective.set_sense(lp.objective.sense.minimize)
            lp.linear_constraints.add(lin_expr=constraints,
                                      senses=row_senses,
                                      rhs=row_rhss,
                                      names=row_names)

            # solve the Integer Linear Program (ILP)
            lp.solve()
            sol = lp.solution  # obtain solution
            # solve_stat = sol.get_status()
            # logger.debug('Solution status: {}'.format(sol.status[solve_stat]))

            # proportional to incompatible mutations (depending on the weight definition)
            # objective_value = sol.get_objective_value()
            # logger.debug('Minimum vertex cover is of weight (objective value) {:.4f} (original weight: {:4f}).'
            #               .format(objective_value, sum(val for val in objective_function)))

            # column solution values
            # solution_values = sol.get_values()
            # logger.debug('Column solution values: ' + ', '.join(
            #     '{}: {}'.format(var_idx, status) for var_idx, status in enumerate(solution_values, 1)))

            for node in cf_graph.nodes():
                if round(sol.get_values(str(node)),
                         5) == 0 and 1 < len(node) < no_samples:
                    node_frequencies[100 - removed_fraction][node] += 1

            # restore the objective function values to their initial values
            for removed_mut in removed_muts:
                objective_function[node_indices[
                    mutations[removed_mut]]] += mut_pattern_scores[removed_mut]

        logger.debug(
            'Finished sampling and solving {:.0%} used fraction of variants.'.format(
                0.01 * (100 - removed_fraction)))

    return node_frequencies
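# The returned node_frequencies maps a used fraction of variants (in percent)
# to a Counter of how often each mutation pattern was observed across the
# replications. A hedged sketch of turning those counts into fractions:
def pattern_robustness(node_frequencies, no_replications):
    # fraction of replications in which each pattern was observed,
    # per used fraction of variants
    return {used_pct: {node: count / no_replications
                       for node, count in counter.items()}
            for used_pct, counter in node_frequencies.items()}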
def restricted_Master(pathsK_nodes):

    #   ------------- SOLVE THE RESTRICTED MASTER PROBLEM -------------

    model = cplex.Cplex()  # Initialize the model
    model.objective.set_sense(model.objective.sense.minimize)  # set the objective function to minimization

    #Create decision variables
    for i in range(nCommodities):
        for j in range(len(pathsK_nodes[i])):
            model.variables.add(
                # obj contains the coefficients of the decision variables in the objective function
                obj=[pathsK_nodes[i][j][nStations] * float(commodities[i][2])],
                lb=[0],
                ub=[1],
                types=[model.variables.type.continuous],
                names=['P(' + str(i) + ',' + str(j) + ')'])

    #Add constraints
    #Flow conservation constraints :
    count = 0  # To iterate over the indices of the decision variables
    for i in range(nCommodities):
        ind = []  # put the indices of the non-zero variables of the current constraint here
        val = []  # put their coefficients here
        row = []
        for j in range(len(pathsK_nodes[i])):
            ind.append(count)
            val.append(1)
            count += 1
        row.append([ind, val])
        model.linear_constraints.add(
            lin_expr=row,
            senses="E",  #Equality constraint
            rhs=[1])  # Right Hand Side of the constraint

    #Node capacity constraints :
    for i in range(nStations):
        ind, val, row = [], [], []
        count = 0
        # For each commodity, check whether each of its paths uses the node
        # (the starting node is not included)
        for j in range(nCommodities):
            for k in range(len(pathsK_nodes[j])):
                if pathsK_nodes[j][k][i] > 0:
                    # If so, add the decision variable index to the constraint
                    ind.append(count)
                    val.append(commodities[j][2])  # with its coefficient
                count += 1
        row.append([ind, val])
        model.linear_constraints.add(
            lin_expr=row,
            senses="L",  # Less-than
            rhs=[capacity_node[node[i][2] - 1]])  # capacity of the node (-1 because type_id starts at 1)

    try:
        print("\n\n-----------RESTRICTED MASTER SOLUTION : -----------\n")
        model.set_results_stream(None)
        model.set_warning_stream(None)
        model.solve()
        model.write('test_restricted.lp')
        print("\n")
        print("Solution primal : ", model.solution.get_values())

        #Print the solution
        count = 0
        for i in range(nCommodities):
            for j in range(len(pathsK_nodes[i])):
                indices = [
                    k for k, x in enumerate(pathsK_nodes[i][j])
                    if (x > 0 and k < nStations)
                ]  #Get the indices of the nodes contained in the current path
                print(
                    "\t",
                    model.solution.get_values(count) * commodities[i][2],
                    "units of commodity n°", i + 1, "on path",
                    node[commodities[i][0]][0],
                    '' + ' '.join([node[k][0] for k in indices]) +
                    ". Length path : " + str(pathsK_nodes[i][j][nStations]))
                count += 1

        print("\nTotal cost = " + str(model.solution.get_objective_value()))

        dualCommodities, dualStations = dual_of_Restrited(
            pathsK_nodes)  # Compute the dual variables and print them

        print()
        for i in range(len(dualCommodities)):
            if (dualCommodities[i] != 0):
                print("Dual values y_K" + str(i + 1) + " = " +
                      str(dualCommodities[i]))
        for i in range(len(dualStations)):
            if (dualStations[i] != 0):
                print("Dual values y_Node" + str(i + 1) + " = " +
                      str(dualStations[i]))

    except CplexSolverError as e:
        print("Exception raised during restricted master problem: ", e)
        return [-1] * 4  # return -1s to indicate the infeasibility of the restricted master problem

    return model.solution.get_objective_value(), model.solution.get_values(
    ), dualCommodities, dualStations
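# Since the restricted master is solved as a continuous LP above, the dual
# values could also be read directly from its CPLEX solution instead of a
# separate dual model; a hedged alternative to dual_of_Restrited:
def duals_from_solution(model, nCommodities, nStations):
    # rows were added in order: nCommodities flow-conservation constraints,
    # then nStations node-capacity constraints
    duals = model.solution.get_dual_values()
    return duals[:nCommodities], duals[nCommodities:nCommodities + nStations]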
Exemple #20
0
def run_MIPLIB(problems = ['enlight9'], 
  rowlengths = [2,3], 
  nTrials = 2, 
  prefix = './MIPLIB/', 
  postfix = '.mps', 
  nCuts = 100,
  n_badrow = [1, 2],
  runGX = False,
  runX = False,
  verbose = 0,
  scratch = './',
  saveDict = False):
    """
    run_MIPLIB(problems = ['enlight9'], rowlengths = [2,3], nTrials = 2, prefix = './MIPLIB/', postfix = '.mps', nCuts = 100, n_badrow = [1, 2], runGX = False, runX = False, verbose = 0, scratch = './', saveDict = False)
    problems = set of MIPLIB problem names to run.
    prefix/postfix = Any relative path and extension for file names
    rowlengths: list containing number of row cuts to be iterated on
    nTrials: number of iterations on cut of each row length
    nCuts: number of Cuts
    n_badrow: number of "bad rows" to be picked in each cut
    runGX: whether or not to run GX cuts
    runX: Whether or not to run X cuts
    saveDict: Should we save a dictionary with the results?
    Returns AllCutSol, a dictionary keyed by problem name containing the LP,
    GMI, GX/GXG and X/XG objective values obtained in each trial
    (GMIans is returned instead if neither runGX nor runX is set).
    """
    Trials = range(nTrials)  
    LPvals = np.zeros((len(problems),))
    GMIvals = np.zeros((len(problems),))
    AllCutSol = dict()    
    # Do this for each problem in under consideration
    for filename in problems:    
        if verbose > 0:
            print("Running: "+str(filename))
        cutValues = dict()
        # Reading the original MIPLIB problem
        C_org = cplex.Cplex()
        C_org.read(prefix+filename+postfix)
        int_var_org = np.array([0 if i=='C' else 1 for i in C_org.variables.get_types()])
        # Converting it into standard form
        C = Cplex2StdCplex(prefix+filename+postfix, MIP  = True, verbose = verbose-2)
        cont_var = np.array([1 if i=='C' else 0 for i in C.variables.get_types()])
        int_var = 1-cont_var
        C.set_problem_type(C.problem_type.LP)
        C.write(prefix+filename+'_std'+postfix)
        # Solving the LP relaxation of the standard form and getting solve information
        LPSolution = getfromCPLEX(C, verbose=verbose-2, ForceSolve=True, tableaux=False)
        x_B = -LPSolution["Sol_Basic"]
        bad_rows = intRows(x_B,int_var[LPSolution["Basic"]].astype(int))
        if verbose > 1:
            print(LPSolution["Objective"])
            print("ORIGINAL PROBLEM\n******************")
            print("nVar: "+str(C_org.variables.get_num())+ 
                "\n nCons: "+str(C_org.linear_constraints.get_num()) +
                "\n IntCon: "+str(np.sum(int_var_org)))
            print("\nSTANDARD PROBLEM\n*****************")
            print("nVar: "+str(C.variables.get_num())+ 
                "\n nCons: "+str(C.linear_constraints.get_num()) +
                "\n IntCon: "+ str (np.sum(int_var) ))
            print("OTHERS\n******")
            print("LP Objective: ", LPSolution["Objective"])
            print("# Integer constraints not satified in LP relaxation:", np.where(bad_rows)[0].shape[0])
        cutValues["LP"] = LPSolution["Objective"]
        cutValues["badrow"] = np.where(bad_rows)[0].shape[0]
        # Dealing with LP relaxation complete
        # Adding GMI cuts
        (A_GMI, b_GMI) = GMI(
                            LPSolution["Tableaux_NB"].todense().A, 
                            LPSolution["Sol_Basic"], 
                            bad_rows, 
                            cont_var[LPSolution["NonBasic"]].astype(int)
                            )
        C_GMI = addCuts2Cplex(filename = prefix+filename+'_std'+postfix,
                            NB = LPSolution["NonBasic"],
                            A_cut = A_GMI,
                            b_cut = b_GMI, scratch = scratch)
        GMIans = getfromCPLEX(C_GMI, tableaux = False, basic = False, TablNB = False)
        if verbose > 1:
            print('GMI:', GMIans["Objective"])
        cutValues["GMI"] = GMIans["Objective"]
        # GMI complete
        # Adding Crosspolytope based cuts
        # Looping among all rowlengths required
        for nRows in rowlengths:
            if verbose > 0.5:
                print("***" + str(nRows)+" row cuts Started ***")
            # Initialize GXGvals and GXvals if GX cuts are run            
            if runGX:
                GXGvals = np.zeros((len(n_badrow), len(Trials)))
                GXvals = np.zeros((len(n_badrow), len(Trials)))
            # Initialize XGvals and Xvals if X cuts are run
            if runX:
                XGvals = np.zeros((len(Trials),))
                Xvals = np.zeros((len(Trials),))
            # Looping over number of trials needed
            for Trial in Trials:
                # If GX cuts have to be done, then the following
                if runGX:
                    # In GX cuts, there is an option of choosing number of bad rows. Looping over all reqd values
                    for badrow_ct in n_badrow:
                        ans = Rows4Xcut(x_B, nRows, nCuts, int_var[LPSolution["Basic"]], badrow_ct)
                        if ans is None: # Problem occurred in X cut parameter generation. This can happen if there are insufficient badrows
                            print(nRows,'row GX cut in Problem: ', filename, "not possible", sep = " ")
                            GXvals[n_badrow.index(badrow_ct), Trial] = None
                            GXGvals[n_badrow.index(badrow_ct), Trial] = None
                        else:
                            # Calculating GX cuts
                            (A_GX, b_GX) = GXLift(-LPSolution["Tableaux_NB"], 
                                                -LPSolution["Sol_Basic"],
                                                ans["RowMat"],
                                                ans["muMat"],
                                                ans["fMat"],
                                                cont_var[LPSolution["NonBasic"]].astype(int),
                                                sparse = True,
                                                verbose = verbose-2
                                                )
                            # creating GX model
                            C_GX = addCuts2Cplex(filename = prefix+filename+'_std'+postfix,
                                                NB = LPSolution["NonBasic"],
                                                A_cut = A_GX,
                                                b_cut = b_GX, scratch = scratch)
                            # creating GXG model
                            C_GXG = addCuts2Cplex(filename = prefix+filename+'_std'+postfix,
                                                NB = LPSolution["NonBasic"],
                                                A_cut = np.concatenate((A_GX , A_GMI),axis=0),
                                                b_cut = np.concatenate((b_GX,  b_GMI),axis=0), scratch = scratch)
                            # Solving the models with cuts
                            GXans = getfromCPLEX(C_GX, tableaux = False, basic = False, TablNB = False)
                            GXGans = getfromCPLEX(C_GXG, tableaux = False, basic = False, TablNB = False)
                            # Printing and storing the results
                            if verbose > 1:
                                print(nRows,'row cut GX in Problem: ', filename, 'with badrow count: ', badrow_ct, '. Improvement: ', GXans["Objective"], GXGans["Objective"],sep = " ")
                            GXvals[n_badrow.index(badrow_ct), Trial] = GXans["Objective"]
                            GXGvals[n_badrow.index(badrow_ct), Trial] = GXGans["Objective"]
                # If X cuts have to be run
                if runX:
                    # Note that there is no looping over number of badrow selection. Number of badrow = number of rows here, necessarily.
                    ans = Rows4Xcut(x_B, nRows, nCuts, int_var[LPSolution["Basic"]], nRows)
                    if ans is None: # Problem occurred in X cut parameter generation. This can happen if there are insufficient badrows
                        print(nRows,'row X cut in Problem: ', filename, "not possible", sep = " ")
                        Xvals[Trial] = None
                        XGvals[Trial] = None
                    else:
                        # Calculating the X cuts
                        (A_X, b_X) = XLift(-LPSolution["Tableaux_NB"], 
                                            -LPSolution["Sol_Basic"],
                                            ans["RowMat"],
                                            ans["muMat"],
                                            cont_var[LPSolution["NonBasic"]].astype(int),
                                            sparse = True,
                                            verbose = verbose-2
                                            )
                        # Creating the X model
                        C_X = addCuts2Cplex(filename = prefix+filename+'_std'+postfix,
                                        NB = LPSolution["NonBasic"],
                                        A_cut = A_X,
                                        b_cut = b_X, scratch = scratch)
                        # Creating the XG model
                        C_XG = addCuts2Cplex(filename = prefix+filename+'_std'+postfix,
                                            NB = LPSolution["NonBasic"],
                                            A_cut = np.concatenate((A_X , A_GMI),axis=0),
                                            b_cut = np.concatenate((b_X,  b_GMI),axis=0), scratch = scratch)
                        # Solving the models with cuts
                        Xans = getfromCPLEX(C_X, tableaux = False, basic = False, TablNB = False)
                        XGans = getfromCPLEX(C_XG, tableaux = False, basic = False, TablNB = False)
                        # Printing and storing the results
                        if verbose > 1:
                            print(nRows,'row X cut in Problem: ', filename, Xans["Objective"], XGans["Objective"],sep = " ")
                        Xvals[Trial] = Xans["Objective"]
                        XGvals[Trial] = XGans["Objective"]
            if runGX or runX:
                cutValues[str(nRows)] = dict()
            if runGX:
                cutValues[str(nRows)]["GX"] = GXvals.tolist()
                cutValues[str(nRows)]["GXG"] = GXGvals.tolist()
            if runX:
                cutValues[str(nRows)]["X"] = Xvals.tolist()
                cutValues[str(nRows)]["XG"] = XGvals.tolist()
        if saveDict:
            myFile = open( scratch + filename + "_prob_" + str(nCuts) + "_cuts_" + str(nTrials) + "_trials.txt" , "w")
            myFile.write(str(cutValues))
            myFile.close()
        AllCutSol[filename] = cutValues
        if verbose > 0:
            print(AllCutSol)
    # Returning appropriately based on inputs.
    if runX or runGX:
        return AllCutSol
    else:
        return GMIans
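# A hedged usage sketch for the driver above (paths and the problem name are
# placeholders and must point to existing MPS files):
# results = run_MIPLIB(problems=['enlight9'], rowlengths=[2, 3], nTrials=2,
#                      prefix='./MIPLIB/', postfix='.mps', nCuts=100,
#                      n_badrow=[1, 2], runGX=True, runX=True,
#                      verbose=1, scratch='./', saveDict=True)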
Exemple #21
0
    def __init__(self, numNodes): 

        # Set up Cplex instance to solve the worker LP
        cpx = cplex.Cplex()
        cpx.set_results_stream(None)
        cpx.set_log_stream(None) 
         
        # Turn off the presolve reductions and set the CPLEX optimizer
        # to solve the worker LP with primal simplex method.
        cpx.parameters.preprocessing.reduce.set(0) 
        cpx.parameters.lpmethod.set(cpx.parameters.lpmethod.values.primal)
        
        cpx.objective.set_sense(cpx.objective.sense.minimize)
        
        # Create variables v(k,i,j) forall k in V0, (i,j) in A
        # For simplicity, also dummy variables v(k,i,i) are created.
        # Those variables are fixed to 0 and do not contribute to 
        # the constraints.
        v = []
        for k in range(1, numNodes):
            v.append([])
            for i in range(numNodes):
                v[k-1].append([])
                for j in range(numNodes):
                    varName = "v."+str(k)+"."+str(i)+"."+str(j)
                    v[k-1][i].append(cpx.variables.get_num()) 
                    cpx.variables.add(obj = [0.0], 
                                      lb = [0.0], 
                                      ub = [cplex.infinity], 
                                      names = [varName])
                cpx.variables.set_upper_bounds(v[k-1][i][i], 0.0)
                
        # Create variables u(k,i) forall k in V0, i in V     
        u = []
        for k in range(1, numNodes):
            u.append([])
            for i in range(numNodes):
                varName = "u."+str(k)+"."+str(i)
                u[k-1].append(cpx.variables.get_num())
                obj = 0.0
                if i == 0:
                    obj = -1.0
                if i == k:
                    obj = 1.0
                cpx.variables.add(obj = [obj], 
                                  lb = [-cplex.infinity], 
                                  ub = [cplex.infinity],  
                                  names = [varName])

        # Add constraints:
        # forall k in V0, forall (i,j) in A: u(k,i) - u(k,j) <= v(k,i,j)
        for k in range(1, numNodes):
            for i in range(numNodes):
                for j in range(0, numNodes):
                    if i != j:
                        thevars = []
                        thecoefs = []
                        thevars.append(v[k-1][i][j])
                        thecoefs.append(-1.0)
                        thevars.append(u[k-1][i])
                        thecoefs.append(1.0)
                        thevars.append(u[k-1][j])
                        thecoefs.append(-1.0)
                        cpx.linear_constraints.add(lin_expr = \
                                                   [cplex.SparsePair(thevars, thecoefs)],
                                                   senses = ["L"], rhs = [0.0])
                                                   
        self.cpx      = cpx
        self.v        = v
        self.u        = u
        self.numNodes = numNodes
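# A hedged usage sketch for the worker LP built above (the enclosing class
# name is not shown in this snippet, so WorkerLP is only an assumption):
# worker = WorkerLP(4)                    # hypothetical 4-node instance
# print(worker.cpx.variables.get_num())   # (numNodes-1)*numNodes*(numNodes+1) = 60 variables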
def populate(filename):
    c = cplex.Cplex(filename)

    # set the solution pool relative gap parameter to obtain solutions
    # of objective value within 10% of the optimal
    c.parameters.mip.pool.relgap.set(0.1)

    try:
        c.populate_solution_pool()
    except CplexSolverError:
        print("Exception raised during populate")
        return

    print()
    # solution.get_status() returns an integer code
    print("Solution status = ", c.solution.get_status(), ":", end=' ')
    # the following line prints the corresponding string
    print(c.solution.status[c.solution.get_status()])

    numcols = c.variables.get_num()

    # Print information about the incumbent
    print()
    print("Objective value of the incumbent  = ",
          c.solution.get_objective_value())
    x = c.solution.get_values()
    for j in range(numcols):
        print("Incumbent: Column %d:  Value = %10f" % (j, x[j]))

    # Print information about other solutions
    print()
    numsol = c.solution.pool.get_num()
    print("The solution pool contains %d solutions." % numsol)

    numsolreplaced = c.solution.pool.get_num_replaced()
    print("%d solutions were removed due to the solution pool "
          "relative gap parameter." % numsolreplaced)

    numsoltotal = numsol + numsolreplaced
    print("In total, %d solutions were generated." % numsoltotal)

    meanobjval = c.solution.pool.get_mean_objective_value()
    print("The average objective value of the solutions is %.10g." %
          meanobjval)

    # write out the objective value of each solution and its
    # difference to the incumbent
    names = c.solution.pool.get_names()

    print()
    print("Solution        Objective       Number of variables")
    print("                value           that differ compared to")
    print("                                the incumbent")

    for i in range(numsol):

        objval_i = c.solution.pool.get_objective_value(i)

        x_i = c.solution.pool.get_values(i)

        # compute the number of variables that differ in solution i
        # and in the incumbent
        numdiff = 0
        for j in range(numcols):
            if abs(x_i[j] - x[j]) > epszero:
                numdiff = numdiff + 1
        print("%-15s %-10g      %d / %d" %
              (names[i], objval_i, numdiff, numcols))
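# The populate() snippet assumes a module-level comparison tolerance and the
# solver exception import; a hedged setup and invocation sketch (the file
# name and the tolerance value are placeholders):
# from cplex.exceptions import CplexSolverError
# epszero = 1e-10
# populate("model.lp")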
Exemple #23
0
def optimize_weight(cycles, vertices, weight, dirname, ilp):
    # ilp: if True, solve as an ILP with binary variables; otherwise solve the LP relaxation
    prob = cplex.Cplex()
    prob.set_problem_name("KIDNEY EXCHANGE")

    names = []

    prob.set_problem_type(cplex.Cplex.problem_type.LP)
    obj = []
    for cycle in cycles:
        obj.append(weight[tuple(cycle)])

    c = {}
    i = 0
    for cycle in cycles:
        names.append("c_%s" % str(cycle))
        i = i + 1

    # option = int((input("Choose option\n1.ILP\n2.LP")))

    if ilp:
        prob.variables.add(
            obj=obj,
            names=names,
            lb=[0] * len(names),
            ub=[1] * len(names),
            types=["B"] * len(names),
        )

    elif not ilp:
        prob.variables.add(
            obj=obj,
            names=names,
            lb=[0] * len(names),
            ub=[1] * len(names),
            types=["C"] * len(names),
        )

    constraints = []
    constraint_names = []
    for v in vertices:
        constraint = []
        i = 0
        for cycle in cycles:
            if v in cycle:
                constraint.append(names[i])
            i = i + 1
        if constraint:
            constraint_names.append("v" + v)
            prob.linear_constraints.add(
                lin_expr=[cplex.SparsePair(constraint, [1] * len(constraint))],
                senses=["L"],
                rhs=[1],
                names=[constraint_names[-1]],
            )

    prob.objective.set_sense(prob.objective.sense.maximize)
    prob.write(dirname + "/" + "optimize_weight.lp")

    start = prob.get_time()

    prob.solve()

    end = prob.get_time()
    print("*****************************************************************")
    print(end - start)
    print("*****************************************************************")

    return prob.solution.get_values()
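# A hedged usage sketch for optimize_weight with a tiny illustrative instance
# (all vertex names, cycles and weights below are made up):
# cycles = [["v1", "v2"], ["v2", "v3", "v1"]]
# weight = {("v1", "v2"): 2.0, ("v2", "v3", "v1"): 3.0}
# vertices = ["v1", "v2", "v3"]
# values = optimize_weight(cycles, vertices, weight, dirname=".", ilp=True)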
Exemple #24
0
def facility(datafile):
    # Read in data file. If no file name is given on the command line
    # we use a default file name. The data we read is
    # capacity   -- a list/array of facility capacity
    # fixedcost  -- a list/array of facility fixed cost
    # cost       -- a matrix for the costs to serve each client by each
    #               facility
    capacity, fixedcost, cost = read_dat_file(datafile)

    num_facilities = len(fixedcost)
    num_clients = len(cost)

    # Create a new (empty) model and populate it below.
    model = cplex.Cplex()

    # Create one binary variable for each facility. The variables model
    # whether each facility is open or not

    model.variables.add(obj=fixedcost,
                        lb=[0] * num_facilities,
                        ub=[1] * num_facilities,
                        types=["B"] * num_facilities)

    # Create one binary variable for each facility/client pair. The variables
    # model whether a client is served by a facility.
    for c in range(num_clients):
        model.variables.add(obj=cost[c],
                            lb=[0] * num_facilities,
                            ub=[1] * num_facilities,
                            types=["B"] * num_facilities)

    # Create corresponding indices for later use
    supply = []
    for c in range(num_clients):
        supply.append([])
        for f in range(num_facilities):
            supply[c].append((c + 1) * (num_facilities) + f)
    # Equivalently, supply can be defined by list comprehension
    # supply = [[(c + 1) * num_facilities + f
    #            for f in range(num_facilities)] for c in range(num_clients)]

    # Each client must be assigned to exactly one location
    for c in range(num_clients):
        assignment_constraint = cplex.SparsePair(
            ind=[supply[c][f] for f in range(num_facilities)],
            val=[1.0] * num_facilities)
        model.linear_constraints.add(lin_expr=[assignment_constraint],
                                     senses=["E"],
                                     rhs=[1])

    # The number of clients assigned to a facility must be less than the
    # capacity of the facility, and clients must be assigned to an open
    # facility
    for f in range(num_facilities):
        index = [f]
        value = [-capacity[f]]
        for c in range(num_clients):
            index.append(supply[c][f])
            value.append(1.0)
        capacity_constraint = cplex.SparsePair(ind=index, val=value)
        model.linear_constraints.add(lin_expr=[capacity_constraint],
                                     senses=["L"],
                                     rhs=[0])

    # Our objective is to minimize cost. Fixed and variable costs
    # have been set when variables were created.
    model.objective.set_sense(model.objective.sense.minimize)

    # Solve
    try:
        model.solve()
    except CplexSolverError as e:
        print("Exception raised during solve: " + e)
    else:
        solution = model.solution

        # solution.get_status() returns an integer code
        print("Solution status = ", solution.get_status(), ":", end=' ')
        # the following line prints the corresponding string
        print(solution.status[solution.get_status()])

        # Display solution.
        print("Total cost = ", solution.get_objective_value())

        for f in range(num_facilities):
            if (solution.get_values(f) >
                    model.parameters.mip.tolerances.integrality.get()):
                print("Facility %d is open and serves the "
                      "following clients:" % f,
                      end=' ')
                for c in range(num_clients):
                    if (solution.get_values(supply[c][f]) >
                            model.parameters.mip.tolerances.integrality.get()):
                        print(c, end=' ')
                print()
Exemple #25
0
                                            Action_current, action_interval)
                    x_Eq, y_Eq, s_Eq, e_Eq, v_Eq, b_Eq, row_Eq = OptCoeff.EqCoeff(
                    )
                    x_Ineq, y_Ineq, s_Ineq, e_Ineq, v_Ineq, b_Ineq, slack_Ineq, row_Ineq = OptCoeff.IneqCoeff(
                    )
                    # x_Ineq, y_Ineq, s_Ineq, e_Ineq, v_Ineq, b_Ineq, row_Ineq = OptCoeff.IneqCoeff()
                    x_obj, y_obj, s_obj, e_obj, v_obj, slack_obj = OptCoeff.ObjCoeff(
                    )
                    # x_obj, y_obj, s_obj, e_obj, v_obj = OptCoeff.ObjCoeff()

                    Aeq = np.hstack((x_Eq, y_Eq, s_Eq, e_Eq, v_Eq))
                    # Aineq = np.hstack((x_Ineq, y_Ineq, s_Ineq, e_Ineq, v_Ineq))
                    Aineq = np.hstack(
                        (x_Ineq, y_Ineq, s_Ineq, e_Ineq, v_Ineq, slack_Ineq))

                    OptimizeProblem = cplex.Cplex()

                    ## set the decision variables, lower and upper bounds, and the objective coefficients
                    xlen = len(NetInfoX[zone])
                    upbounds = np.zeros(xlen * tc)
                    for j in range(xlen):
                        upbounds[j * tc:(j + 1) * tc] = LinksOccupy[zone][j] * np.ones(tc)
                    OptimizeProblem.variables.add(
                        names=["x" + str(j) for j in range(xlen * tc)],
                        obj=x_obj,
                        lb=np.zeros(xlen * tc),
                        ub=upbounds,
                        types=[OptimizeProblem.variables.type.continuous] *
                        xlen * tc)
                    ylen = len(NetInfoY[zone])
Exemple #26
0
def inout3():
    c = cplex.Cplex()

    # sys.stdout is the default output stream for log and results
    # so these lines may be omitted
    c.set_results_stream(sys.stdout)
    c.set_log_stream(sys.stdout)

    # indices of the inside production variables
    inside = list(range(0, nbProducts))
    c.variables.add(lb=[10.0 for x in inside],
                    names=["inside_" + str(i) for i in range(nbProducts)])

    # indices of the outside production variables
    outside = list(range(nbProducts, 2 * nbProducts))
    c.variables.add(lb=[0.0 for x in outside],
                    names=["outside_" + str(i) for i in range(nbProducts)])

    # index of the cost variables
    cost = 2 * nbProducts
    c.variables.add(obj=[1.0], names=["cost"])

    # assign the cost variables
    c.linear_constraints.add(lin_expr=[
        SparsePair(ind=[cost] + inside + outside,
                   val=[-1.0] + insideCost + outsideCost)
    ],
                             senses="E",
                             rhs=[0.0],
                             names=["cost"])

    # add capacity constraint for each resource
    c.linear_constraints.add(
        lin_expr=[
            SparsePair(ind=inside, val=consumption[i])
            for i in range(len(consumption))
        ],
        senses=["L" for i in consumption],
        rhs=capacity,
        names=["capacity_" + str(i) for i in range(nbResources)])

    # must meet demand for each product
    c.linear_constraints.add(
        lin_expr=[
            SparsePair(ind=[inside[p]] + [outside[p]],
                       val=[1.0 for i in [0, 1]]) for p in range(nbProducts)
        ],
        senses=["E" for i in demand],
        rhs=demand,
        names=["demand_" + str(i) for i in range(nbProducts)])

    # find cost-minimal solution
    c.solve()
    print("Solution status = ", c.solution.get_status())

    # Add constraint: cost must be no more than 10% over minimum
    c.variables.set_upper_bounds(cost, 1.1 * c.solution.get_objective_value())

    # Set objective to minimize outside production
    c.objective.set_linear(cost, 0.0)
    c.objective.set_linear([(outside[i], 1.0) for i in range(len(outside))])

    c.write("inout3.lp")

    # optimize for new objective
    c.solve()
    print("Solution status = ", c.solution.get_status())
    # display the solution
    print("cost: ", c.solution.get_values(cost))
    for p in range(nbProducts):
        print("Product ", p, ":")
        print("   inside: ", c.solution.get_values(inside[p]))
        print("  outside: ", c.solution.get_values(outside[p]))
Exemple #27
0
    def fit(self, examples, labels):
        x = examples.copy().astype(np.float64)
        y = labels.copy().astype(np.float64)
        y[y!=1.] = -1.

        time_start = time.time()
        self.initialize_result()
        num, dim = x.shape
        c = cplex.Cplex()
        c.set_results_stream(None)
        w_names = [r'w%s' % i for i in range(dim)]
        xi_names = [r'xi%s' % i for i in range(num)]
        # Initialize risk
        self.risks = - y * (np.dot(x, self.weight) + self.bias)
        # Initialize eta
        self.update_eta()
        eta_bef = self.eta
        # Initialize t
        obj_val = num * (ersvmutil.calc_cvar(self.risks, 1 - self.nu) * self.nu -
                         ersvmutil.calc_cvar(self.risks, 1 - self.mu) * self.mu)
        self.obj.append(obj_val)
        if self.constant_t < -0.5:
            self.t.append(max(0, self.obj[-1] / 0.99))
        else:
            self.t.append(self.constant_t)
        # Set variables and objective function
        c.variables.add(names=w_names, lb=[-cplex.infinity]*dim, ub=[cplex.infinity]*dim)
        c.variables.add(names=[r'b'], lb=[-cplex.infinity], ub=[cplex.infinity])
        c.variables.add(names=xi_names, obj=[1.] * num, lb=[0.]*num, ub=[cplex.infinity]*num)
        c.variables.add(names=[r'alpha'], obj=[self.nu * num], lb=[-cplex.infinity], ub=[cplex.infinity])
        # Set quadratic constraint
        c.quadratic_constraints.add(name=r'norm', quad_expr=[w_names, w_names, [1.]*dim], rhs=1., sense='L')
        # Set linear constraints w*y_i*x_i + b*y_i + xi_i - alf >= 0
        # linexpr = [[w_names + ['b', 'xi%s' % i, 'alpha'], list(x[i] * y[i]) + [y[i], 1., 1.]] for i in range(num)]

        opti_vars = list(x[0] * y[0]) + [y[0], 1., 1.]
        len_vars = len(opti_vars)

        print(w_names + [r'b', r'xi0', r'alpha'], opti_vars, len_vars)
        linexpr = [cplex.SparsePair(ind=w_names + [r'b', r'xi%s'%i, r'alpha'],
                                    val=list(x[i] * y[i]) + [y[i], 1., 1.]) for i in range(num)]
        names = [r'margin%s' % i for i in range(num)]
        c.linear_constraints.add(names=names, senses=[r'G' for _ in range(num)],
                                 range_values=[0.0 for _ in range(num)],
                                 rhs=[0.0 for _ in range(num)], lin_expr=linexpr)
        # Set QP optimization method
        c.parameters.qpmethod.set(self.cplex_method)
        # Iteration
        for i in range(self.max_itr):
            self.total_itr += 1
            # Update objective function
            c.objective.set_linear('b', np.dot(1 - self.eta, y))
            c.objective.set_linear(zip(w_names, np.dot(y * (1 - self.eta), x) - 2 * self.t[-1] * self.weight))
            # Solve subproblem
            c.solve()
            # print( 'feasibility:', c.solution.is_primal_feasible())
            self.weight = np.array(c.solution.get_values(w_names))
            # xi = np.array(c.solution.get_values(xi_names))
            self.bias = c.solution.get_values('b')
            self.alpha = c.solution.get_values('alpha')
            # Update risk
            self.risks = - y * (np.dot(x, self.weight) + self.bias)
            # Update eta
            self.update_eta()
            # Objective Value
            obj_val = num * (ersvmutil.calc_cvar(self.risks, 1 - self.nu) * self.nu -
                             ersvmutil.calc_cvar(self.risks, 1 - self.mu) * self.mu)
            self.obj.append(obj_val)
            # Update t
            if self.constant_t < -0.5:
                self.t.append(max(1e-5 + self.obj[-1] / 0.999, 0.))
            else:
                self.t.append(self.constant_t)
            # Termination
            diff = (self.obj[-2] - self.obj[-1]) / (abs(self.obj[-1]) + 1e-7)
            if diff < self.eps:
                break
            eta_bef = self.eta
        time_end = time.time()
        self.comp_time = time_end - time_start
        self.c = c
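# The objective above relies on ersvmutil.calc_cvar, which is not shown in
# this snippet. A hedged sketch of an empirical CVaR at level beta (the mean
# of the largest (1 - beta)-fraction of the risks), which is what the calls
# appear to assume:
import numpy as np


def calc_cvar_sketch(risks, beta):
    m = len(risks)
    tail = max(1, int(np.ceil(m * (1.0 - beta))))  # number of tail samples
    return float(np.mean(np.sort(risks)[-tail:]))  # mean of the worst risks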
Exemple #28
0
    def create_model(self):
        self.model = cplex.Cplex()
        model = self.model
        if not self.print_log:
            model.parameters.simplex.display.set(0)
            model.set_results_stream(None)
            model.set_log_stream(None)
        #self.x_names = ["x_%d_%d_%d_%d" % (arc.tail.name,arc.head.name,
        #                        arc.tail.interval[0],arc.head.interval[0])
        #                for node in self.nodes
        #                for interval_node in node.interval_nodes
        #                for arc in interval_node.outgoing_arcs]
        all_names = [
            "x_%d_%d_%d_%d" % (arc.tail.name, arc.head.name,
                               arc.tail.interval[0], arc.head.interval[0])
            for node in self.nodes for interval_node in node.interval_nodes
            for arc in interval_node.outgoing_arcs
        ]
        all_obj = [
            self.cost_matrix[arc.tail.name][arc.head.name] +
            0.0001 * self.adj_matrix[arc.tail.name][arc.head.name]
            for node in self.nodes for interval_node in node.interval_nodes
            for arc in interval_node.outgoing_arcs
        ]
        all_lb = [0.0] * len(all_names)
        all_ub = [1.0] * len(all_names)
        model.variables.add(names=all_names,
                            types=['C'] * len(all_names),
                            obj=all_obj,
                            lb=all_lb,
                            ub=all_ub)
        allvars = []
        allrhs = []
        allsenses = []
        allnames = []

        for node in self.nodes:
            for interval_node in node.interval_nodes:
                thevars = ([
                    "x_%d_%d_%d_%d" %
                    (arc.tail.name, arc.head.name, arc.tail.interval[0],
                     arc.head.interval[0])
                    for arc in interval_node.outgoing_arcs
                ] + [
                    "x_%d_%d_%d_%d" %
                    (arc.tail.name, arc.head.name, arc.tail.interval[0],
                     arc.head.interval[0])
                    for arc in interval_node.ingoing_arcs
                ])

                thecoefs = [-1] * len(interval_node.outgoing_arcs) + [1] * len(
                    interval_node.ingoing_arcs)

                allvars.append(cplex.SparsePair(thevars, thecoefs))
                allsenses.append("E")
                allnames.append("c_%d_%d" %
                                (node.name, interval_node.interval[0]))
                if node.name == self.origin:
                    allrhs.append(-1.0)
                else:
                    if node.name == self.destination:
                        allrhs.append(1.0)
                    else:
                        allrhs.append(0.0)

        model.linear_constraints.add(lin_expr=allvars,
                                     names=allnames,
                                     senses=allsenses,
                                     rhs=allrhs)
        self.const_num = len(allvars)

        self.names = {n: j for j, n in enumerate(model.variables.get_names())}
        self.dual_names = {
            n: j
            for j, n in enumerate(model.linear_constraints.get_names())
        }
        self.var_num = len(self.names)
Exemple #29
0
    def max_optimize(self, mars):
        X_UB = 1.0
        X_LB = -1.0
        M = 2.0
        isQuadratic = False
        opt_prob = cplex.Cplex()
        opt_prob.objective.set_sense(opt_prob.objective.sense.maximize)
        target = opt_prob.parameters.optimalitytarget.values
        opt_prob.parameters.optimalitytarget.set(target.optimal_global)
        n_var = 1 + mars.n_variables

        for i in range(mars.n_basis_fn):
            n_var += 2 * mars.basis_fns[i + 1].order
        dt = opt_prob.variables.type
        # data_types=[dt.continuous, dt.binary, dt.integer]
        data_types = [dt.continuous] + [dt.continuous] * mars.n_variables
        ub = [1] + [X_UB] * mars.n_variables
        lb = [1] + [X_LB] * mars.n_variables
        obj = [0] * n_var
        obj[0] = mars.coefficients[0, 0]

        var_index = 1 + mars.n_variables
        q_mat = [cplex.SparsePair(ind=[], val=[])
                 ] + [cplex.SparsePair(ind=[], val=[])] * mars.n_variables

        con_lin_expr = []
        con_senses = []
        con_rhs = []
        # self.knot_value = knot_value
        # self.index_of_variable = index_of_variable
        # self.sign = sign
        for i in range(mars.n_basis_fn):
            order = mars.basis_fns[i + 1].order
            if order == 1:
                obj[var_index] = mars.coefficients[i + 1, 0]
                data_types.extend([dt.continuous, dt.binary])
                ub.extend([M, 1])
                lb.extend([0, 0])
                q_mat.append(cplex.SparsePair(ind=[], val=[]))
                q_mat.append(cplex.SparsePair(ind=[], val=[]))
                # print(mars.basis_fns[i + 1].knot_items[0].knot_value)
                # print(mars.basis_fns[i + 1].knot_items[0].index_of_variable)
                # print(mars.basis_fns[i + 1].knot_items[0].sign)
                s = mars.basis_fns[i + 1].knot_items[0].sign
                k = mars.basis_fns[i + 1].knot_items[0].knot_value
                i_v = mars.basis_fns[i + 1].knot_items[0].index_of_variable + 1
                if s == 1:
                    con_lin_expr.extend([
                        cplex.SparsePair(ind=[var_index, i_v], val=[1.0,
                                                                    -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, i_v, var_index],
                                         val=[-M, 1.0, -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, var_index],
                                         val=[M, -1.0])
                    ])
                    con_senses.extend(["G", "G", "G"])
                    con_rhs.extend([-k, k - M, 0])
                else:
                    con_lin_expr.extend([
                        cplex.SparsePair(ind=[var_index, i_v], val=[1.0, 1.0]),
                        cplex.SparsePair(ind=[var_index + 1, i_v, var_index],
                                         val=[-M, -1.0, -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, var_index],
                                         val=[M, -1.0])
                    ])
                    con_senses.extend(["G", "G", "G"])
                    con_rhs.extend([k, -M - k, 0])

                var_index += 2

            elif order == 2:
                isQuadratic = True
                q_mat.append(
                    cplex.SparsePair(ind=[var_index + 2],
                                     val=[mars.coefficients[i + 1, 0]]))
                q_mat.append(cplex.SparsePair(ind=[], val=[]))

                s = mars.basis_fns[i + 1].knot_items[0].sign
                k = mars.basis_fns[i + 1].knot_items[0].knot_value
                i_v = mars.basis_fns[i + 1].knot_items[0].index_of_variable + 1
                if s == 1:
                    con_lin_expr.extend([
                        cplex.SparsePair(ind=[var_index, i_v], val=[1.0,
                                                                    -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, i_v, var_index],
                                         val=[-M, 1.0, -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, var_index],
                                         val=[M, -1.0])
                    ])
                    con_senses.extend(["G", "G", "G"])
                    con_rhs.extend([-k, k - M, 0])
                else:
                    con_lin_expr.extend([
                        cplex.SparsePair(ind=[var_index, i_v], val=[1.0, 1.0]),
                        cplex.SparsePair(ind=[var_index + 1, i_v, var_index],
                                         val=[-M, -1.0, -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, var_index],
                                         val=[M, -1.0])
                    ])
                    con_senses.extend(["G", "G", "G"])
                    con_rhs.extend([k, -M - k, 0])

                var_index += 2
                q_mat.append(
                    cplex.SparsePair(ind=[var_index - 2],
                                     val=[mars.coefficients[i + 1, 0]]))
                q_mat.append(cplex.SparsePair(ind=[], val=[]))

                s = mars.basis_fns[i + 1].knot_items[1].sign
                k = mars.basis_fns[i + 1].knot_items[1].knot_value
                i_v = mars.basis_fns[i + 1].knot_items[1].index_of_variable + 1
                if s == 1:
                    con_lin_expr.extend([
                        cplex.SparsePair(ind=[var_index, i_v], val=[1.0,
                                                                    -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, i_v, var_index],
                                         val=[-M, 1.0, -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, var_index],
                                         val=[M, -1.0])
                    ])
                    con_senses.extend(["G", "G", "G"])
                    con_rhs.extend([-k, k - M, 0])
                else:
                    con_lin_expr.extend([
                        cplex.SparsePair(ind=[var_index, i_v], val=[1.0, 1.0]),
                        cplex.SparsePair(ind=[var_index + 1, i_v, var_index],
                                         val=[-M, -1.0, -1.0]),
                        cplex.SparsePair(ind=[var_index + 1, var_index],
                                         val=[M, -1.0])
                    ])
                    con_senses.extend(["G", "G", "G"])
                    con_rhs.extend([k, -M - k, 0])

                var_index += 2
                data_types.extend(
                    [dt.continuous, dt.binary, dt.continuous, dt.binary])
                ub.extend([M, 1, M, 1])
                lb.extend([0, 0, 0, 0])
            else:
                print("problem!greater than 2 way interaction term!")
                return

        # print(con_lin_expr)
        # print(con_senses)
        # print(con_rhs)

        # opt_prob.linear_constraints.add(lin_expr=con_lin_expr, senses=con_senses, rhs=con_rhs)
        # print(obj)
        # print(data_types)
        # print(q_mat)
        opt_prob.variables.add(obj=obj, types=data_types, lb=lb, ub=ub)

        if isQuadratic:
            opt_prob.objective.set_quadratic(q_mat)
        opt_prob.linear_constraints.add(lin_expr=con_lin_expr,
                                        senses=con_senses,
                                        rhs=con_rhs)
        # print(var_index)
        # indices = c.linear_constraints.add(
        #     lin_expr=[cplex.SparsePair(ind=["x1", "x3"], val=[1.0, -1.0]),
        #               cplex.SparsePair(ind=["x1", "x2"], val=[1.0, 1.0]),
        #               cplex.SparsePair(ind=["x1", "x2", "x3"], val=[-1.0] * 3),
        #               cplex.SparsePair(ind=["x2", "x3"], val=[10.0, -2.0])],
        #     senses=["E", "L", "G", "R"],
        #     rhs=[0.0, 1.0, -1.0, 2.0],
        #     range_values=[0.0, 0.0, 0.0, -10.0],
        #     names=["c0", "c1", "c2", "c3"])

        # opt_prob.linear_constraints.add(lin_expr=[cplex.SparsePair(ind=[0, 3], val=[1.0, -1.0])],
        #                                           senses=["E"], rhs=[0.0])
        # print(obj)
        # print(mars.coefficients)
        opt_prob.write('mars.lp')
        opt_prob.solve()
        # opt_prob.solution.get_status()
        r = opt_prob.solution.get_objective_value()
        x = opt_prob.solution.get_values()
        print("R", r)
        print("x", [x[1:1 + mars.n_variables]])
        print("x_original", mars.X_inverse_scale([x[1:1 + mars.n_variables]]))
        print("predict",
              mars.predict(mars.X_inverse_scale([x[1:1 + mars.n_variables]])))
        # print(mars.basis_fns[5].order)
        # p = cplex.Cplex()
        # p.objective.set_sense(p.objective.sense.maximize)
        # obj = [1.0, 2.0]
        # ub = [2, 4]
        # lb = [-1, -2]
        # p.variables.add(obj=obj, ub=ub, lb=lb)
        #
        # target = p.parameters.optimalitytarget.values
        # p.parameters.optimalitytarget.set(target.optimal_global)
        opt_x = mars.X_inverse_scale([x[1:1 + mars.n_variables]])
        opt_x = opt_x[0]
        return opt_x.tolist() + [r]
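# Each first-order hinge term max(0, x - k) above is linearised with a
# continuous variable h in [0, M], a binary z and big-M constraints
# (written here for sign s = +1; the s = -1 case uses (k - x) instead):
#   h >= x - k
#   h <= x - k + M * (1 - z)
#   h <= M * z
# With z = 0 this forces h = 0 (feasible only when x <= k); with z = 1 it
# forces h = x - k (feasible only when x >= k), so h = max(0, x - k).
# M = 2.0 is valid here because x and k both lie in [X_LB, X_UB] = [-1, 1].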
Exemple #30
0
    def build_lp(self):
        """
        Build CPLEX problem based on current matrices.

        Returns
        -------
        cplex object
            LP problem with default parameters corresponding to current
            matrices.

        """
        # preprocess matrices
        # rescale concentration columns?
        lhs = self.matrix.A
        # scaling_factor = 1000
        # scaling = numpy.ones(lhs.shape[1])
        # scaling[numpy.concatenate([self.enzyme_cols, self.process_cols,
        # self.species_cols])] \
        # = 1.0/scaling_factor
        # lhs *= diags(scaling)

        # transform inequality and equality constraints to CPLEX row format
        lhs = lhs.tolil()
        rows = []
        for nz_ind, data in zip(lhs.rows, lhs.data):
            rows.append(cplex.SparsePair(nz_ind, data))

        # define problem
        lp_problem = cplex.Cplex()
        # set parameters
        lp_problem.parameters.feasopt.tolerance.set(1e-9)
        lp_problem.parameters.simplex.tolerances.feasibility.set(1e-9)
        lp_problem.parameters.simplex.tolerances.optimality.set(1e-9)
        lp_problem.parameters.simplex.tolerances.markowitz.set(0.1)
        lp_problem.parameters.barrier.convergetol.set(1e-9)
        # aggressive scaling
        lp_problem.parameters.read.scale.set(1)
        # Threads: the default (0) means that Cplex decides automatically
        # how many threads to use
        # lp_problem.parameters.threads.set(0)
        lp_problem.set_results_stream(None)

        # define columns and add rows
        lp_problem.variables.add(names=self.matrix.col_names)
        lp_problem.variables.set_lower_bounds(
            zip(self.matrix.col_names, self.matrix.LB))
        lp_problem.variables.set_upper_bounds(
            zip(self.matrix.col_names, self.matrix.UB))
        lp_problem.objective.set_sense(lp_problem.objective.sense.minimize)
        lp_problem.objective.set_linear(
            zip(self.matrix.col_names, self.matrix.f))

        lp_problem.linear_constraints.add(names=self.matrix.row_names)
        lp_problem.linear_constraints.set_linear_components(
            zip(self.matrix.row_names, rows))
        lp_problem.linear_constraints.set_rhs(
            zip(self.matrix.row_names, self.matrix.b))
        lp_problem.linear_constraints.set_senses(
            zip(self.matrix.row_names, self.matrix.row_signs))
        # set starting point (not exactly sure how this works)
        if self._sol_basis is not None:
            lp_problem.start.set_start(self._sol_basis[0], self._sol_basis[1],
                                       self.X, [], [], self.lambda_)
        return lp_problem
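# A hedged usage sketch for the LP built above (attribute and method names
# follow the snippet; everything else is illustrative):
# lp = self.build_lp()
# lp.solve()
# if lp.solution.get_status() == lp.solution.status.optimal:
#     values = dict(zip(self.matrix.col_names, lp.solution.get_values()))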