Example #1
    def __init__(self, cplex=None):
        if cplex:
            self._model = Cplex(cplex._model)
        else:
            self._model = Cplex()
        self._init_lin()
        # to avoid a variable with index 0
        self._model.variables.add(
            names=['_dummy_'], types=[self._model.variables.type.continuous])
        self._var_id = {'_dummy_': 0}
Example #2
def modelisation():

    # Variables

    m, L, li, qi = read_instance_file(argv[1])

    # Model for solving the minimization of the number of boards used

    model = Cplex()
    model.set_results_stream(None)

    # Decision variables of the model

    model_variables = list(range(len(li)))
    model.variables.add(obj=[1 for j in model_variables])

    # Model constraints

    model_contraintes = range(len(qi))

    model.linear_constraints.add(
        lin_expr=[SparsePair() for j in model_contraintes],
        senses=["G" for j in model_contraintes],
        rhs=qi)

    for var_index in model_variables:
        model.linear_constraints.set_coefficients(var_index, var_index,
                                                  int(L / li[var_index]))

    # Model used to generate cutting patterns with the
    # Gilmore-Gomory column generation method

    pattern_model = Cplex()
    pattern_model.set_results_stream(None)

    # Decision variables

    panneaux_indices = range(len(li))
    pattern_model.variables.add(
        types=[pattern_model.variables.type.integer for j in panneaux_indices])

    pattern_model.variables.add(obj=[1], lb=[1], ub=[1])

    # The only constraint here is that the total size of the panels
    # cannot exceed the length L of the board.
    pattern_model.linear_constraints.add(
        lin_expr=[SparsePair(ind=panneaux_indices, val=li)],
        senses=["L"],
        rhs=[L])

    # Set the objective (minimization)
    pattern_model.objective.set_sense(pattern_model.objective.sense.minimize)

    return m, model, pattern_model, model_contraintes, model_variables, panneaux_indices
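What follows is a hedged sketch, not part of the example, of how the two models returned by modelisation() are typically driven in a Gilmore-Gomory column-generation loop. The pricing step below maximizes the dual-weighted packing (the example itself leaves the sense at minimize), and the stopping tolerance is an assumption.

m, model, pattern_model, cons, variables, panel_idx = modelisation()

while True:
    model.solve()                                        # restricted master LP
    duals = model.solution.get_dual_values(list(cons))

    # pricing: find the pattern with the largest dual-weighted value
    pattern_model.objective.set_linear(list(zip(list(panel_idx), duals)))
    pattern_model.objective.set_sense(pattern_model.objective.sense.maximize)
    pattern_model.solve()
    pattern = pattern_model.solution.get_values(list(panel_idx))

    # the candidate column improves the master only if sum(dual_i * a_i) > 1
    if sum(d * a for d, a in zip(duals, pattern)) <= 1.0 + 1e-6:
        break

    # append the new pattern as a column of the master problem
    new_col = model.variables.get_num()
    model.variables.add(obj=[1.0])
    model.linear_constraints.set_coefficients(
        [(c, new_col, round(pattern[c])) for c in cons if pattern[c] > 0.5])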
Example #3
    def __init__(self, cplex=None):
        try:
            if cplex:
                self._model = Cplex(cplex._model)
            else:
                self._model = Cplex()
        except NameError:
            raise NameError('CPLEX is not installed. See https://www.ibm.com/support/knowledgecenter/SSSA5P_12.8.0/ilog.odms.studio.help/Optimization_Studio/topics/COS_home.html')

        self._init_lin()
        # to avoid a variable with index 0
        self._model.variables.add(names=['_dummy_'], types=[self._model.variables.type.continuous])
        self._var_id = {'_dummy_': 0}
Example #4
    def __init__(self, problem):
        # instance variable: the solution set found by the solver
        self.cplexSolutionSet = []
        # instance variable: the solution map; keys are the solution objective values, values are the solutions
        self.cplexResultMap = {}
        # instance variable: the map of the solutions in the Pareto front
        self.cplexParetoSet = {}
        # problem
        self.problem = problem
        # solver
        self.solver = Cplex()
        # boundary solver
        self.boundary_solver = Cplex()
Example #5
def copy_cplex(cpx):
    cpx_copy = Cplex(cpx)
    cpx_parameters = cpx.parameters.get_changed()
    for (pname, pvalue) in cpx_parameters:
        phandle = reduce(getattr, str(pname).split("."), cpx_copy)
        phandle.set(pvalue)
    return cpx_copy
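A minimal usage sketch for copy_cplex; it assumes the CPLEX Python API is installed and that, on Python 3, reduce comes from functools in the surrounding module.

from functools import reduce   # needed by copy_cplex on Python 3
from cplex import Cplex

original = Cplex()
original.parameters.timelimit.set(60.0)        # marks the parameter as "changed"
clone = copy_cplex(original)
print(clone.parameters.timelimit.get())        # the copy inherits the changed parameter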
Example #6
    def __init__(self, cplex=None):
        if not _HAS_CPLEX:
            raise MissingOptionalLibraryError(
                libname='CPLEX',
                name='SimpleCPLEX',
                pip_install='pip install qiskit-aqua[cplex]')

        if cplex:
            self._model = Cplex(cplex._model)
        else:
            self._model = Cplex()

        self._init_lin()
        # to avoid a variable with index 0
        self._model.variables.add(names=['_dummy_'], types=[self._model.variables.type.continuous])
        self._var_id = {'_dummy_': 0}
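The snippet relies on a module-level _HAS_CPLEX flag defined outside the excerpt; a minimal sketch of the usual guard, assuming it is set at import time, looks like this.

try:
    from cplex import Cplex
    _HAS_CPLEX = True
except ImportError:
    _HAS_CPLEX = False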
Example #7
    def __init__(self, model=None):
        Solver.__init__(self)
        self.problem = Cplex()

        self.status_mapping = {
            self.problem.solution.status.optimal:
            Status.OPTIMAL,
            self.problem.solution.status.optimal_tolerance:
            Status.OPTIMAL,
            self.problem.solution.status.unbounded:
            Status.UNBOUNDED,
            self.problem.solution.status.infeasible:
            Status.INFEASIBLE,
            self.problem.solution.status.infeasible_or_unbounded:
            Status.INF_OR_UNB,
            self.problem.solution.status.MIP_optimal:
            Status.OPTIMAL,
            self.problem.solution.status.MIP_unbounded:
            Status.UNBOUNDED,
            self.problem.solution.status.MIP_infeasible:
            Status.INFEASIBLE,
            self.problem.solution.status.MIP_infeasible_or_unbounded:
            Status.INF_OR_UNB
        }

        self.vartype_mapping = {
            VarType.BINARY: self.problem.variables.type.binary,
            VarType.INTEGER: self.problem.variables.type.integer,
            VarType.CONTINUOUS: self.problem.variables.type.continuous
        }

        self.parameter_mapping = {
            Parameter.TIME_LIMIT: self.problem.parameters.timelimit,
            Parameter.FEASIBILITY_TOL:
            self.problem.parameters.simplex.tolerances.feasibility,
            Parameter.OPTIMALITY_TOL:
            self.problem.parameters.simplex.tolerances.optimality,
            Parameter.INT_FEASIBILITY_TOL:
            self.problem.parameters.mip.tolerances.integrality,
            Parameter.MIP_ABS_GAP:
            self.problem.parameters.mip.tolerances.absmipgap,
            Parameter.MIP_REL_GAP:
            self.problem.parameters.mip.tolerances.mipgap,
            Parameter.POOL_SIZE: self.problem.parameters.mip.limits.populate,
            Parameter.POOL_GAP: self.problem.parameters.mip.pool.relgap
        }

        self.set_parameters(default_parameters)

        self.set_logging(False)
        self._cached_lin_obj = {}
        self._cached_sense = None
        self._cached_lower_bounds = {}
        self._cached_upper_bounds = {}
        self._cached_vars = []
        self._cached_constrs = []

        if model:
            self.build_problem(model)
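A hedged sketch, not part of the snippet, of how such a wrapper usually consults status_mapping after solving; Status.UNKNOWN is an assumed fallback member of the Status enum.

    def _translate_status(self):
        # map CPLEX's numeric status code onto the wrapper's Status enum
        code = self.problem.solution.get_status()
        return self.status_mapping.get(code, Status.UNKNOWN)   # fallback is an assumption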
Example #8
    def __init__(self, A):
        self.upper_bound = 1000
        self.eps = 1e-10
        self.dimension = 0

        try:
            self.cpl = Cplex()
        except (NameError, CplexSolverError):
            raise CplexNotInstalledError()
Example #9
def copy_cplex(cpx):
    """
    Copy a Cplex object
    :param cpx: Cplex object
    :return: Copy of Cplex object
    """
    cpx_copy = Cplex(cpx)
    cpx_parameters = cpx.parameters.get_changed()
    for (pname, pvalue) in cpx_parameters:
        phandle = reduce(getattr, str(pname).split("."), cpx_copy)
        phandle.set(pvalue)
    return cpx_copy
Example #10
    def create_cplex(cls, verbose=False):
        cpx = Cplex()
        # see if we want it to be verbose sometimes
        if not verbose:
            cpx.set_log_stream(None)
            cpx.set_results_stream(None)
            cpx.set_error_stream(None)
            cpx.set_warning_stream(None)

        # disable datacheck
        cpx_datacheck_id = 1056
        setintparam(cpx._env._e, cpx_datacheck_id, 0)
        return cpx
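The setintparam call above is defined outside the excerpt; in docplex-style code it is typically pulled from CPLEX's private procedural layer (an assumption here). A sketch of the same effect through the public parameter tree, where id 1056 corresponds to the datacheck parameter:

# assumed import for the snippet above (private API, subject to change):
# from cplex._internal._procedural import setintparam
cpx = Cplex()
cpx.parameters.read.datacheck.set(0)   # public equivalent of setting parameter 1056 to 0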
Example #11
def lp(a, b, c):
    # number of equations (m) and unknowns (n)
    m, n = a.shape
    # objective function and lower bounds
    obj = c.copy()
    lb = zeros(n)
    # constraints
    count = 0
    sense = ""
    rows = []
    cols = []
    vals = []
    rhs = []
    for i in range(m):
        rows.extend([count for k in range(n)])
        cols.extend([k for k in range(n)])
        vals.extend(a[i, :])
        rhs.append(b[i])
        sense += "L"
        count += 1
    # cplex problem variable
    prob = Cplex()
    # quiet results
    #prob.set_results_stream(None)
    # maximization problem
    prob.objective.set_sense(prob.objective.sense.maximize)
    # problem variables
    prob.variables.add(obj=obj, lb=lb)
    #for j in range(prob.variables.get_num()):
    #    prob.variables.set_types(j,prob.variables.type.integer)
    # linear constraints
    prob.linear_constraints.add(rhs=rhs, senses=sense)
    prob.linear_constraints.set_coefficients(list(zip(rows, cols, vals)))  # list() so the lazy zip works on Python 3
    # alg method
    alg = prob.parameters.lpmethod.values
    prob.parameters.lpmethod.set(alg.auto)
    # solve problem
    prob.solve()
    # solution variables
    var = prob.solution.get_values()
    x = var[0:n]
    opt = prob.solution.get_objective_value()
    # return
    return opt, x
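A small usage sketch for lp(); the NumPy data below is illustrative only.

import numpy as np
from numpy import zeros   # lp() above also relies on zeros()

a = np.array([[1.0, 1.0],
              [2.0, 1.0]])
b = np.array([4.0, 6.0])
c = np.array([3.0, 2.0])
opt, x = lp(a, b, c)
print(opt, x)   # maximum of 3*x1 + 2*x2 is 10 at x = (2, 2)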
Example #12
    def __init_optimization_problem(self):

        problem = Cplex()

        sense = problem.objective.sense
        problem.objective.set_sense(sense=sense.maximize)

        variables = self.__build_variables()
        # self.__log('Variables: ', variables)

        objective = self.__build_objective(variables)
        # self.__log('Objective: ', objective)

        problem.variables.add(names=variables,
                              types=['C'] * len(variables),
                              ub=[1.0] * self.__problem.vertices_num(),
                              obj=objective)

        constraints = self.__build_constraints(variables)
        # self.__log('Constraints count: ', len(constraints))
        self.__set_constraints(problem, constraints)

        self.__optimization_problem = problem
Example #13
def run_docplex_check_list():
    check_platform()

    # check requirements
    check_import("six")
    check_import("enum")
    check_import("cloudpickle")

    # check cplex
    try:
        from cplex import Cplex

        cpx = Cplex()
        del cpx
    except ImportError:
        print("Cplex DLL not found, if present , you must add it to PYTHNPATH")

    # check pandas
    try:
        import pandas as pd
        from pandas import DataFrame, Series
        dd = DataFrame({})
    except ImportError:
        print("-- pandas is not present, some features might be unavailable.")
Example #14
    def __getProblem(self,
                     K,
                     ActionSet,
                     WorkingSet=[],
                     theta=0.0,
                     log_stream=False,
                     log_write=False):
        prob = Cplex()
        if (log_stream == False):
            prob.set_log_stream(None)
            prob.set_results_stream(None)
        prob.objective.set_sense(prob.objective.sense.minimize)
        prob.parameters.timelimit.set(self.time_limit_)
        # prob.parameters.timelimit.set(600)
        # prob.parameters.emphasis.mip.set(4)

        pi = [['pi_{}_{}'.format(d, m) for m in range(self.M_[d])]
              for d in range(self.D_)]
        if (self.interaction_):
            for d in range(self.D_):
                prob.variables.add(names=pi[d], types=['B'] * (self.M_[d]))
        else:
            for d in range(self.D_):
                prob.variables.add(obj=self.C_[d],
                                   names=pi[d],
                                   types=['B'] * (self.M_[d]))

        if (self.interaction_):
            psi = ['psi_{0}'.format(d) for d in range(self.D_)]
            prob.variables.add(obj=np.sqrt(self.eigenvalues_),
                               names=psi,
                               lb=[0] * (self.D_),
                               ub=self.ubs_,
                               types=['S'] * (self.D_))
            prob.linear_constraints.add(
                lin_expr=[[psi, np.sqrt(self.eigenvalues_)]],
                senses=['G'],
                rhs=[0])

        # constraint: decision function value
        coef_const = [[self.coef_[d] * a for a in self.A_[d]]
                      for d in range(self.D_)]
        coef = [[sum(pi, []), sum(coef_const, [])]]
        const = [theta + self.tol_ - self.intercept_
                 ] if self.y_ == 0 else [theta + self.tol_ - self.intercept_]
        sense = ['G'] if self.y_ == 0 else ['L']
        prob.linear_constraints.add(lin_expr=coef, senses=sense, rhs=const)

        # constraint: only one action is selected
        coef = [[pi[d], [1] * self.M_[d]] for d in range(self.D_)]
        # prob.linear_constraints.add(lin_expr=coef, senses=['E']*self.D_, rhs=[1]*self.D_)
        coef += [[[pi[d][self.m_[d]]], [1]] for d in range(self.D_)
                 if 'FIX' in self.types_[d]]
        if (len(WorkingSet) != 0):
            coef += [[[pi[d][self.m_[d]]], [1]] for d in range(self.D_)
                     if d not in WorkingSet]
        const = [1] * len(coef)
        prob.linear_constraints.add(lin_expr=coef,
                                    senses=['E'] * len(coef),
                                    rhs=const)

        # constraint: categorical features
        coef = [[[pi[d][1] for d in cat], [1] * len(cat)]
                for cat in self.categories_]
        prob.linear_constraints.add(lin_expr=coef,
                                    senses=['E'] * len(self.categories_),
                                    rhs=[1] * len(self.categories_))

        # constraint: max modification
        coef_const = [
            [0, 0] if d in self.categories_flatten_ and self.x_[d] == 1 else
            [0 if m == self.m_[d] else 1 for m in range(self.M_[d])]
            for d in range(self.D_)
        ]
        coef = [[sum(pi, []), sum(coef_const, [])]]
        prob.linear_constraints.add(lin_expr=coef, senses=['L'], rhs=[K])

        if (self.interaction_):
            # constraint: absolute values of psi
            prob.linear_constraints.add(
                lin_expr=[[sum(pi, []) + [psi[d]], self.uu_[d] + [1]]
                          for d in range(self.D_)],
                senses=['G'] * self.D_,
                rhs=[0] * self.D_)
            prob.linear_constraints.add(
                lin_expr=[[sum(pi, []) + [psi[d]], self.uu_[d] + [-1]]
                          for d in range(self.D_)],
                senses=['L'] * self.D_,
                rhs=[0] * self.D_)

        if (self.alpha_ > 0):

            def cost_dev(n, m):
                R_n = self.R_[n]
                R_m = self.R_[m]
                return sum([[R_n[d][i] - R_m[d][i] for i in range(self.M_[d])]
                            for d in range(self.D_)], [])

            nu = ['nu_{}'.format(n) for n in range(self.N_reg_)]
            prob.variables.add(names=nu, types=['B'] * self.N_reg_)
            prob.linear_constraints.add(lin_expr=[[nu, [1] * self.N_reg_]],
                                        senses=['E'],
                                        rhs=[self.n_neighbors_])
            pi_flatten = sum(pi, [])

            if (self.n_neighbors_ == 1):
                rho = ['rho_{}'.format(n) for n in range(self.N_reg_)]
                prob.variables.add(obj=[
                    (self.alpha_ * self.lrd_[n]) / self.n_neighbors_
                    for n in range(self.N_reg_)
                ],
                                   names=rho,
                                   lb=[0] * self.N_reg_,
                                   ub=self.R_ubs_,
                                   types=['S'] * self.N_reg_)
                prob.linear_constraints.add(
                    lin_expr=[[[nu[n], rho[n]], [self.k_dists_[n], -1]]
                              for n in range(self.N_reg_)],
                    senses=['L'] * self.N_reg_,
                    rhs=[0] * self.N_reg_)
                prob.linear_constraints.add(lin_expr=[[
                    pi_flatten + [nu[n], rho[n]],
                    sum(self.R_[n], []) + [self.R_ubs_[n], -1]
                ] for n in range(self.N_reg_)],
                                            senses=['L'] * self.N_reg_,
                                            rhs=self.R_ubs_)
                for n in range(self.N_reg_):
                    prob.linear_constraints.add(
                        lin_expr=[[
                            pi_flatten + [nu[n]],
                            cost_dev(n, m) + [self.R_ubs_[n]]
                        ] for m in range(self.N_reg_) if m != n],
                        senses=['L'] * (self.N_reg_ - 1),
                        rhs=[self.R_ubs_[n]] * (self.N_reg_ - 1))
                # prob.variables.add(obj=[self.alpha_/(self.dists_[n]*(self.n_neighbors_)) for n in range(self.N_reg_)], names=rho, lb=[0]*self.N_reg_, ub=self.R_ubs_, types=['S']*self.N_reg_)
                # prob.linear_constraints.add(lin_expr=[[[nu[n],rho[n]], [self.dists_[n],-1]] for n in range(self.N_reg_)], senses=['L']*self.N_reg_, rhs=[0]*self.N_reg_)
                # prob.linear_constraints.add(lin_expr=[[pi_flatten+[nu[n],rho[n]], sum(self.R_[n], [])+[self.R_ubs_[n],-1]] for n in range(self.N_reg_)], senses=['L']*self.N_reg_, rhs=self.R_ubs_)
                # for n in range(self.N_reg_):
                #     prob.linear_constraints.add(lin_expr=[[ pi_flatten+[nu[n]], cost_dev(n,m)+[self.R_ubs_[n]] ] for m in range(self.N_reg_) if m!=n], senses=['L']*(self.N_reg_-1), rhs=[self.R_ubs_[n]]*(self.N_reg_-1))
                # indicator
                # for m in range(self.N_reg_):
                # if(m!=n): prob.indicator_constraints.add(lin_expr=[pi_flatten, cost_dev(n,m)], sense=['L'], rhs=0, indvar=nu[n], complemented=0)
            else:
                mu = [['mu_{}_{}'.format(n, m) for m in range(self.N_reg_)]
                      for n in range(self.N_reg_)]
                for n in range(self.N_reg_):
                    prob.variables.add(
                        names=[mu[n][m] for m in range(self.N_reg_) if m != n],
                        types=['B'] * (self.N_reg_ - 1))
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(self.N_reg_):
                        if (m == n): continue
                        coef += [[[nu[n], nu[m], mu[n][m]], [-1, 1, -1]],
                                 [[nu[n], mu[n][m]], [1, 1]],
                                 [[nu[m], mu[n][m]], [-1, 1]]]
                        const += [0, 1, 0]
                    prob.linear_constraints.add(lin_expr=coef,
                                                senses=['L'] * 3 *
                                                (self.N_reg_ - 1),
                                                rhs=const)
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(self.N_reg_):
                        if (m == n): continue
                        coef += [[
                            pi_flatten + [mu[n][m]],
                            cost_dev(n, m) + [-1 * self.R_ubs_[m]]
                        ]]
                        const += [-1 * self.R_ubs_[m]]
                    prob.linear_constraints.add(lin_expr=coef,
                                                senses=['G'] *
                                                (self.N_reg_ - 1),
                                                rhs=const)
                rho = [['rho_{}_{}'.format(n, m) for m in range(self.N_reg_)]
                       for n in range(self.N_reg_)]
                for n in range(self.N_reg_):
                    prob.variables.add(obj=[
                        self.alpha_ / (self.dists_[m] * (self.n_neighbors_**2))
                        for m in range(self.N_reg_)
                    ],
                                       names=rho[n],
                                       lb=[0] * (self.N_reg_),
                                       ub=[self.R_ubs_[n]] * (self.N_reg_),
                                       types=['S'] * (self.N_reg_))
                eta = [['eta_{}_{}'.format(n, m) for m in range(self.N_reg_)]
                       for n in range(self.N_reg_)]
                for n in range(self.N_reg_):
                    prob.variables.add(names=eta[n],
                                       types=['B'] * (self.N_reg_))
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(self.N_reg_):
                        coef += [[
                            pi_flatten + [eta[n][m], rho[n][m]],
                            sum(self.R_[n], []) + [self.R_ubs_[n], -1]
                        ], [[eta[n][m], rho[n][m]], [self.dists_[n], -1]]]
                        const += [self.R_ubs_[n], 0]
                    prob.linear_constraints.add(lin_expr=coef,
                                                senses=['L'] *
                                                (2 * self.N_reg_),
                                                rhs=const)
                coef = []
                for n in range(self.N_reg_):
                    coef += [[[eta[n][m], eta[m][n]], [1, -1]]
                             for m in range(self.N_reg_) if m != n]
                coef += [[[eta[n][n], nu[n]], [1, -1]]
                         for n in range(self.N_reg_)]
                prob.linear_constraints.add(lin_expr=coef,
                                            senses=['E'] * len(coef),
                                            rhs=[0] * len(coef))
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(n + 1, self.N_reg_):
                        coef += [[[nu[n], nu[m], eta[n][m]], [1, 1, -1]],
                                 [[nu[n], eta[n][m]], [-1, 1]],
                                 [[nu[m], eta[n][m]], [-1, 1]]]
                        const += [1, 0, 0]
                    prob.linear_constraints.add(lin_expr=coef,
                                                senses=['L'] * len(const),
                                                rhs=const)

        if (log_write): prob.write('mylog.lp')

        return prob
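A hedged sketch of how a problem returned by __getProblem is typically consumed; the call arguments and the caller context (self, action_set) are assumptions, not part of the snippet.

    prob = self.__getProblem(K=3, ActionSet=action_set)   # hypothetical call from inside the class
    prob.solve()
    if prob.solution.is_primal_feasible():
        chosen = {name: round(val)
                  for name, val in zip(prob.variables.get_names(),
                                       prob.solution.get_values())
                  if name.startswith('pi_') and val > 0.5}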
Example #15
    def __getProblem(self, K, WorkingSet=[], theta=0.5, log_stream=False, log_write=False):
        prob = Cplex()
        if(log_stream==False):
            prob.set_log_stream(None)
            prob.set_results_stream(None)
        prob.objective.set_sense(prob.objective.sense.minimize)
        prob.parameters.timelimit.set(self.time_limit_)
        # prob.parameters.emphasis.mip.set(4)

        pi = [['pi_{0}_{1}'.format(d, j) for j in range(self.M_[d])] for d in range(self.D_)]
        if(self.interaction_):
            for d in range(self.D_): prob.variables.add(names=pi[d], types=['B']*(self.M_[d]))
        else:
            for d in range(self.D_):
                prob.variables.add(obj=self.C_[d], names=pi[d], types=['B']*(self.M_[d]))
            prob.linear_constraints.add(lin_expr=[[sum(pi, []), sum(self.C_, [])]], senses=['G'], rhs=[0])

        phi = [['phi_{0}_{1}'.format(t, l) for l in range(self.L_[t])] for t in range(self.T_)]
        for t in range(self.T_): prob.variables.add(names=phi[t], types=['B']*(self.L_[t]))

        if(self.interaction_):
            psi = ['psi_{0}'.format(d) for d in range(self.D_)]
            prob.variables.add(obj=np.sqrt(self.eigenvalues_), names=psi, lb=[0]*(self.D_), ub=self.ubs_, types=['S']*(self.D_))
            prob.linear_constraints.add(lin_expr=[[psi, np.sqrt(self.eigenvalues_)]], senses=['G'], rhs=[0])

        # constraint: decision function value
        phi_flatten = sum(phi, [])
        H_flatten = sum(self.H_, [])
        coef = [[phi_flatten, H_flatten]]
        const = [theta + self.tol_] if self.y_==0 else [theta]
        sense = ['G'] if self.y_==0 else ['L']
        prob.linear_constraints.add(lin_expr=coef, senses=sense, rhs=const)

        # constraint: interval
        coef = [[pi[d], [1]*self.M_[d]] for d in range(self.D_)]
        coef += [[[pi[d][self.m_[d]]], [1]] for d in range(self.D_) if 'FIX' in self.types_[d]]
        if(len(WorkingSet)!=0): coef += [[[pi[d][self.m_[d]]], [1]] for d in range(self.D_) if d not in WorkingSet]
        const = [1]*len(coef)
        prob.linear_constraints.add(lin_expr=coef, senses=['E']*len(coef), rhs=const)
        coef = [[[pi[d][1] for d in cat], [1]*len(cat)] for cat in self.categories_]
        const = [1]*len(coef)
        prob.linear_constraints.add(lin_expr=coef, senses=['E']*len(coef), rhs=const)

        # constraint: decision logic
        for t in range(self.T_):
            coef = []
            for l in range(self.L_[t]):
                temp = []
                for d in self.Anc_[t][l]:
                    temp += [pi[d][j] for j in self.S_[t][l][d]]
                coef.append([[phi[t][l]]+temp, [len(self.Anc_[t][l])]+[-1]*len(temp)])
            const = [0]*len(coef)
            prob.linear_constraints.add(lin_expr=coef, senses=['L']*len(coef), rhs=const)

        # constraint: leaf
        coef = [[phi[t], [1]*self.L_[t]] for t in range(self.T_)]
        const = [1]*self.T_
        prob.linear_constraints.add(lin_expr=coef, senses=['E']*self.T_, rhs=const)

        # constraint: max modification number
        coef_const = [[0,0] if d in self.categories_flatten_ and self.x_[d]==1 else [0 if m==self.m_[d] else 1 for m in range(self.M_[d])] for d in range(self.D_)]
        coef = [ [sum(pi, []), sum(coef_const, [])] ]
        prob.linear_constraints.add(lin_expr=coef, senses=['L'], rhs=[K])

        if(self.interaction_):
            # constraint: absolute cost
            for d in range(self.D_):
                coef_vars = []
                coef_const = []
                for d_ in range(self.D_):
                    coef_vars += pi[d_]
                    coef_const += [self.eigenmatrix_[d][d_] * self.C_[d_][m] for m in range(self.M_[d_])]
                coef_vars += ['psi'] if self.p_=='infty' else [psi[d]]
                prob.linear_constraints.add(lin_expr=[[coef_vars, coef_const+[1]]], senses=['G'], rhs=[0])
                prob.linear_constraints.add(lin_expr=[[coef_vars, coef_const+[-1]]], senses=['L'], rhs=[0])

        if(self.alpha_>0):
            def cost_dev(n,m):
                R_n = self.R_[n]
                R_m = self.R_[m]
                return sum([[R_n[d][i] - R_m[d][i] for i in range(self.M_[d])] for d in range(self.D_)], [])

            nu = ['nu_{}'.format(n) for n in range(self.N_reg_)]
            prob.variables.add(names=nu, types=['B']*self.N_reg_)
            prob.linear_constraints.add(lin_expr=[[nu, [1]*self.N_reg_]], senses=['E'], rhs=[self.n_neighbors_])
            pi_flatten = sum(pi, [])

            if(self.n_neighbors_ == 1):
                rho = ['rho_{}'.format(n) for n in range(self.N_reg_)]
                prob.variables.add(obj=[(self.alpha_ * self.lrd_[n])/self.n_neighbors_ for n in range(self.N_reg_)], names=rho, lb=[0]*self.N_reg_, ub=self.R_ubs_, types=['S']*self.N_reg_)
                prob.linear_constraints.add(lin_expr=[[[nu[n],rho[n]], [self.k_dists_[n],-1]] for n in range(self.N_reg_)], senses=['L']*self.N_reg_, rhs=[0]*self.N_reg_)
                prob.linear_constraints.add(lin_expr=[[pi_flatten+[nu[n],rho[n]], sum(self.R_[n], [])+[self.R_ubs_[n],-1]] for n in range(self.N_reg_)], senses=['L']*self.N_reg_, rhs=self.R_ubs_)
                for n in range(self.N_reg_):
                    prob.linear_constraints.add(lin_expr=[[ pi_flatten+[nu[n]], cost_dev(n,m)+[self.R_ubs_[n]] ] for m in range(self.N_reg_) if m!=n], senses=['L']*(self.N_reg_-1), rhs=[self.R_ubs_[n]]*(self.N_reg_-1))
            else:
                mu = [['mu_{}_{}'.format(n,m) for m in range(self.N_reg_)] for n in range(self.N_reg_)]
                for n in range(self.N_reg_): prob.variables.add(names=[mu[n][m] for m in range(self.N_reg_) if m!=n], types=['B']*(self.N_reg_-1))
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(self.N_reg_):
                        if(m==n): continue
                        coef += [[[nu[n],nu[m],mu[n][m]], [-1,1,-1]], [[nu[n],mu[n][m]], [1,1]], [[nu[m],mu[n][m]], [-1,1]]]
                        const += [0,1,0]
                    prob.linear_constraints.add(lin_expr=coef, senses=['L']*3*(self.N_reg_-1), rhs=const)
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(self.N_reg_):
                        if(m==n): continue
                        coef += [[pi_flatten+[mu[n][m]], cost_dev(n,m)+[-1*self.R_ubs_[m]]]]
                        const += [-1*self.R_ubs_[m]]
                    prob.linear_constraints.add(lin_expr=coef, senses=['G']*(self.N_reg_-1), rhs=const)
                rho = [['rho_{}_{}'.format(n,m) for m in range(self.N_reg_)] for n in range(self.N_reg_)]
                for n in range(self.N_reg_): prob.variables.add(obj=[self.alpha_/(self.dists_[m]*(self.n_neighbors_**2)) for m in range(self.N_reg_)], names=rho[n], lb=[0]*(self.N_reg_), ub=[self.R_ubs_[n]]*(self.N_reg_), types=['S']*(self.N_reg_))
                eta = [['eta_{}_{}'.format(n,m) for m in range(self.N_reg_)] for n in range(self.N_reg_)]
                for n in range(self.N_reg_): prob.variables.add(names=eta[n], types=['B']*(self.N_reg_))
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(self.N_reg_):
                        coef += [[pi_flatten+[eta[n][m],rho[n][m]], sum(self.R_[n],[])+[self.R_ubs_[n], -1]], [[eta[n][m], rho[n][m]], [self.dists_[n], -1]]]
                        const += [self.R_ubs_[n], 0]
                    prob.linear_constraints.add(lin_expr=coef, senses=['L']*(2*self.N_reg_), rhs=const)
                coef = []
                for n in range(self.N_reg_): coef += [[[eta[n][m], eta[m][n]], [1,-1]] for m in range(self.N_reg_) if m!=n]
                coef += [[[eta[n][n], nu[n]], [1,-1]] for n in range(self.N_reg_)]
                prob.linear_constraints.add(lin_expr=coef, senses=['E']*len(coef), rhs=[0]*len(coef))
                for n in range(self.N_reg_):
                    coef = []
                    const = []
                    for m in range(n+1, self.N_reg_):
                        coef += [[[nu[n],nu[m],eta[n][m]], [1,1,-1]], [[nu[n],eta[n][m]], [-1,1]], [[nu[m],eta[n][m]], [-1,1]]]
                        const += [1,0,0]
                    prob.linear_constraints.add(lin_expr=coef, senses=['L']*len(const), rhs=const)

        if(log_write): prob.write('mylog.lp')
        return prob
Example #16
def create_risk_slim(coef_set, input):
    """
    create RiskSLIM MIP object

    Parameters
    ----------
    coef_set - CoefficientSet with coefficient names, bounds, and L0 penalties
    input - dictionary of RiskSLIM parameters and formulation

    Returns
    -------
    mip - RiskSLIM surrogate MIP without 0 cuts

    Issues
    ------
    no support for non-integer Lset "values"
    only drops intercept index for variable_names that match '(Intercept)'
    """
    assert isinstance(coef_set, CoefficientSet)
    assert isinstance(input, dict)

    # setup printing and loading
    function_print_flag = input.get('print_flag', False)
    print_from_function = lambda msg: print_log(msg) if function_print_flag else lambda msg: None
    update_parameter = lambda pname, pvalue: get_or_set_default(input, pname, pvalue, print_flag = function_print_flag)

    # set default parameters
    input = update_parameter('w_pos', 1.0)
    input = update_parameter('w_neg', 2.0 - input['w_pos'])
    input = update_parameter('C_0', 0.01)
    input = update_parameter('include_auxillary_variable_for_objval', True)
    input = update_parameter('include_auxillary_variable_for_L0_norm', True)
    input = update_parameter('loss_min', 0.00)
    input = update_parameter('loss_max', float(CPX_INFINITY))
    input = update_parameter('L0_min', 0)
    input = update_parameter('L0_max', len(coef_set))
    input = update_parameter('objval_min', 0.00)
    input = update_parameter('objval_max', float(CPX_INFINITY))
    input = update_parameter('relax_integer_variables', False)
    input = update_parameter('drop_variables', True)
    input = update_parameter('tight_formulation', False)
    input = update_parameter('set_cplex_cutoffs', True)

    # variables
    P = len(coef_set)
    w_pos, w_neg = input['w_pos'], input['w_neg']
    C_0j = np.copy(coef_set.c0)
    L0_reg_ind = np.isnan(C_0j)
    C_0j[L0_reg_ind] = input['C_0']
    C_0j = C_0j.tolist()
    C_0_rho = np.copy(C_0j)
    trivial_L0_min = 0
    trivial_L0_max = np.sum(L0_reg_ind)

    rho_ub = list(coef_set.ub)
    rho_lb = list(coef_set.lb)
    rho_type = ''.join(list(coef_set.vtype))

    # calculate min/max values for loss
    loss_min = max(0.0, float(input['loss_min']))
    loss_max = min(CPX_INFINITY, float(input['loss_max']))

    # calculate min/max values for model size
    L0_min = max(input['L0_min'], 0.0)
    L0_max = min(input['L0_max'], trivial_L0_max)
    L0_min = ceil(L0_min)
    L0_max = floor(L0_max)
    assert L0_min <= L0_max

    # calculate min/max values for objval
    objval_min = max(input['objval_min'], 0.0)
    objval_max = min(input['objval_max'], CPX_INFINITY)
    assert objval_min <= objval_max

    # include constraint on min/max model size?
    nontrivial_L0_min = L0_min > trivial_L0_min
    nontrivial_L0_max = L0_max < trivial_L0_max
    include_auxillary_variable_for_L0_norm = input['include_auxillary_variable_for_L0_norm'] or \
                                             nontrivial_L0_min or \
                                             nontrivial_L0_max

    # include constraint on min/max objective value?
    nontrivial_objval_min = objval_min > 0.0
    nontrivial_objval_max = objval_max < CPX_INFINITY
    include_auxillary_variable_for_objval = input['include_auxillary_variable_for_objval'] or \
                                            nontrivial_objval_min or \
                                            nontrivial_objval_max

    has_intercept = '(Intercept)' in coef_set.variable_names
    """
    RiskSLIM MIP Formulation
    
    minimize w_pos*loss_pos + w_neg*loss_neg + 0*rho_j + C_0j*alpha_j
    
    such that 
    
    L0_min <= L0 <= L0_max
    -rho_min * alpha_j < lambda_j < rho_max * alpha_j

    L_0 in 0 to P
    rho_j in [rho_min_j, rho_max_j]
    alpha_j in {0,1}

    x = [loss_pos, loss_neg, rho_j, alpha_j]

    optional constraints:
    objval = w_pos * loss_pos + w_neg * loss_neg + sum(C_0j * alpha_j) (required for callback)
    L0_norm = sum(alpha_j) (required for callback)


    Changes for Tight Formulation (included when input['tight_formulation'] = True):

    sigma_j in {0,1} for j s.t. lambda_j has free sign and alpha_j exists
    lambda_j >= delta_pos_j if alpha_j = 1 and sigma_j = 1
    lambda_j <= -delta_neg_j if alpha_j = 1 and sigma_j = 0
    lambda_j >= alpha_j for j such that lambda_j >= 0
    lambda_j <= -alpha_j for j such that lambda_j <= 0
    
    """

    # create MIP object
    mip = Cplex()
    vars = mip.variables
    cons = mip.linear_constraints

    # set sense
    mip.objective.set_sense(mip.objective.sense.minimize)

    # add main variables
    loss_obj = [w_pos]
    loss_ub = [loss_max]
    loss_lb = [loss_min]
    loss_type = 'C'
    loss_names = ['loss']

    obj = loss_obj + [0.0] * P + C_0j
    ub = loss_ub + rho_ub + [1.0] * P
    lb = loss_lb + rho_lb + [0.0] * P
    ctype = loss_type + rho_type + 'B' * P

    rho_names = ['rho_%d' % j for j in range(P)]
    alpha_names = ['alpha_%d' % j for j in range(P)]
    varnames = loss_names + rho_names + alpha_names

    if include_auxillary_variable_for_objval:
        objval_auxillary_name = ['objval']
        objval_auxillary_ub = [objval_max]
        objval_auxillary_lb = [objval_min]
        objval_type = 'C'

        print_from_function("adding auxiliary variable for objval s.t. %1.4f <= objval <= %1.4f" % (objval_min, objval_max))
        obj += [0.0]
        ub += objval_auxillary_ub
        lb += objval_auxillary_lb
        varnames += objval_auxillary_name
        ctype += objval_type


    if include_auxillary_variable_for_L0_norm:
        L0_norm_auxillary_name = ['L0_norm']
        L0_norm_auxillary_ub = [L0_max]
        L0_norm_auxillary_lb = [L0_min]
        L0_norm_type = 'I'

        print_from_function("adding auxiliary variable for L0_norm s.t. %d <= L0_norm <= %d" % (L0_min, L0_max))
        obj += [0.0]
        ub += L0_norm_auxillary_ub
        lb += L0_norm_auxillary_lb
        varnames += L0_norm_auxillary_name
        ctype += L0_norm_type

    if input['relax_integer_variables']:
        ctype = ctype.replace('I', 'C')
        ctype = ctype.replace('B', 'C')

    vars.add(obj = obj, lb = lb, ub = ub, types = ctype, names = varnames)

    # 0-Norm LB Constraints:
    # lambda_j,lb * alpha_j <= lambda_j <= Inf
    # 0 <= lambda_j - lambda_j,lb * alpha_j < Inf
    for j in range(P):
        cons.add(names = ["L0_norm_lb_" + str(j)],
                 lin_expr = [SparsePair(ind=[rho_names[j], alpha_names[j]], val=[1.0, -rho_lb[j]])],
                 senses = "G",
                 rhs = [0.0])

    # 0-Norm UB Constraints:
    # lambda_j <= lambda_j,ub * alpha_j
    # 0 <= -lambda_j + lambda_j,ub * alpha_j
    for j in range(P):
        cons.add(names = ["L0_norm_ub_" + str(j)],
                 lin_expr =[SparsePair(ind=[rho_names[j], alpha_names[j]], val=[-1.0, rho_ub[j]])],
                 senses = "G",
                 rhs = [0.0])

    # objval_max constraint
    # loss_var + sum(C_0j .* alpha_j) <= objval_max
    if include_auxillary_variable_for_objval:
        print_from_function("adding constraint so that objective value <= " + str(objval_max))
        cons.add(names = ["objval_def"],
                 lin_expr = [SparsePair(ind = objval_auxillary_name + loss_names + alpha_names, val=[-1.0] + loss_obj + C_0j)],
                 senses = "E",
                 rhs = [0.0])

    # Auxiliary L0_norm variable definition:
    # L0_norm = sum(alpha_j)
    # L0_norm - sum(alpha_j) = 0
    if include_auxillary_variable_for_L0_norm:
        cons.add(names = ["L0_norm_def"],
                 lin_expr = [SparsePair(ind = L0_norm_auxillary_name + alpha_names, val = [1.0] + [-1.0] * P)],
                 senses = "E",
                 rhs = [0.0])


    # drop L0_norm_lb constraint for any variable with rho_lb >= 0
    dropped_variables = []
    constraints_to_drop = []

    # drop alpha / L0_norm_ub / L0_norm_lb for ('Intercept')
    if input['drop_variables']:
        # drop L0_norm_ub/lb constraint for any variable with rho_ub/rho_lb >= 0
        sign_pos_ind = np.flatnonzero(coef_set.sign > 0)
        sign_neg_ind = np.flatnonzero(coef_set.sign < 0)
        constraints_to_drop.extend(["L0_norm_lb_" + str(j) for j in sign_pos_ind])
        constraints_to_drop.extend(["L0_norm_ub_" + str(j) for j in sign_neg_ind])

        # drop alpha for any variable where rho_ub = rho_lb = 0
        fixed_value_ind = np.flatnonzero(coef_set.ub == coef_set.lb)
        variables_to_drop = ["alpha_" + str(j) for j in fixed_value_ind]
        vars.delete(variables_to_drop)
        dropped_variables += variables_to_drop
        alpha_names = [alpha_names[j] for j in range(P) if alpha_names[j] not in dropped_variables]

    if has_intercept:
        intercept_idx = coef_set.variable_names.index('(Intercept)')
        intercept_alpha_name = 'alpha_' + str(intercept_idx)
        vars.delete([intercept_alpha_name])

        alpha_names.remove(intercept_alpha_name)
        dropped_variables.append(intercept_alpha_name)

        print_from_function("dropped L0 indicator for '(Intercept)'")
        constraints_to_drop.extend(["L0_norm_ub_" + str(intercept_idx), "L0_norm_lb_" + str(intercept_idx)])

    if len(constraints_to_drop) > 0:
        constraints_to_drop = list(set(constraints_to_drop))
        cons.delete(constraints_to_drop)

    # indices
    indices = {
        'n_variables': vars.get_num(),
        'n_constraints': cons.get_num(),
        'names': vars.get_names(),
        'loss_names': loss_names,
        'rho_names': rho_names,
        'alpha_names': alpha_names,
        'loss': vars.get_indices(loss_names),
        'rho': vars.get_indices(rho_names),
        'alpha': vars.get_indices(alpha_names),
        'L0_reg_ind': L0_reg_ind,
        'C_0_rho': C_0_rho,
        'C_0_alpha': mip.objective.get_linear(alpha_names) if len(alpha_names) > 0 else [],
        }

    if include_auxillary_variable_for_objval:
        indices.update({
            'objval_name': objval_auxillary_name,
            'objval': vars.get_indices(objval_auxillary_name)[0],
            })

    if include_auxillary_variable_for_L0_norm:
        indices.update({
            'L0_norm_name': L0_norm_auxillary_name,
            'L0_norm': vars.get_indices(L0_norm_auxillary_name)[0],
            })

    # officially change the problem to LP if variables are relaxed
    if input['relax_integer_variables']:
        old_problem_type = mip.problem_type[mip.get_problem_type()]
        mip.set_problem_type(mip.problem_type.LP)
        new_problem_type = mip.problem_type[mip.get_problem_type()]
        print_from_function("changed problem type from %s to %s" % (old_problem_type, new_problem_type))

    if input['set_cplex_cutoffs'] and not input['relax_integer_variables']:
        mip.parameters.mip.tolerances.lowercutoff.set(objval_min)
        mip.parameters.mip.tolerances.uppercutoff.set(objval_max)

    return mip, indices
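A hedged usage sketch: it assumes a riskslim CoefficientSet named coef_set has already been built elsewhere, and only shows how the returned (mip, indices) pair is typically solved and read back.

mip, indices = create_risk_slim(coef_set, {'C_0': 0.01, 'print_flag': True})
mip.solve()
rho = mip.solution.get_values(indices['rho'])       # fitted integer coefficients
alpha = mip.solution.get_values(indices['alpha'])   # L0 indicator values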
Example #17
    def Quadratic_constraint(self):
        """Adds Quadratic constraint to the model's Gurobi/Cplex Interface.
        (x-mu).T @ inv(cov) @ (x-mu) <= chi-square
        Note: This one creates one ellipsoidal constraint for all the metabolites that has non zero or non 'nan' formation energy, irrespective of the magnitude of variance. if the model is infeasible after adding this constraint, refer to util_func.py, find_correlated metabolites to add different ellipsoidal constraints to high variance and normal compounds to avoid possible numerical issues.

        Unable to retrieve quadratic constraints in Gurobi model, can see the QC when printed.

        :raises NotImplementedError: Implemented only for Gurobi/Cplex interfaces.
        :return: [description]
        :rtype: [type]
        """

        # Pick indices of components present in the current model
        model_component_indices = [
            i for i in range(self.compound_vector_matrix.shape[1])
            if np.any(self.compound_vector_matrix[:, i])
        ]

        # Reduced the compound_vector to contain only the non zero entries
        model_compound_vector = self.compound_vector_matrix[:,
                                                            model_component_indices]

        # Now extract the sub covariance matrix containing only the components present in the model
        component_model_covariance = covariance[:, model_component_indices][
            model_component_indices, :]

        # Now separate the compounds that have variance > 1000 and others to avoid numerical issues
        high_variance_indices = np.where(
            np.diag(component_model_covariance) > 1000)[0]
        low_variance_indices = np.where(
            np.diag(component_model_covariance) < 1000)[0]

        # Calculate cholesky matrix for two different covariance matrices
        if len(low_variance_indices) > 0:
            small_component_covariance = component_model_covariance[:, low_variance_indices][
                low_variance_indices, :]
            cholesky_small_variance = matrix_decomposition(
                small_component_covariance)
            chi2_value_small = stats.chi2.isf(
                q=0.05, df=cholesky_small_variance.shape[1]
            )  # Chi-square value to map confidence interval

            for i in high_variance_indices:
                zeros_axis = np.zeros((cholesky_small_variance.shape[1], ))
                cholesky_small_variance = np.insert(cholesky_small_variance,
                                                    i,
                                                    zeros_axis,
                                                    axis=0)

            metabolite_sphere_small = (
                model_compound_vector @ cholesky_small_variance
            )  # This is a fixed term compound_vector @ cholesky

        if len(high_variance_indices) > 0:
            large_component_covariance = component_model_covariance[:, high_variance_indices][
                high_variance_indices, :]  # Covariance matrix for the high variance components

            cholesky_large_variance = matrix_decomposition(
                large_component_covariance)
            chi2_value_high = stats.chi2.isf(
                q=0.05, df=cholesky_large_variance.shape[1])

            # Insert empty rows for the low_variance_components
            for i in low_variance_indices:
                zeros_axis = np.zeros((cholesky_large_variance.shape[1], ))
                cholesky_large_variance = np.insert(cholesky_large_variance,
                                                    i,
                                                    zeros_axis,
                                                    axis=0)
            metabolite_sphere_large = (
                model_compound_vector @ cholesky_large_variance
            )  # This is a fixed term compound_vector @ cholesky

        proton_indices = [
            self.metabolites.index(metabolite)
            for metabolite in self.metabolites
            if metabolite.equilibrator_accession is not None
            if metabolite.equilibrator_accession.inchi_key == PROTON_INCHI_KEY
        ]  # Get indices of protons in metabolite list to avoid double correcting them for concentrations

        if self.solver.__class__.__module__ == "optlang.cplex_interface":

            from cplex import Cplex, SparsePair, SparseTriple

            # Instantiate Cplex model
            cplex_model = Cplex()

            rand_str = "".join(
                choices(string.ascii_lowercase + string.digits, k=6))
            # write cplex model to mps file in random directory and re read
            with tempfile.TemporaryDirectory() as td:
                temp_filename = os.path.join(td, rand_str + ".mps")
                self.solver.problem.write(temp_filename)
                cplex_model.read(temp_filename)

            # Stop printing output in cplex
            cplex_model.set_log_stream(None)
            cplex_model.set_error_stream(None)
            cplex_model.set_warning_stream(None)
            cplex_model.set_results_stream(None)

            # Remove the unnecessary variables and constraints
            remove_vars = [
                var for var in cplex_model.variables.get_names()
                if var.startswith("component_") or var.startswith("dG_err_")
            ]  # Remove error variables

            remove_constrs = [
                cons for cons in cplex_model.linear_constraints.get_names()
                if cons.startswith("delG_") or cons.startswith("std_dev_")
            ]  # Remove delG constraint and re-add with component variables

            cplex_model.linear_constraints.delete(
                remove_constrs)  # Removing constr
            cplex_model.variables.delete(remove_vars)  # Removing Vars

            # QC for small variance components
            if len(low_variance_indices) > 0:
                indices_sphere1 = cplex_model.variables.add(
                    names=[
                        "Sphere1_{}".format(i)
                        for i in range(cholesky_small_variance.shape[1])
                    ],
                    lb=[-1] * cholesky_small_variance.shape[1],
                    ub=[1] * cholesky_small_variance.shape[1],
                )  # Adding independent component variables to the model, store the variable indices

                # Add the Sphere constraint
                cplex_model.quadratic_constraints.add(
                    quad_expr=SparseTriple(
                        ind1=indices_sphere1,
                        ind2=indices_sphere1,
                        val=len(indices_sphere1) * [1],
                    ),
                    sense="L",
                    rhs=1,
                    name="unit_normal_small_variance",
                )
            else:
                indices_sphere1 = [
                ]  # Just to adjust the matrix dimensions later

            # QC for large variance components
            if len(high_variance_indices) > 0:
                indices_sphere2 = cplex_model.variables.add(
                    names=[
                        "Sphere2_{}".format(i)
                        for i in range(cholesky_large_variance.shape[1])
                    ],
                    lb=[-1] * cholesky_large_variance.shape[1],
                    ub=[1] * cholesky_large_variance.shape[1],
                )  # Independent large variance components

                cplex_model.quadratic_constraints.add(
                    quad_expr=SparseTriple(
                        ind1=indices_sphere2,
                        ind2=indices_sphere2,
                        val=len(indices_sphere2) * [1],
                    ),
                    rhs=1,
                    sense="L",
                    name="unit_normal_high_variance",
                )
            else:
                indices_sphere2 = []  # Balancing matrix dimensions

            concentration_variables = [
                "lnc_{}".format(metabolite.id)
                for metabolite in self.metabolites
            ]

            # Add the delG constraints
            for reaction in self.reactions:
                if reaction.id in self.Exclude_reactions:
                    continue
                rxn_stoichiometry = reaction.cal_stoichiometric_matrix()
                rxn_stoichiometry = rxn_stoichiometry[np.newaxis, :]

                if len(low_variance_indices) > 0:
                    coefficient_matrix_small_variance = (
                        np.sqrt(chi2_value_small) *
                        rxn_stoichiometry @ metabolite_sphere_small
                    )  # Coefficient array for small variance ellipsoid
                else:
                    coefficient_matrix_small_variance = np.array(())

                if len(high_variance_indices) > 0:
                    coefficient_matrix_large_variance = (
                        np.sqrt(chi2_value_high) *
                        rxn_stoichiometry @ metabolite_sphere_large
                    )  # Coefficient array for large variance ellipsoid
                else:
                    coefficient_matrix_large_variance = np.array(())

                concentration_coefficients = RT * rxn_stoichiometry
                concentration_coefficients[0, proton_indices] = 0

                coefficients_forward = np.hstack((
                    np.array((1)),
                    -1 * concentration_coefficients.flatten(),
                    -1 * coefficient_matrix_small_variance.flatten(),
                    -1 * coefficient_matrix_large_variance.flatten(),
                ))

                coefficients_reverse = np.hstack((
                    np.array((1)),
                    concentration_coefficients.flatten(),
                    coefficient_matrix_small_variance.flatten(),
                    coefficient_matrix_large_variance.flatten(),
                ))

                variable_order_forward = (
                    ["dG_{}".format(reaction.forward_variable.name)] +
                    concentration_variables + list(indices_sphere1) +
                    list(indices_sphere2))
                variable_order_reverse = (
                    ["dG_{}".format(reaction.reverse_variable.name)] +
                    concentration_variables + list(indices_sphere1) +
                    list(indices_sphere2))

                rhs = reaction.delG_prime + reaction.delG_transport

                cplex_model.linear_constraints.add(
                    lin_expr=[
                        SparsePair(
                            ind=variable_order_forward,
                            val=coefficients_forward.tolist(),
                        )
                    ],
                    senses=["E"],
                    rhs=[rhs],
                    names=["delG_{}".format(reaction.forward_variable.name)],
                )  # delG constraint for forward reaction

                cplex_model.linear_constraints.add(
                    lin_expr=[
                        SparsePair(
                            ind=variable_order_reverse,
                            val=coefficients_reverse.tolist(),
                        )
                    ],
                    senses=["E"],
                    rhs=[-rhs],
                    names=["delG_{}".format(reaction.reverse_variable.name)],
                )  # delG constraint for reverse reaction

            return cplex_model

        elif self.solver.__class__.__module__ == "optlang.gurobi_interface":
            from gurobipy import GRB, LinExpr

            gurobi_model = self.solver.problem.copy()

            # Remove unnecessary variables and constraints and rebuild  appropriate ones
            remove_vars = [
                var for var in gurobi_model.getVars()
                if var.VarName.startswith("component_")
                or var.VarName.startswith("dG_err_")
            ]

            remove_constrs = [
                cons for cons in gurobi_model.getConstrs()
                if cons.ConstrName.startswith("delG_")
                or cons.ConstrName.startswith("std_dev_")
            ]

            gurobi_model.remove(remove_constrs + remove_vars)

            # Add sphere variables for smaller set and larger set separately
            if len(low_variance_indices) > 0:
                for i in range(cholesky_small_variance.shape[1]):
                    gurobi_model.addVar(lb=-1,
                                        ub=1,
                                        name="Sphere1_{}".format(i))

                gurobi_model.update()
                sphere1_variables = [
                    var for var in gurobi_model.getVars()
                    if var.VarName.startswith("Sphere1_")
                ]

                gurobi_model.addQConstr(
                    np.sum(np.square(np.array(sphere1_variables))) <= 1,
                    name="unit_normal_small_variance",
                )
                gurobi_model.update()
            else:
                sphere1_variables = []

            # QC for large variance components
            if len(high_variance_indices) > 0:
                for i in range(cholesky_large_variance.shape[1]):
                    gurobi_model.addVar(lb=-1,
                                        ub=1,
                                        name="Sphere2_{}".format(i))

                gurobi_model.update()
                sphere2_variables = [
                    var for var in gurobi_model.getVars()
                    if var.VarName.startswith("Sphere2_")
                ]

                gurobi_model.addQConstr(
                    np.sum(np.square(np.array(sphere2_variables))) <= 1,
                    name="unit_normal_high_variance",
                )
                gurobi_model.update()
            else:
                sphere2_variables = []

            # Create a list of metabolite concentration variables
            concentration_variables = []
            for metabolite in self.metabolites:
                varname = "lnc_{}".format(metabolite.id)
                conc_var = gurobi_model.getVarByName(varname)
                concentration_variables.append(conc_var)

            # Add the delG constraints
            for reaction in self.reactions:
                if reaction.id in self.Exclude_reactions:
                    continue
                rxn_stoichiometry = reaction.cal_stoichiometric_matrix()
                rxn_stoichiometry = rxn_stoichiometry[np.newaxis, :]

                if len(low_variance_indices) > 0:
                    coefficient_matrix_small_variance = (
                        np.sqrt(chi2_value_small) *
                        rxn_stoichiometry @ metabolite_sphere_small
                    )  # Coefficient array for small variance ellipsoid
                else:
                    coefficient_matrix_small_variance = np.array(())

                if len(high_variance_indices) > 0:
                    coefficient_matrix_large_variance = (
                        np.sqrt(chi2_value_high) *
                        rxn_stoichiometry @ metabolite_sphere_large
                    )  # Coefficient array for large variance ellipsoid
                else:
                    coefficient_matrix_large_variance = np.array(())

                concentration_coefficients = RT * rxn_stoichiometry
                concentration_coefficients[0, proton_indices] = 0

                coefficients_forward = np.hstack((
                    -1 * concentration_coefficients.flatten(),
                    -1 * coefficient_matrix_small_variance.flatten(),
                    -1 * coefficient_matrix_large_variance.flatten(),
                ))

                coefficients_reverse = np.hstack((
                    concentration_coefficients.flatten(),
                    coefficient_matrix_small_variance.flatten(),
                    coefficient_matrix_large_variance.flatten(),
                ))

                variable_order = (concentration_variables + sphere1_variables +
                                  sphere2_variables)

                delG_err_forward = LinExpr(coefficients_forward.tolist(),
                                           variable_order)
                delG_err_reverse = LinExpr(coefficients_reverse.tolist(),
                                           variable_order)

                delG_for_var = gurobi_model.getVarByName("dG_{}".format(
                    reaction.forward_variable.name))
                delG_rev_var = gurobi_model.getVarByName("dG_{}".format(
                    reaction.reverse_variable.name))
                rhs = reaction.delG_prime + reaction.delG_transport

                gurobi_model.addConstr(
                    delG_for_var + delG_err_forward,
                    GRB.EQUAL,
                    rhs,
                    name="delG_{}".format(reaction.forward_variable.name),
                )

                gurobi_model.addConstr(
                    delG_rev_var + delG_err_reverse,
                    GRB.EQUAL,
                    -rhs,
                    name="delG_{}".format(reaction.reverse_variable.name),
                )

            gurobi_model.update()

            return gurobi_model

        else:
            logging.error(
                "Current solver doesn't support problems of type MIQC")
            raise NotImplementedError("Current solver doesn't support QC")
Beispiel #18
0
def create_problem(cobra_model, quadratic_component=None, **kwargs):
    """Solver-specific method for constructing a solver problem from
    a cobra.Model.  This can be tuned for performance using kwargs


    """
    # Process parameter defaults
    the_parameters = parameter_defaults
    if kwargs:
        the_parameters = parameter_defaults.copy()
        the_parameters.update(kwargs)
    if 'relax_b' in the_parameters:
        relax_b = the_parameters.pop("relax_b")
        warn('need to reimplement relax_b')
        relax_b = False
    else:
        relax_b = False

    # Begin problem creation
    lp = Cplex()
    for k, v in iteritems(the_parameters):
        set_parameter(lp, k, v)
    objective_coefficients = [
        float(x.objective_coefficient) for x in cobra_model.reactions
    ]
    lower_bounds = [_float(x.lower_bound) for x in cobra_model.reactions]
    upper_bounds = [_float(x.upper_bound) for x in cobra_model.reactions]
    variable_names = cobra_model.reactions.list_attr("id")
    variable_kinds = [
        variable_kind_dict[x.variable_kind] for x in cobra_model.reactions
    ]
    # Cplex decides that the problem is a MIP if variable_kinds are supplied
    # even if there aren't any integers.
    if variable_kind_dict['integer'] in variable_kinds:
        lp.variables.add(obj=objective_coefficients,
                         lb=lower_bounds,
                         ub=upper_bounds,
                         names=variable_names,
                         types=variable_kinds)
    else:
        lp.variables.add(obj=objective_coefficients,
                         lb=lower_bounds,
                         ub=upper_bounds,
                         names=variable_names)

    constraint_sense = []
    constraint_names = []
    constraint_limits = []

    for x in cobra_model.metabolites:
        constraint_sense.append(x._constraint_sense)
        constraint_names.append(x.id)
        constraint_limits.append(float(x._bound))

    the_linear_expressions = []
    # NOTE: This won't work with metabolites that aren't in any reaction
    for the_metabolite in cobra_model.metabolites:
        variable_list = []
        coefficient_list = []
        for the_reaction in the_metabolite._reaction:
            variable_list.append(the_reaction.id)
            coefficient_list.append(
                _float(the_reaction._metabolites[the_metabolite]))
        the_linear_expressions.append(
            SparsePair(ind=variable_list, val=coefficient_list))
    # Set objective to quadratic program
    if quadratic_component is not None:
        set_quadratic_objective(lp, quadratic_component)

    if relax_b:
        lp.linear_constraints.add(lin_expr=the_linear_expressions,
                                  rhs=constraint_limits,
                                  range_values=list(range_values),
                                  senses=constraint_sense,
                                  names=constraint_names)

    else:
        lp.linear_constraints.add(lin_expr=the_linear_expressions,
                                  rhs=constraint_limits,
                                  senses=constraint_sense,
                                  names=constraint_names)

    # Set the problem type as cplex doesn't appear to do this correctly
    problem_type = Cplex.problem_type.LP
    if variable_kind_dict['integer'] in variable_kinds:
        if quadratic_component is not None:
            problem_type = Cplex.problem_type.MIQP
        else:
            problem_type = Cplex.problem_type.MILP
    elif quadratic_component is not None:
        problem_type = Cplex.problem_type.QP
    lp.set_problem_type(problem_type)
    return (lp)
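A hedged usage sketch for the helper above: cobra_model stands in for any existing cobra.Model, and only documented CPLEX calls are used.

# Minimal usage sketch (cobra_model is an assumed, pre-built cobra.Model).
lp = create_problem(cobra_model)
lp.set_results_stream(None)          # silence solver output
lp.solve()
print(lp.solution.get_status_string())
print(lp.solution.get_objective_value())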
Beispiel #19
0
from cplex import Cplex, infinity
from cplex.exceptions import CplexError

problem = Cplex()

problem.objective.set_sense(problem.objective.sense.minimize)

objective = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

constraints_matrix = [
    [1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    [1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
    [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0],
    [0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
    [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
    [0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
    [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0],
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0],
]

col_names = [
    "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12"
]

row_names = [
    "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10", "c11", "c12"
]
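The snippet above stops after defining the data, so the arrays are never attached to the Cplex object. A hedged completion is sketched below (binary covering variables, one ">= 1" constraint per row); this wiring is an assumption, not part of the original example.

from cplex import SparsePair

# Hedged completion: add binary variables, one covering constraint per row, solve.
problem.variables.add(obj=objective,
                      types=[problem.variables.type.binary] * len(col_names),
                      names=col_names)
rows = [SparsePair(ind=col_names, val=row) for row in constraints_matrix]
problem.linear_constraints.add(lin_expr=rows,
                               senses=["G"] * len(row_names),
                               rhs=[1.0] * len(row_names),
                               names=row_names)
problem.solve()
print(problem.solution.get_values())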
Beispiel #20
0
def zf_std(a):
    # number of vertices
    n,_ = a.shape
    # Edge set
    edges = []
    m = 0 # number of edges
    for i in range(n):
        for j in range(n):
            if(a[i,j]==1):
                edges.append((i,j))
                m += 1
    # objective function
    T = n-1 # maximal propagation time
    obj = concatenate((ones(n),zeros(n),zeros(m)))
    # lower and upper bounds
    lb = concatenate((zeros(n),zeros(n),zeros(m)))
    ub = concatenate((ones(n),T*ones(n),ones(m)))
    # constraints
    count = 0; sense = ""
    rows = []; cols = []; vals = []; rhs = []
    # constraint 1
    for v in range(n):
        # s_{v}
        rows.append(count)
        cols.append(v)
        vals.append(1)
        for k in range(m):
            if(edges[k][1]==v):
                # y_{e}, where e = (u,v)
                rows.append(count)
                cols.append(2*n+k)
                vals.append(1)
        # = 1
        rhs.append(1)
        sense += "E"
        count += 1
    # constraint 2
    for k in range(m):
        # x_{u} - x_{v} + (T+1)y_{e}, where e = (u,v)
        rows.extend([count,count,count])
        cols.extend([n+edges[k][0],n+edges[k][1],2*n+k])
        vals.extend([1,-1,T+1])
        # <= T
        rhs.append(T)
        sense  += "L"
        count += 1
    # constraint 3
    for k in range(m):
        for w in range(n):
            if(w!=edges[k][1] and a[edges[k][0],w]==1):
                # x_{w} - x_{v} + (T+1)y_{e}, where e = (u,v) and w!=v, u~w
                rows.extend([count,count,count])
                cols.extend([n+w,n+edges[k][1],2*n+k])
                vals.extend([1,-1,T+1])
                # <= T
                rhs.append(T)
                sense += "L"
                count += 1
    # cplex problem variable
    prob = Cplex()
    # quiet results
    prob.set_results_stream(None)
    # minimization problem
    prob.objective.set_sense(prob.objective.sense.minimize)
    # problem variables
    prob.variables.add(obj=obj, lb=lb, ub=ub)
    for j in range(prob.variables.get_num()):
        prob.variables.set_types(j,prob.variables.type.integer)
    # linear constraints
    prob.linear_constraints.add(rhs=rhs, senses=sense)
    prob.linear_constraints.set_coefficients(zip(rows, cols, vals))
    # write lp file
    # prob.write("zero_forcing.lp")
    # alg method
    alg = prob.parameters.lpmethod.values
    prob.parameters.lpmethod.set(alg.auto)
    # solve problem
    prob.solve()
    # solution variables
    var = prob.solution.get_values()
    s = var[0:n]
    x = var[n:2*n]
    y = var[2*n:]
    opt = prob.solution.get_objective_value()
    # return
    return opt, s, x, y
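A short usage sketch for zf_std; the path on three vertices is purely illustrative (its zero forcing number is 1), and numpy is assumed to be available as in the function body.

from numpy import array

# Hedged usage sketch: zero forcing number of the path P3.
a = array([[0, 1, 0],
           [1, 0, 1],
           [0, 1, 0]])
opt, s, x, y = zf_std(a)
print(opt)  # expected objective value: 1.0
print(s)    # indicator vector of the chosen zero forcing set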
Beispiel #21
0
def run_docplex_check_list():
    check_platform()
    from docplex.version import latest_cplex_major, latest_cplex_minor
    cplex_latest_version_as_tuple = (latest_cplex_major, latest_cplex_minor)

    diagnostics = []

    # check requirements
    for rm in ["six", "enum", "cloudpickle"]:
        if not check_import(rm):
            diagnostics.append(
                "Module {0} is missing, run: pip install {0}".format(rm))

    # check pandas
    try:
        import pandas as pd  # @UnusedImport
        # noinspection PyUnresolvedReferences
        from pandas import DataFrame
        DataFrame({})
    except ImportError:
        print("-- pandas is not present, some features might be unavailable.")

    from docplex.mp.environment import Environment
    Environment().print_information()

    # check cplex
    try:
        # noinspection PyUnresolvedReferences
        from cplex import Cplex

        cpx = Cplex()
        cpxv = cpx.get_version()
        cpxvt = tuple(float(x) for x in cpx.get_version().split("."))[:2]
        lcpxv = ".".join(str(z) for z in cplex_latest_version_as_tuple)
        if cpxvt < cplex_latest_version_as_tuple:
            print(
                "Warning: Your cplex version {0} is not the latest, {1} is available"
                .format(cpxv, lcpxv))
        elif cpxvt > cplex_latest_version_as_tuple:
            print(
                "* Your cplex version {0} is ahead of the latest DOcplex-compatible version {1}, this might not be compatible."
                .format(cpxv, lcpxv))
        else:
            print("* Your cplex version {0} is the latest available".format(
                cpxv))
        cpx.end()

    except ImportError as ie:
        Cplex = None
        diagnostics.append("No local installation of CPLEX has been found.")
        print("Cplex DLL not found, error importing cplex: {0!s}".format(ie))
        check_python_path(diagnostics)
    # check creation of an empty model...

    try:
        if Cplex:
            # noinspection PyUnresolvedReferences
            from docplex.mp.model import Model
            Model()
            # promotional?
            if Model.is_cplex_ce():
                print(
                    "! Cplex promotional version, limited to 1000 variables, 1000 constraints"
                )
                diagnostics.append(
                    "Your local CPLEX edition is limited. Consider purchasing a full license."
                )

    except ImportError:
        print("Docplex is not present: cannot import class docplex.mp.model")
        diagnostics.append("Your installation of DOcplex may be corrupted.")
    except Exception as e:
        print(
            "Exception raised when creating one model instance: {0!s}".format(
                e))
        diagnostics.append("Your installation of DOcplex may be corrupted.")

    if diagnostics:
        print("\n!! diagnostics: {0}".format(len(diagnostics)))
        for s in diagnostics:
            print("  -- {0}".format(s))
    else:
        print("> No problem found: you're all set!")
Beispiel #22
0
def variability_legacy_cplex(
    model,
    variable_list=None,
    params=False,
):
    """Custom function to perform TVA on MIQC problem using gurobi.

    Parameters
    ----------
    model : multitfa.core.tmodel
        multitfa model after thermodynamic constraints are added
    variable_list : List, optional
        List of variables to perform TVA on, by default None
    params : Bool, optional
        If True, sets the Timelimit option to 300 sec and reduces the MIP gap to 0.005

    Returns
    -------
    pd.DataFrame
        Dataframe of min max ranges of variables

    Raises
    ------
    ValueError
        [description]
    """
    # Instead of copying the whole model, just copy the cplex solver object by writing to a file and reading again.
    from cplex import Cplex, SparsePair

    tmp_dir = (os.path.normpath(os.path.dirname(os.path.abspath(__file__))) +
               os.sep + os.pardir + os.sep + "tmp")
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    # Instantiate Cplex model
    cplex_model = Cplex()
    rand_str = "".join(choices(string.ascii_lowercase + string.digits, k=6))

    # write cplex model to mps file and re read
    with tempfile.TemporaryDirectory() as td:
        temp_filename = os.path.join(td, rand_str + ".mps")
        model.cplex_interface.write(temp_filename)
        cplex_model.read(temp_filename)

    cplex_model.set_log_stream(None)
    cplex_model.set_error_stream(None)
    cplex_model.set_warning_stream(None)
    cplex_model.set_results_stream(None)

    if params:
        cplex_model.parameters.mip.tolerances.mipgap.set(0.005)
        cplex_model.parameters.timelimit.set(300)
        cplex_model.parameters.mip.limits.probetime.set(300)

    # Make shorts for sense
    max_sense = cplex_model.objective.sense.maximize
    min_sense = cplex_model.objective.sense.minimize

    if variable_list is None:
        variables = model.cplex_interface.variables.get_names()
    else:
        variables = [var for var in variable_list]

    vars_list_cplex = cplex_model.variables.get_names()

    fluxes_min = np.empty(len(variables))
    fluxes_max = np.empty(len(variables))
    rxn_name = list()

    rxn_ids = [rxn.id for rxn in model.reactions]

    for i in range(len(variables)):
        # Reset objective vector for each iteration
        for varname in vars_list_cplex:
            cplex_model.objective.set_linear(varname, 0)

        # if the variable is reactions optimize for forward - reverse variables else optimize for the variable
        if variables[i] in rxn_ids:
            rxn = model.reactions.get_by_id(variables[i])
            cplex_model.objective.set_linear([(rxn.forward_variable.name, 1),
                                              (rxn.reverse_variable.name, -1)])

        else:
            cplex_model.objective.set_linear(variables[i], 1)

        rxn_name.append(variables[i])

        # minimization
        cplex_model.objective.set_sense(min_sense)
        cplex_model.solve()
        objective_value = cplex_model.solution.get_objective_value()
        fluxes_min[i] = objective_value

        # maximization
        cplex_model.objective.set_sense(max_sense)
        cplex_model.solve()
        objective_value = cplex_model.solution.get_objective_value()
        fluxes_max[i] = objective_value

    return DataFrame({
        "minimum": Series(index=rxn_name, data=fluxes_min),
        "maximum": Series(index=rxn_name, data=fluxes_max),
    })
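A hedged usage sketch for variability_legacy_cplex; tfa_model is a placeholder for an existing multitfa model with thermodynamic constraints, and the variable names are illustrative only.

# Hypothetical usage: ranges for two reactions and one delG variable.
ranges = variability_legacy_cplex(
    tfa_model,
    variable_list=["PGI", "PFK", "dG_PGI"],
    params=True,
)
print(ranges)  # pandas DataFrame with 'minimum' and 'maximum' columns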
Beispiel #23
0
def _optimize_cplex(cobra_model,
                    new_objective=None,
                    objective_sense='maximize',
                    min_norm=0,
                    the_problem=None,
                    tolerance_optimality=1e-6,
                    tolerance_feasibility=1e-6,
                    tolerance_integer=1e-9,
                    tolerance_barrier=1e-8,
                    error_reporting=None,
                    print_solver_time=False,
                    lp_method=1,
                    lp_parallel=0,
                    copy_problem=False,
                    relax_b=None,
                    quadratic_component=None,
                    reuse_basis=True,
                    update_problem_reaction_bounds=True):
    """Uses the ILOG/CPLEX (www.ibm.com/software/integration/optimization/cplex-optimizer/)
    optimizer to perform an optimization on cobra_model for the objective_coefficients in
    cobra_model._objective_coefficients based on the objective sense.

    cobra_model: A cobra.Model object

    new_objective: Reaction, String, or Integer referring to a reaction in
    cobra_model.reactions to set as the objective.  Currently, only supports single
    objective coefficients.  Will expand to include mixed objectives.

    objective_sense: 'maximize' or 'minimize'

    min_norm: not implemented

    the_problem: None or a problem object for the specific solver that can be used to hot
    start the next solution.

    tolerance_optimality: Solver tolerance for optimality.

    tolerance_feasibility: Solver tolerance for feasibility.

    error_reporting: None or True to disable or enable printing errors encountered
    when trying to find the optimal solution.
    
    print_solver_time: False or True.  Indicates if the time to calculate the solution
    should be displayed.

    quadratic_component: None or 
          scipy.sparse.dok of dim(len(cobra_model.reactions),len(cobra_model.reactions))
         If not None:
          Solves quadratic programming problems for cobra_models of the form:
          minimize: 0.5 * x' * quadratic_component * x + cobra_model._objective_coefficients' * x
          such that,
            cobra_model._lower_bounds <= x <= cobra_model._upper_bounds
            cobra_model._S * x (cobra_model._constraint_sense) cobra_model._b
            
    reuse_basis: Boolean.  If True and the_problem is a model object for the solver,
    attempt to hot start the solution.


    update_problem_reaction_bounds: Boolean.  Set to True if you're providing the_problem
    and you've modified reaction bounds on your cobra_model since creating the_problem.  Only
    necessary for CPLEX

    method for linear optimization: 0 = automatic
    1 = primal simplex, 2 = dual simplex, 3 = network simplex,
    4 = barrier, 5 = sifting, 6 = concurrent dual, barrier, and primal
    
    lp.solve() with Salmonella model:
         cold start: 0.05 seconds
         hot start: 0.05 seconds (slow due to copying the LP)

    """
    if relax_b is not None:
        raise Exception('Need to reimplement constraint relaxation')
    from numpy import array, nan, zeros
    from cobra.flux_analysis.objective import update_objective
    from cobra.solvers.legacy import status_dict, variable_kind_dict

    if error_reporting == 'time' or print_solver_time:
        from time import time
        start_time = time()
    try:
        from cplex import Cplex, SparsePair
        variable_kind_dict = eval(variable_kind_dict['cplex'])
        status_dict = eval(status_dict['cplex'])
    except ImportError as e:
        import sys
        if 'wrong architecture' in str(e) and sys.maxsize > 2**32:
            print('CPLEX python API is not 64-bit.  Please contact your IBM representative')
        else:
            print(e)
    if new_objective and new_objective != 'update problem':
        update_objective(cobra_model, new_objective)
    if the_problem == None or the_problem in ['return', 'setup', 'parallel'] \
           or not isinstance(the_problem, Cplex):
        lp = Cplex()
        #Using the new objects
        #NOTE: This might be slow
        objective_coefficients = []
        lower_bounds = []
        upper_bounds = []
        variable_names = []
        variable_kinds = []
        [(objective_coefficients.append(x.objective_coefficient),
          lower_bounds.append(x.lower_bound),
          upper_bounds.append(x.upper_bound), variable_names.append(x.id),
          variable_kinds.append(variable_kind_dict[x.variable_kind]))
         for x in cobra_model.reactions]
        #Cplex decides that the problem is a MIP if variable_kinds are supplied
        #even if there aren't any integers.
        if lp.variables.type.integer in variable_kinds:
            lp.variables.add(obj=objective_coefficients,
                             lb=lower_bounds,
                             ub=upper_bounds,
                             names=variable_names,
                             types=variable_kinds)
        else:
            lp.variables.add(obj=objective_coefficients,
                             lb=lower_bounds,
                             ub=upper_bounds,
                             names=variable_names)

        if relax_b:
            raise Exception('need to reimplement relax_b')
            ## range_values = zeros(len(cobra_model.metabolites))
            ## b_values = array([x._bound for x in cobra_model.metabolties])
            ## for the_nonzero in list(b_values.nonzero()[0]):
            ##     range_values[the_nonzero] = -relax_b
        constraint_sense = []
        constraint_names = []
        constraint_limits = []
        [(constraint_sense.append(x._constraint_sense),
          constraint_names.append(x.id), constraint_limits.append(x._bound))
         for x in cobra_model.metabolites]

        the_linear_expressions = []
        #NOTE: This won't work with metabolites that aren't in any reaction
        for the_metabolite in cobra_model.metabolites:
            variable_list = []
            coefficient_list = []
            for the_reaction in the_metabolite._reaction:
                variable_list.append(the_reaction.id)
                coefficient_list.append(
                    the_reaction._metabolites[the_metabolite])
            the_linear_expressions.append(
                SparsePair(ind=variable_list, val=coefficient_list))
        if quadratic_component is not None:
            if not hasattr(quadratic_component, 'todok'):
                raise Exception(
                    'quadratic component must be a scipy.sparse type array')
            quadratic_component_scaled = quadratic_component.todok()

            lp.parameters.emphasis.numerical.set(1)
            for k, v in quadratic_component_scaled.items():
                lp.objective.set_quadratic_coefficients(
                    int(k[0]), int(k[1]), v)

        if relax_b:
            lp.linear_constraints.add(lin_expr=the_linear_expressions,
                                      rhs=constraint_limits,
                                      range_values=list(range_values),
                                      senses=constraint_sense,
                                      names=constraint_names)

        else:
            lp.linear_constraints.add(lin_expr=the_linear_expressions,
                                      rhs=constraint_limits,
                                      senses=constraint_sense,
                                      names=constraint_names)

        if error_reporting == 'time':
            print('setup new problem: ' + repr(time() - start_time))
            start_time = time()

        #Set the problem type as cplex doesn't appear to do this correctly
        problem_type = Cplex.problem_type.LP
        if lp.variables.type.integer in variable_kinds:
            if quadratic_component is not None:
                problem_type = Cplex.problem_type.MIQP
            else:
                problem_type = Cplex.problem_type.MILP
        elif quadratic_component is not None:
            problem_type = Cplex.problem_type.QP
        lp.set_problem_type(problem_type)

    else:
        if copy_problem:
            lp = Cplex(the_problem)
            if error_reporting == 'time':
                print('copy problem: ' + repr(time() - start_time))
                start_time = time()

        else:
            lp = the_problem

        if new_objective:
            lp.objective.set_linear([(x.id, float(x.objective_coefficient))
                                     for x in cobra_model.reactions])
            if error_reporting == 'time':
                print('set lp objective: ' + repr(time() - start_time))
                start_time = time()
        #SPEED THIS UP
        if update_problem_reaction_bounds:
            lp.variables.set_upper_bounds([(x.id, float(x.upper_bound))
                                           for x in cobra_model.reactions])
            lp.variables.set_lower_bounds([(x.id, float(x.lower_bound))
                                           for x in cobra_model.reactions])

        if error_reporting == 'time':
            print('changed all bounds: ' + repr(time() - start_time))
            start_time = time()

    if objective_sense == 'maximize':
        lp.objective.set_sense(lp.objective.sense.maximize)
    else:
        lp.objective.set_sense(lp.objective.sense.minimize)
    if tolerance_optimality < 1e-10:
        lp.parameters.simplex.perturbation.constant.set(1)
        lp.parameters.simplex.pgradient.set(1)
        lp.parameters.emphasis.memory.set(1)
        #lp.parameters.simplex.tolerances.markowitz.set(.01)
        lp.parameters.advance.set(2)

    lp.parameters.simplex.tolerances.optimality.set(tolerance_optimality)
    lp.parameters.simplex.tolerances.feasibility.set(tolerance_feasibility)

    if lp.get_problem_type() in [
            Cplex.problem_type.LP, Cplex.problem_type.MILP
    ]:
        lp.parameters.lpmethod.set(lp_method)
    elif lp.get_problem_type() in [
            Cplex.problem_type.QP, Cplex.problem_type.MIQP
    ]:
        lp.parameters.qpmethod.set(lp_method)

    if lp_parallel > 1:
        lp.parameters.threads.set(lp_parallel)
    #lp.parameters.parallel.set(lp_parallel)
    lp.parameters.barrier.convergetol.set(tolerance_barrier)

    if the_problem == 'setup':
        return lp

    if not error_reporting:
        lp.set_results_stream(None)
        lp.set_warning_stream(None)
    if print_solver_time:
        start_time = time()
    if not isinstance(the_problem, Cplex):
        #TODO: set tolerance
        lp.solve()
        # Solve this LP with the simplex method.  Takes about 0.2 s without hot start
        lp.status = lp.solution.status[lp.solution.get_status()]
        if lp.status in status_dict:
            status = status_dict[lp.status]
        else:
            status = 'failed'
    else:
        if isinstance(the_problem, Cplex) and reuse_basis:
            try:
                the_basis = the_problem.solution.basis.get_basis()
                lp.start.set_basis(the_basis[0], the_basis[1])
                #TODO: Determine whether the primal or dual works best for the
                #problem of interest.  For the ME matrix the primal appears to
                #work best
                lp_method = 1
                lp.parameters.preprocessing.presolve.set(0)
                lp.parameters.lpmethod.set(lp_method)
            except:
                print('no basis in the_problem')
        #TODO: set tolerance and time limit
        #lp.parameters.timelimit.set()
        lp.solve()
        #If the solver takes more than 0.1 s with a hot start it is likely stuck
        lp.status = lp.solution.status[lp.solution.get_status()]
        if lp.status in status_dict:
            status = status_dict[lp.status]
        else:
            status = 'failed'
        if status != 'optimal':
            #Cycle through the different solver options, if a solution is not found
            for lp_method in (1, 2, 3, 4, 5, 6):
                lp = _optimize_cplex(
                    cobra_model,
                    new_objective=new_objective,
                    objective_sense=objective_sense,
                    min_norm=min_norm,
                    the_problem=None,
                    print_solver_time=print_solver_time,
                    tolerance_optimality=tolerance_optimality,
                    tolerance_feasibility=tolerance_feasibility,
                    lp_method=lp_method,
                    quadratic_component=quadratic_component)['the_problem']
                lp.status = lp.solution.status[lp.solution.get_status()]
                if lp.status in status_dict:
                    status = status_dict[lp.status]
                else:
                    status = 'failed'
                if status == 'optimal':
                    break
    if error_reporting == 'time':
        print('solver time: ' + repr(time() - start_time) +
              ' with method ' + repr(lp_method))
        start_time = time()

    if print_solver_time:
        print('cplex time: %f' % (time() - start_time))
    #TODO: It might be able to speed this up a little.
    if status == 'optimal':
        objective_value = lp.solution.get_objective_value()
        #This can be sped up a little
        x_dict = dict(zip(lp.variables.get_names(), lp.solution.get_values()))
        x = array(lp.solution.get_values())
        x = x.reshape(x.shape[0], 1)
        #MIP's don't have duals
        if lp.get_problem_type() in (Cplex.problem_type.MIQP,
                                     Cplex.problem_type.MILP):

            y = y_dict = None
        else:
            y_dict = dict(
                zip(lp.linear_constraints.get_names(),
                    lp.solution.get_dual_values()))
            y = array(lp.solution.get_dual_values())
            y = y.reshape(y.shape[0], 1)
    else:
        x = y = x_dict = y_dict = objective_value = None
        if error_reporting:
            print('cplex failed: %s' % lp.status)

    cobra_model.solution = the_solution = Solution(objective_value,
                                                   x=x,
                                                   x_dict=x_dict,
                                                   status=status,
                                                   y=y,
                                                   y_dict=y_dict)
    solution = {'the_problem': lp, 'the_solution': the_solution}
    return solution
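Since the docstring above describes quadratic_component as a scipy.sparse matrix of dimension len(reactions) x len(reactions), a hedged sketch of supplying one is shown below; the identity penalty and cobra_model are assumptions for illustration.

from scipy.sparse import dok_matrix

# Hypothetical QP call: penalize the squared flux of every reaction equally.
n_rxns = len(cobra_model.reactions)
Q = dok_matrix((n_rxns, n_rxns))
for i in range(n_rxns):
    Q[i, i] = 1.0  # contributes 0.5 * x' * Q * x to the objective

solution = _optimize_cplex(cobra_model,
                           objective_sense='minimize',
                           quadratic_component=Q)
lp = solution['the_problem']
print(lp.solution.get_objective_value())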
Beispiel #24
0
def build_polishing_mip(cpx,
                        polish_after_solutions=1,
                        polish_after_time=float('inf'),
                        display_flag=True):

    # copy mip
    polishing_mip = Cplex(cpx)
    p = polishing_mip.parameters

    # display
    #p.mip.display.set(display_flag)

    # general
    p.randomseed.set(0)
    p.parallel.set(1)
    p.threads.set(1)
    p.output.clonelog.set(0)

    # set polish start time
    if polish_after_time < p.mip.polishafter.time.max():
        p.mip.polishafter.time.set(float(polish_after_time))

    if polish_after_solutions < p.mip.polishafter.solutions.max():
        p.mip.polishafter.solutions.set(int(polish_after_solutions))

    # solution pool
    p.mip.pool.intensity.set(
        2
    )  # 0 auto; 1 mild; 2 moderate; 3 aggressive; 4 enumerate all feasible solutions (set to 1-3)

    # MIP Strategy
    p.emphasis.mip.set(1)
    #p.mip.strategy.variableselect.set(0)
    #p.mip.strategy.nodeselect.set(2) #0: depth first, 1: best bound, 2 best-estimate, 3-best-estimate alternative
    #p.mip.strategy.bbinterval (for best bound search)
    p.mip.strategy.search.set(2)  # 1 for traditional B&C, 2 for dynamic search
    p.mip.strategy.probe.set(0)  # -1 for off;/ 0 for automatic
    p.mip.strategy.dive.set(
        2
    )  # 0 automatic;1 dive; 2 probing dive; 3 guided dive (set to 2 for probing)

    #Preprocessing
    p.preprocessing.symmetry.set(
        0
    )  #turn off symmetry breaking (there should not be symmetry in this model)
    p.preprocessing.boundstrength.set(
        0)  #-1 to turn off; 1 to turn on; 0 for CPLEX to choose

    # Cut Generation (No Cuts for Heuristic)
    p.mip.cuts.implied.set(-1)
    p.mip.cuts.localimplied.set(-1)  #
    p.mip.cuts.zerohalfcut.set(-1)  # -1 off; 0 auto; 1 on; 2 aggressive
    p.mip.cuts.mircut.set(-1)  #-1 off; 0 auto, 1 on, 2 aggressive
    p.mip.cuts.covers.set(-1)  #-1 off; 0 auto; 1-3 aggression level

    # General Heuristics
    #p.mip.strategy.heuristicfreq.set(100) #-1 for none, or # of nodes
    p.mip.strategy.rinsheur.set(
        0)  #RINS: -1 off; 0 auto; 0 for none; n >= as frequency
    p.mip.strategy.fpheur.set(
        -1
    )  #Feasibility Pump: -1: off; 0 auto; 1 to find feasible only; 2 to find feasible with good obj (use -1 or 2)
    p.mip.strategy.lbheur.set(0)  #Local Branching: 0 off; 1 on

    return polishing_mip
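A brief hedged usage note: cpx below stands for an already-built Cplex MIP, and the polishing thresholds are illustrative.

# Hypothetical usage: clone an existing MIP with polishing-oriented parameters.
polishing_mip = build_polishing_mip(cpx,
                                    polish_after_solutions=5,
                                    polish_after_time=60.0)
polishing_mip.solve()
print(polishing_mip.solution.get_objective_value())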
Beispiel #25
0
def ZFD(a,s):
    n,_ = a.shape # number of vertices
    edges = [] # set of edges
    m = 0 # number of edges

    # populate edge set
    for i in range(n):
        for j in range(n):
            if(a[i,j] == 1):
                edges.append((i,j))
                m += 1

    # objective function -> (s, s', x, x', y, y', z)
    obj = concatenate((zeros(n), zeros(n), zeros(n), zeros(n), zeros(m), zeros(m), ones(n)))
    
    T = n - 1 # max propagation time

    # lower/upper bounds
    lb = concatenate((zeros(n),zeros(n),zeros(n), zeros(n), zeros(m), zeros(m), zeros(n))) 
    ub = concatenate((ones(n), ones(n), T*ones(n), T*ones(n), ones(m), ones(m), ones(n)))

    # initialize data for model setup
    count = 0; sense = ""; rows = []; cols = []; vals = []; rhs = []

    # constraint 1
    for v in range(n):
        # s_v
        rows.append(count)
        cols.append(v) 
        vals.append(1)
        for k in range(m):
            if(edges[k][1] == v):
                # y_e with e = (u,v)
                rows.append(count)
                cols.append(4*n + k)
                vals.append(1)

        rhs.append(1)
        sense += "E"
        count += 1

    # constraint 2
    for v in range(n):
        # s_v'
        rows.append(count) 
        cols.append(n + v) 
        vals.append(1) 
        for k in range(m):
            if(edges[k][1] == v):
                # y_e' with e = (u,v)
                rows.append(count)
                cols.append(4*n + m + k) # n+n+n+n+m+k
                vals.append(1)

        rhs.append(1)
        sense += "E"
        count += 1

    # constraint 3
    for k in range(m):
        # x_u - x_v + (T+1)y_e with e = (u,v)
        rows.extend([count, count, count])
        cols.extend([2*n + edges[k][0], 2*n + edges[k][1], 4*n + k])
        vals.extend([1, -1, T + 1])
        rhs.append(T) # <= T
        sense  += "L"
        count += 1

    # constraint 4
    for k in range(m):
        # x_u' - x_v' + (T+1)y_e' with e = (u,v)
        rows.extend([count, count, count])
        cols.extend([3*n + edges[k][0], 3*n + edges[k][1], 4*n + m + k])
        vals.extend([1, -1, T + 1])
        rhs.append(T) # <= T
        sense  += "L"
        count += 1

    # constraint 5
    for k in range(m):
        for w in range(n):
            if(w != edges[k][1] and a[edges[k][0],w] == 1):
                # x_w - x_v + (T+1)y_e, where e = (u,v) and w!=v, u~w
                rows.extend([count, count, count])
                cols.extend([2*n + w, 2*n + edges[k][1], 4*n + k])
                vals.extend([1, -1, T + 1])
                rhs.append(T) #<= T
                sense += "L"
                count += 1

    # constraint 6
    for k in range(m):
        for w in range(n):
            if(w != edges[k][1] and a[edges[k][0],w] == 1):
                # x_w' - x_v' + (T+1)y_e', where e = (u,v) and w!=v, u~w
                rows.extend([count, count, count])
                cols.extend([3*n + w, 3*n + edges[k][1], 4*n + m + k])
                vals.extend([1, -1, T + 1])
                rhs.append(T) # <= T
                sense += "L"
                count += 1

    # constraint 7
    for v in range(n):
        # s_v
        rows.append(count)
        cols.append(v) 
        vals.append(1) 

    rhs.append(s)
    sense += "E"
    count += 1

    # constraint 8
    for v in range(n):
        # s_v'
        rows.append(count)
        cols.append(n + v) 
        vals.append(1) 

    rhs.append(s)
    sense += "E"
    count += 1

    # constraint 9
    for v in range(n):
        #s_v + s_v' - z_v <= 1
        rows.extend([count, count, count])
        cols.extend([v, n + v, 4*n + 2*m + v])
        vals.extend([1, 1, -1])
        rhs.append(1)
        sense  += "L"
        count += 1


    IP = Cplex() # integer program/cplex problem
    IP.set_results_stream(None)
    
    IP.objective.set_sense(IP.objective.sense.minimize) # minimization problem
    
    # variables
    IP.variables.add(obj = obj, lb = lb, ub = ub)
    for j in range(IP.variables.get_num()):
        IP.variables.set_types(j,IP.variables.type.integer)
        
    # linear constraints
    IP.linear_constraints.add(rhs = rhs, senses = sense)
    IP.linear_constraints.set_coefficients(zip(rows, cols, vals))

    # write lp file
    # IP.write("diverse_zf_ip.lp")
    
    # alg method
    alg = IP.parameters.lpmethod.values
    IP.parameters.lpmethod.set(alg.auto)
    
    # solve integer program
    IP.solve()
    
    # solution variables
    var = IP.solution.get_values()

    # solutions for each variable
    s1 = var[0:n] # s
    s2 = var[n:2*n] # s'
    x1 = var[2*n:3*n] # x
    x2 = var[3*n:4*n] # x'
    y1 = var[4*n:4*n + m] # y
    y2 = var[4*n + m:4*n + 2*m] # y'
    z = var[4*n + 2*m:5*n + 2*m] # z

    # optimal solution
    optSol = IP.solution.get_objective_value()
    
    return optSol, s1, s2, x1, x2, y1, y2
    def build_mip(self):
        """
        returns an optimization problem that can be solved to determine an item in a flipset for x
        :return:
        """

        # setup MIP related parameters
        cost_type = self.mip_cost_type
        min_items = self.min_items
        max_items = self.max_items
        #assert min_items <= max_items

        # cost/action information
        build_info, indices = self._get_mip_build_info()

        # if build_info is empty, then reset mip and return
        if len(build_info) == 0:
            self._mip = None
            self._mip_indices = dict()
            return

        # initialize mip
        mip = Cplex()
        mip.set_problem_type(mip.problem_type.MILP)
        vars = mip.variables
        cons = mip.linear_constraints
        n_actionable = len(build_info)
        n_indicators = len(indices['action_ind_names'])

        # define a[j]
        vars.add(names=indices['action_var_names'],
                 types=['C'] * n_actionable,
                 lb=indices['action_lb'],
                 ub=indices['action_ub'])

        # sum_j w[j] a[j] > -score
        cons.add(names=['score'],
                 lin_expr=[
                     SparsePair(ind=indices['action_var_names'],
                                val=indices['coefficients'])
                 ],
                 senses=['G'],
                 rhs=[-self.score()])

        # define indicators u[j][k] = 1 if a[j] = actions[j][k]
        vars.add(names=indices['action_ind_names'], types=['B'] * n_indicators)

        # restrict a[j] to feasible values using a 1 of K constraint setup
        for info in build_info.values():

            # restrict a[j] to actions in feasible set and make sure exactly 1 indicator u[j][k] is on
            # 1. a[j]  =   sum_k u[j][k] * actions[j][k] - > 0.0   =   sum u[j][k] * actions[j][k] - a[j]
            # 2.sum_k u[j][k] = 1.0
            cons.add(
                names=['set_a[%d]' % info['idx'],
                       'pick_a[%d]' % info['idx']],
                lin_expr=[
                    SparsePair(ind=info['action_var_name'] +
                               info['action_ind_names'],
                               val=[-1.0] + info['actions']),
                    SparsePair(ind=info['action_ind_names'],
                               val=[1.0] * len(info['actions']))
                ],
                senses=["E", "E"],
                rhs=[0.0, 1.0])

            # declare indicator variables as SOS set
            mip.SOS.add(type="1",
                        name="sos_u[%d]" % info['idx'],
                        SOS=SparsePair(ind=info['action_ind_names'],
                                       val=info['actions']))

        # limit number of features per action
        #
        # size := n_actionable - n_null where n_null := sum_j u[j][0] = sum_j 1[a[j] = 0]
        #
        # size <= max_size
        # n_actionable - sum_j u[j][0]  <=  max_size
        # n_actionable - max_size       <=  sum_j u[j][0]
        #
        # min_size <= size:
        # min_size          <=  n_actionable - sum_j u[j][0]
        # sum_j u[j][0]     <=  n_actionable - min_size
        min_items = max(min_items, 1)
        max_items = min(max_items, n_actionable)
        size_expr = SparsePair(ind=indices['action_off_names'],
                               val=[1.0] * n_actionable)
        cons.add(names=['max_items', 'min_items'],
                 lin_expr=[size_expr, size_expr],
                 senses=['G', 'L'],
                 rhs=[
                     float(n_actionable - max_items),
                     float(n_actionable - min_items)
                 ])

        # add constraints for cost function
        if cost_type == 'max':

            indices['max_cost_var_name'] = ['max_cost']
            indices['epsilon'] = np.min(indices['cost_df']) / np.sum(
                indices['cost_ub'])
            vars.add(names=indices['max_cost_var_name'] +
                     indices['cost_var_names'],
                     types=['C'] * (n_actionable + 1),
                     obj=[1.0] + [indices['epsilon']] * n_actionable)
            #lb = [0.0] * (n_actionable + 1)) # default values are 0.0

            cost_constraints = {
                'names': [],
                'lin_expr': [],
                'senses': ["E", "G"] * n_actionable,
                'rhs': [0.0, 0.0] * n_actionable,
            }

            for info in build_info.values():

                cost_constraints['names'].extend([
                    'def_cost[%d]' % info['idx'],
                    'set_max_cost[%d]' % info['idx']
                ])

                cost_constraints['lin_expr'].extend([
                    SparsePair(ind=info['cost_var_name'] +
                               info['action_ind_names'],
                               val=[-1.0] + info['costs']),
                    SparsePair(ind=indices['max_cost_var_name'] +
                               info['cost_var_name'],
                               val=[1.0, -1.0])
                ])

            cons.add(**cost_constraints)

            # old code (commented out for speed)
            #
            # vars.add(names = indices['cost_var_names'],
            #          types = ['C'] * n_actionable,
            #          obj = [indices['epsilon']] * n_actionable,
            #          #ub = [CPX_INFINITY] * n_actionable, #indices['cost_ub'], #indices['cost_ub'],
            #          lb = [0.0] * n_actionable)
            #
            # vars.add(names = indices['max_cost_var_name'],
            #          types = ['C'],
            #          obj = [1.0],
            #          #ub = [np.max(indices['cost_ub'])],
            #          lb = [0.0])
            #
            # for info in build_info.values():
            #     cost[j] = sum c[j][k] u[j][k]
            #     cons.add(names = ['def_cost[%d]' % info['idx']],
            #              lin_expr = [SparsePair(ind = info['cost_var_name'] + info['action_ind_names'], val = [-1.0] + info['costs'])]
            #              senses = ["E"],
            #              rhs = [0.0])
            #
            #     max_cost > cost[j]
            #     cons.add(names = ['set_max_cost[%d]' % info['idx']],
            #              lin_expr = [SparsePair(ind = indices['max_cost_var_name'] + info['cost_var_name'], val = [1.0, -1.0])],
            #              senses = ["G"],
            #              rhs = [0.0])

        elif cost_type in ('total', 'local'):

            indices.pop('cost_var_names')
            objval_pairs = list(
                chain(*[
                    list(zip(v['action_ind_names'], v['costs']))
                    for v in build_info.values()
                ]))
            mip.objective.set_linear(objval_pairs)

        mip = self.set_mip_parameters(mip)
        self._mip = mip
        self.mip_indices = indices
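A standalone, hedged sketch of the 1-of-K pattern used above: a continuous variable a is forced to take exactly one value from a discrete action set via binary indicators. The action values and names are illustrative, not taken from the class.

from cplex import Cplex, SparsePair

actions = [0.0, 0.5, 1.0, 2.0]  # illustrative feasible values for a
m = Cplex()
m.set_results_stream(None)
m.variables.add(names=['a'], types=['C'], lb=[min(actions)], ub=[max(actions)])
u_names = ['u[%d]' % k for k in range(len(actions))]
m.variables.add(names=u_names, types=['B'] * len(actions))

# a = sum_k u[k] * actions[k]   and   sum_k u[k] = 1
m.linear_constraints.add(
    names=['set_a', 'pick_a'],
    lin_expr=[SparsePair(ind=['a'] + u_names, val=[-1.0] + actions),
              SparsePair(ind=u_names, val=[1.0] * len(actions))],
    senses=['E', 'E'],
    rhs=[0.0, 1.0])

# e.g. maximize a subject to the 1-of-K restriction
m.objective.set_linear([('a', 1.0)])
m.objective.set_sense(m.objective.sense.maximize)
m.solve()
print(m.solution.get_values('a'))  # expected: 2.0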
Beispiel #27
0
#!/usr/bin/env python3

import cplex
from cplex import Cplex
from cplex.exceptions import CplexError
import numpy as np
import matplotlib.pyplot as plt

mip_solver = Cplex()

# mip_solver.set_results_stream(None)
# mip_solver.set_warning_stream(None)
# mip_solver.set_error_stream(None)
#mip_solver.parameters.threads.set(1)

hidden_weights = np.load("hidden_weights.npy")
hidden_bias = np.load("hidden_bias.npy")
output_weights = np.load("output_weights.npy")
output_bias = np.load("output_bias.npy")

input_dim = 28 * 28
hidden_nodes = 20

#mip_solver.objective.set_sense(mip_solver.objective.sense.minimize)
mip_solver.objective.set_sense(mip_solver.objective.sense.maximize)

# Set the value of the output variable as objective function
mip_solver.variables.add(obj=[1],
                         lb=[-cplex.infinity],
                         ub=[cplex.infinity],
                         types="C",
Beispiel #28
0
def qps_cplex(H, c, A, l, u, xmin, xmax, x0, opt):
    """Quadratic Program Solver based on CPLEX.

    A wrapper function providing a PYPOWER standardized interface for using
    C{cplexqp} or C{cplexlp} to solve the following QP (quadratic programming)
    problem::

        min 1/2 x'*H*x + c'*x
         x

    subject to::

        l <= A*x <= u       (linear constraints)
        xmin <= x <= xmax   (variable bounds)

    Inputs (all optional except C{H}, C{c}, C{A} and C{l}):
        - C{H} : matrix (possibly sparse) of quadratic cost coefficients
        - C{c} : vector of linear cost coefficients
        - C{A, l, u} : define the optional linear constraints. Default
        values for the elements of L and U are -Inf and Inf, respectively.
        - C{xmin, xmax} : optional lower and upper bounds on the
        C{x} variables, defaults are -Inf and Inf, respectively.
        - C{x0} : optional starting value of optimization vector C{x}
        - C{opt} : optional options structure with the following fields,
        all of which are also optional (default values shown in parentheses)
            - C{verbose} (0) - controls level of progress output displayed
                - 0 = no progress output
                - 1 = some progress output
                - 2 = verbose progress output
            - C{cplex_opt} - options dict for CPLEX, value in
            verbose overrides these options
        - C{problem} : The inputs can alternatively be supplied in a single
        C{problem} dict with fields corresponding to the input arguments
        described above: C{H, c, A, l, u, xmin, xmax, x0, opt}

    Outputs:
        - C{x} : solution vector
        - C{f} : final objective function value
        - C{exitflag} : CPLEXQP/CPLEXLP exit flag
        (see C{cplexqp} and C{cplexlp} documentation for details)
        - C{output} : CPLEXQP/CPLEXLP output dict
        (see C{cplexqp} and C{cplexlp} documentation for details)
        - C{lmbda} : dict containing the Lagrange and Kuhn-Tucker
        multipliers on the constraints, with fields:
            - mu_l - lower (left-hand) limit on linear constraints
            - mu_u - upper (right-hand) limit on linear constraints
            - lower - lower bound on optimization variables
            - upper - upper bound on optimization variables

    @author: Ray Zimmerman (PSERC Cornell)
    """
    ##----- input argument handling  -----
    ## gather inputs
    if isinstance(H, dict):  ## problem struct
        p = H
        if 'opt' in p: opt = p['opt']
        if 'x0' in p: x0 = p['x0']
        if 'xmax' in p: xmax = p['xmax']
        if 'xmin' in p: xmin = p['xmin']
        if 'u' in p: u = p['u']
        if 'l' in p: l = p['l']
        if 'A' in p: A = p['A']
        if 'c' in p: c = p['c']
        if 'H' in p: H = p['H']
    else:  ## individual args
        assert H is not None
        assert c is not None
        assert A is not None
        assert l is not None

    if opt is None:
        opt = {}
#    if x0 is None:
#        x0 = array([])
#    if xmax is None:
#        xmax = array([])
#    if xmin is None:
#        xmin = array([])

    ## define nx, set default values for missing optional inputs
    if len(H) == 0 or not any(any(H)):
        if len(A) == 0 and len(xmin) == 0 and len(xmax) == 0:
            stderr.write(
                'qps_cplex: LP problem must include constraints or variable bounds\n'
            )
        else:
            if len(A) > 0:
                nx = shape(A)[1]
            elif len(xmin) > 0:
                nx = len(xmin)
            else:  # if len(xmax) > 0
                nx = len(xmax)
    else:
        nx = shape(H)[0]

    if len(c) == 0:
        c = zeros(nx)

    if  len(A) > 0 and (len(l) == 0 or all(l == -Inf)) and \
                       (len(u) == 0 or all(u ==  Inf)):
        A = None  ## no limits => no linear constraints

    nA = shape(A)[0]  ## number of original linear constraints
    if len(u) == 0:  ## By default, linear inequalities are ...
        u = Inf * ones(nA)  ## ... unbounded above and ...

    if len(l) == 0:
        l = -Inf * ones(nA)  ## ... unbounded below.

    if len(xmin) == 0:  ## By default, optimization variables are ...
        xmin = -Inf * ones(nx)  ## ... unbounded below and ...

    if len(xmax) == 0:
        xmax = Inf * ones(nx)  ## ... unbounded above.

    if len(x0) == 0:
        x0 = zeros(nx)

    ## default options
    if 'verbose' in opt:
        verbose = opt['verbose']
    else:
        verbose = 0

    #if 'max_it' in opt:
    #    max_it = opt['max_it']
    #else:
    #    max_it = 0

    ## split up linear constraints
    ieq = find(abs(u - l) <= EPS)  ## equality
    igt = find((u >= 1e10) & (l > -1e10))  ## greater than, unbounded above
    ilt = find((l <= -1e10) & (u < 1e10))  ## less than, unbounded below
    ibx = find((abs(u - l) > EPS) & (u < 1e10) & (l > -1e10))
    Ae = A[ieq, :]
    be = u[ieq]
    Ai = r_[A[ilt, :], -A[igt, :], A[ibx, :], -A[ibx, :]]
    bi = r_[u[ilt], -l[igt], u[ibx], -l[ibx]]

    ## grab some dimensions
    nlt = len(ilt)  ## number of upper bounded linear inequalities
    ngt = len(igt)  ## number of lower bounded linear inequalities
    nbx = len(ibx)  ## number of doubly bounded linear inequalities

    ## set up options struct for CPLEX
    if 'cplex_opt' in opt:
        cplex_opt = cplex_options(opt['cplex_opt'])
    else:
        cplex_opt = cplex_options()

    cplex = Cplex()
    vstr = cplex.get_version()
    # parse "major.minor" out of the version string
    vmatch = re.match(r'(\d+)\.(\d+)', vstr)
    vnum = float('%s.%s' % (vmatch.group(1), vmatch.group(2)))
    vrb = max([0, verbose - 1])
    cplex_opt['barrier']['display'] = vrb
    cplex_opt['conflict']['display'] = vrb
    cplex_opt['mip']['display'] = vrb
    cplex_opt['sifting']['display'] = vrb
    cplex_opt['simplex']['display'] = vrb
    cplex_opt['tune']['display'] = vrb
    if vrb and (vnum > 12.2):
        cplex_opt['diagnostics'] = 'on'
    #if max_it:
    #    cplex_opt.    ## not sure what to set here

    if len(Ai) == 0 and len(Ae) == 0:
        unconstrained = 1
        Ae = sparse((1, nx))
        be = 0
    else:
        unconstrained = 0

    ## call the solver
    if verbose:
        methods = [
            'default', 'primal simplex', 'dual simplex', 'network simplex',
            'barrier', 'sifting', 'concurrent'
        ]

    if len(H) == 0 or not any(any(H)):
        if verbose:
            stdout.write('CPLEX Version %s -- %s LP solver\n' %
                         (vstr, methods[cplex_opt['lpmethod'] + 1]))

        x, f, eflag, output, lam = \
            cplexlp(c, Ai, bi, Ae, be, xmin, xmax, x0, cplex_opt)
    else:
        if verbose:
            stdout.write('CPLEX Version %s --  %s QP solver\n' %
                         (vstr, methods[cplex_opt['qpmethod'] + 1]))
        ## ensure H is numerically symmetric
        if H != H.T:
            H = (H + H.T) / 2

        x, f, eflag, output, lam = \
            cplexqp(H, c, Ai, bi, Ae, be, xmin, xmax, x0, cplex_opt)

    ## check for empty results (in case optimization failed)
    if len(x) == 0:
        x = NaN * zeros(nx)

    if len(f) == 0:
        f = NaN

    if len(lam) == 0:
        lam['ineqlin'] = NaN * zeros(len(bi))
        lam['eqlin'] = NaN * zeros(len(be))
        lam['lower'] = NaN * zeros(nx)
        lam['upper'] = NaN * zeros(nx)
        mu_l = NaN * zeros(nA)
        mu_u = NaN * zeros(nA)
    else:
        mu_l = zeros(nA)
        mu_u = zeros(nA)

    if unconstrained:
        lam['eqlin'] = array([])

    ## negate prices depending on version
    if vnum < 12.3:
        lam['eqlin'] = -lam['eqlin']
        lam['ineqlin'] = -lam['ineqlin']

    ## repackage lambdas
    kl = find(lam['eqlin'] < 0)  ## lower bound binding
    ku = find(lam['eqlin'] > 0)  ## upper bound binding

    mu_l[ieq[kl]] = -lam['eqlin'][kl]
    mu_l[igt] = lam['ineqlin'][nlt + arange(ngt)]
    mu_l[ibx] = lam['ineqlin'][nlt + ngt + nbx + arange(nbx)]

    mu_u[ieq[ku]] = lam['eqlin'][ku]
    mu_u[ilt] = lam['ineqlin'][:nlt]
    mu_u[ibx] = lam['ineqlin'][nlt + ngt + arange(nbx)]

    lmbda = {
        'mu_l': mu_l,
        'mu_u': mu_u,
        'lower': lam['lower'],
        'upper': lam['upper']
    }

    return x, f, eflag, output, lmbda
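A hedged usage sketch of the interface described in the docstring, using a tiny LP (empty H) so the LP branch is exercised; it assumes the module-level imports of this file (numpy, cplex_options, and the cplexlp/cplexqp wrappers) are available.

from numpy import array, Inf

# Hypothetical LP: minimize x1 + 2*x2  s.t.  x1 + x2 >= 1,  0 <= x <= 10.
H = array([])                 # empty H selects the LP path
c = array([1.0, 2.0])
A = array([[1.0, 1.0]])
l = array([1.0])              # lower limit on A*x
u = array([Inf])              # no upper limit
xmin = array([0.0, 0.0])
xmax = array([10.0, 10.0])
x0 = array([0.0, 0.0])

x, f, exitflag, output, lmbda = qps_cplex(H, c, A, l, u, xmin, xmax, x0,
                                          {'verbose': 0})
print(x, f)                   # expected: x = [1, 0], f = 1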
Beispiel #29
0
def fair_partial_assignment_lp_solver(df, centers, color_flag, alpha, beta,
                                      cost_fun_string):

    # There are primarily five steps:
    # 1. Initiate a model for cplex
    # 2. Declare if it is minimization or maximization problem
    # 3. Add variables to the model. The variables are generally named.
    #    The upper bounds and lower bounds on the range for the variables
    #    are also mentioned at this stage. The coefficient of the objective
    #    functions are also entered at this step
    # 4. Add the constraints to the model. The constraint matrix, denoted by A,
    #    can be added in three ways - row wise, column wise or non-zero entry wise.
    # 5. Finally, call the solver.

    # Step 1. Initiate a model for cplex.

    print("Initializing Cplex model")
    problem = Cplex()

    # Step 2. Declare that this is a minimization problem

    problem.objective.set_sense(problem.objective.sense.minimize)

    # Step 3.   Declare and  add variables to the model. The function
    #           prepare_to_add_variables (points, center) prepares all the
    #           required information for this stage.
    #
    #    objective: a list of coefficients (float) in the linear objective function
    #    lower bounds: a list of floats containing the lower bounds for each variable
    #    upper bounds: a list of floats containing the upper bounds for each variable
    #    variable_name: a list of strings that contains the name of the variables

    print("Starting to add variables...")
    print("HERE???")
    t1 = time.monotonic()
    objective, lower_bounds, upper_bounds, variable_names = prepare_to_add_variables(
        df, centers, cost_fun_string)
    problem.variables.add(obj=objective,
                          lb=lower_bounds,
                          ub=upper_bounds,
                          names=variable_names)
    t2 = time.monotonic()
    print("Completed. Time for creating and adding variable = {}".format(t2 -
                                                                         t1))

    # Step 4.   Declare and add constraints to the model.
    #           There are a few ways of adding constraints: row wise, column wise and non-zero entry wise.
    #           Assume the constraint matrix is A. We add the constraints row wise.
    #           The function prepare_to_add_constraints_by_entry(points,center,colors,alpha,beta)
    #           prepares the required data for this step.
    #
    #  constraints_row: Encoding of each row of the constraint matrix
    #  senses: a list of strings that identifies whether the corresponding constraint is
    #          an equality or inequality. "E" : equal to (=), "L" : less than or equal to (<=), "G" : greater than or equal to (>=)
    #  rhs: a list of floats corresponding to the rhs of the constraints.
    #  constraint_names: a list of string corresponding to the name of the constraint

    print("Starting to add constraints...")
    t1 = time.monotonic()
    objects_returned = prepare_to_add_constraints(df, centers, color_flag,
                                                  beta, alpha)
    constraints_row, senses, rhs, constraint_names = objects_returned
    problem.linear_constraints.add(lin_expr=constraints_row,
                                   senses=senses,
                                   rhs=rhs,
                                   names=constraint_names)
    t2 = time.monotonic()
    print(
        "Completed. Time for creating and adding constraints = {}".format(t2 -
                                                                          t1))

    # Optional: we can set various parameters to tune the performance of the LP solver.
    # As an example, the following line sets the barrier method as the LP algorithm.
    # The other available methods are: auto, primal, dual, sifting, and concurrent.

    #problem.parameters.lpmethod.set(problem.parameters.lpmethod.values.barrier)

    return problem, objective
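
# A minimal, self-contained sketch of the five-step recipe described in the
# comments above, applied to a toy LP (hypothetical example, not part of the
# original project): minimize x + 2y subject to x + y >= 3 and 0 <= x, y <= 10.
# It uses only CPLEX calls that already appear in the surrounding functions.
def _toy_five_step_example():
    from cplex import Cplex, SparsePair

    toy = Cplex()                                          # Step 1: initialize a model
    toy.set_results_stream(None)
    toy.objective.set_sense(toy.objective.sense.minimize)  # Step 2: minimization

    toy.variables.add(obj=[1.0, 2.0],                      # Step 3: variables, bounds,
                      lb=[0.0, 0.0],                       #         objective coefficients
                      ub=[10.0, 10.0],
                      names=["x", "y"])

    toy.linear_constraints.add(                            # Step 4: one row, sense "G" (>=)
        lin_expr=[SparsePair(ind=["x", "y"], val=[1.0, 1.0])],
        senses=["G"],
        rhs=[3.0],
        names=["demand"])

    toy.solve()                                            # Step 5: call the solver
    # expected result: objective 3.0 with x = 3.0, y = 0.0
    return toy.solution.get_objective_value(), toy.solution.get_values()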
def mbdmb_lp_solver(distance, num_centers, num_points, weight, frac_lp_assgn,
                    color_flag):

    # Step 1. Initialize a model for CPLEX.

    print("Initializing Cplex model")
    problem = Cplex()

    # Step 2. Declare that this is a minimization problem

    problem.objective.set_sense(problem.objective.sense.minimize)

    # Step 3.   Declare and  add variables to the model.

    print("Starting to add variables...")
    t1 = time.monotonic()
    objects_returned = prepare_to_add_variables(distance, num_centers,
                                                num_points, frac_lp_assgn)
    objective, lower_bounds, variable_names = objects_returned
    problem.variables.add(obj=objective, lb=lower_bounds, names=variable_names)
    t2 = time.monotonic()
    print("Completed. Time for creating and adding variable = {}".format(t2 -
                                                                         t1))

    # Step 4.   Declare and add constraints to the model.

    print("Starting to add constraints...")
    t1 = time.monotonic()
    objects_returned = prepare_to_add_constraints(num_centers, num_points,
                                                  weight, frac_lp_assgn,
                                                  color_flag)
    constraints_row, senses, rhs, constraint_names = objects_returned
    problem.linear_constraints.add(lin_expr=constraints_row,
                                   senses=senses,
                                   rhs=rhs,
                                   names=constraint_names)
    t2 = time.monotonic()
    print(
        "Completed. Time for creating and adding constraints = {}".format(t2 -
                                                                          t1))

    # Optional: we can set various parameters to tune the performance of the LP solver.
    # As an example, the following line sets the barrier method as the LP algorithm.
    # The other available methods are: auto, primal, dual, sifting, and concurrent.

    #problem.parameters.lpmethod.set(problem.parameters.lpmethod.values.barrier)

    # Step 5. call the solver

    t1 = time.monotonic()
    problem.solve()
    t2 = time.monotonic()
    print("LP solving time time = {}".format(t2 - t1))

    res = {
        "status": problem.solution.get_status(),
        "success": problem.solution.get_status_string(),
        "objective": problem.solution.get_objective_value(),
        "assignment": problem.solution.get_values(),
    }

    return res
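
A caller would normally check the returned status before trusting the assignment values. The following is a small, self-contained sketch of such a check (not part of the original code); the hard-coded status value 1 corresponds to CPLEX's "optimal" status for an LP, and the example dict is hand-built in the same shape as the solver's return value.

# Sketch of a status check for the result dict returned above (illustrative,
# not part of the original code). CPLEX reports status 1 for an optimal LP.
def lp_result_is_optimal(res):
    return res["status"] == 1

# Hand-built dict shaped like the solver's return value, for illustration:
example = {"status": 1, "success": "optimal", "objective": 3.0,
           "assignment": [0.0, 1.0]}
assert lp_result_is_optimal(example)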