Example 1
 def get(self):
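     # Solve the LP lazily on the first call, then return the objective value and the variables after the first __nc columns.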
     if lpsolve('get_Nrows', self.__lp) == 0:
         self.__lp_setting()
         lpsolve('solve', self.__lp)
     obj = lpsolve('get_objective', self.__lp)
     var = lpsolve('get_variables', self.__lp)[0][self.__nc:]
     return (obj, var)
Example 2
 def __node_constraints(self, lp):
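     # For every node v, its k assignment variables must sum to exactly 1 (each node goes into exactly one of the k parts).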
     coeff = [0] * ((self.__nv + self.__ne) * self.__k)
     for v in self.__g.nodes_iter():
         for i in range(self.__k):
             coeff[v * self.__k + i] = 1
         lpsolve('add_constraint', lp, coeff, 'EQ', 1)
         for i in range(self.__k):
             coeff[v * self.__k + i] = 0
Example 3
 def __lin_prog(self):
     lp = lpsolve('make_lp', 0, (self.__nv + self.__ne) * self.__k)
     self.__set_objective(lp)
     self.__constraints(lp)
     self.__bounds(lp)
     self.__config(lp)
     lpsolve('solve', lp)
     return lpsolve('get_variables', lp)[0][:self.__nv * self.__k]
Example 4
 def __total_order(self):  # 3
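     # For every ordered pair i != j, require x(i, j) + x(j, i) = 1: exactly one orientation is chosen.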
     T, x, y = self.zeros, self.__x, self.__y
     for i, j in product(self.g.nodes(), repeat=2):
         if i == j:
             continue
         T[x(i, j)] = 1
         T[x(j, i)] = 1
         lpsolve('add_constraint', self.lp, T, 'EQ', 1)
         T[x(i, j)], T[x(j, i)] = 0, 0
Example 5
 def __gearing_xy(self):  # 5
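     # Couple the two variable families: y(i, j) - x(i, j) <= 0, so y(i, j) can be 1 only when x(i, j) is 1.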
     T, x, y = self.zeros, self.__x, self.__y
     for i, j in product(self.g.nodes(), repeat=2):
         if i == j:
             continue
         T[x(i, j)] = -1
         T[y(i, j)] = 1
         lpsolve('add_constraint', self.lp, T, 'LE', 0)
         T[x(i, j)], T[y(i, j)] = 0, 0
Example 6
 def __transitivity(self):  # 4
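     # Transitivity of the order: x(i, j) + x(j, k) - x(i, k) <= 1, so x(i, j) = x(j, k) = 1 forces x(i, k) = 1.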
     T, x, y = self.zeros, self.__x, self.__y
     for i, j, k in product(self.g.nodes(), repeat=3):
         if i == j or j == k or i == k:
             continue
         T[x(i, j)] = 1
         T[x(j, k)] = 1
         T[x(i, k)] = -1
         lpsolve('add_constraint', self.lp, T, 'LE', 1)
         T[x(i, j)], T[x(j, k)], T[x(i, k)] = 0, 0, 0
Example 7
 def __counting_chord(self):  # 7
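     # Chord constraint: x(j, k) + y(i, j) + y(i, k) - y(j, k) <= 2, so if y(i, j), y(i, k) and x(j, k) are all 1, then y(j, k) must be 1 as well.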
     T, x, y = self.zeros, self.__x, self.__y
     for i, j, k in product(self.g.nodes(), repeat=3):
         if i == j or j == k or i == k:
             continue
         T[x(j, k)] = 1
         T[y(i, j)] = 1
         T[y(i, k)] = 1
         T[y(j, k)] = -1
         lpsolve('add_constraint', self.lp, T, 'LE', 2)
         T[x(j, k)], T[y(i, j)], T[y(i, k)], T[y(j, k)] = 0, 0, 0, 0
Example 8
    def start_new_objective(self, kind='random', last_r=-1):

        if kind == 'random':

            lpsolve('set_obj_fn', self.lp, random(lpsolve('get_Ncolumns', self.lp)) - 0.5)

        elif kind == 'facet':

            r = self.ineqs[random_integers(len(self.ineqs))-1]
            lpsolve('set_obj_fn', self.lp, r[0])
            return r
Example 9
    def __counting_edges(self):  # 6
        T, x, y = self.zeros, self.__x, self.__y
        for i, j in self.g.edges():
            T[x(i, j)] = -1
            T[y(i, j)] = 1
            lpsolve('add_constraint', self.lp, T, 'EQ', 0)
            T[x(i, j)], T[y(i, j)] = 0, 0

            T[x(j, i)] = -1
            T[y(j, i)] = 1
            lpsolve('add_constraint', self.lp, T, 'EQ', 0)
            T[x(j, i)], T[y(j, i)] = 0, 0
Example 10
 def __set_constraints(self):
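     # One row per clause j: z_j + sum(negated literals) - sum(positive literals) <= (number of negated literals), i.e. z_j can be 1 only if clause j is satisfied.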
     for j, c in enumerate(self.__clauses):
         v = [0] * self.__N
         v[j] = 1
         d = 0
         for i in c:
             if i < 0:
                 v[self.__nc - 1 + abs(i)] = 1
                 d += 1
             else:
                 v[self.__nc - 1 + i] = -1
         lpsolve('add_constraint', self.__lp, v, 'LE', d)
Example 11
    def leq(self, a):
        #-----------------------------------------------------------------------
        # We convert <= constraints to >= so that in package_solution we can
        # simply subtract the current constraint value in the tableau from the
        # original right hand side values given here to derive the amount of
        # slack on each constraint. This is important to have in
        # interior_point().
        #-----------------------------------------------------------------------
        if not self.set_bound(a, 'up'):

            if self.add_noise: a = self.noise(a)
            lpsolve('add_constraint', self.lp, -a[1:], GE, a[0])
            self.leq_count += 1

            self.ineqs.append([-a[1:], GE, a[0]])
Example 12
    def __init__(self, **kw):

        self.ncols       = kw.get('ncols', None)
        self.nthreads    = kw.get('nthreads', 1)
        self.random_seed = kw.get('rngseed',  None)
        self.objf_choice = kw.get('objf choice', 'random')
        self.sol_type    = kw.get('solution type', 'interior')
        self.add_noise   = kw.get('add noise', False)

        ran_set_seed(self.random_seed)

        self.lp = lpsolve('make_lp', 0, self.ncols)
        #lpsolve('set_bounds_tighter', self.lp, True) # important so that we don't loosen tight bounds

        self.ineqs = []

        self.eq_count  = 0
        self.leq_count = 0
        self.geq_count = 0
        self.bnd_count  = 0

        self.iteration   = 0
        self.prev_sol    = None
        self.sum_ln_k    = 0
        self.curr_sol    = None
        self.n_solutions = 0
Example 13
 def __init__(self, nv, clauses, costs=None):
     self.__nc = len(clauses)
     self.__nv = nv
     self.__clauses = clauses
     self.__N = self.__nc + self.__nv
     self.__costs = costs if costs else [1] * self.__nc
     self.__lp = lpsolve('make_lp', 0, self.__N)
Example 14
 def __edge_constraints(self, lp):
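     # For each edge (u, v) and each of the k slots, the edge variable is forced to be at least the absolute difference of the two endpoint variables (via the two >= 0 rows).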
     node_offset = self.__nv * self.__k
     coeff = [0] * ((self.__nv + self.__ne) * self.__k)
     for e, (u, v) in enumerate(self.__g.edges_iter()):
         uk, vk = u * self.__k, v * self.__k
         for i in range(self.__k):
             coeff[node_offset + e * self.__k + i] = 1
             coeff[uk + i] = 1
             coeff[vk + i] = -1
             lpsolve('add_constraint', lp, coeff, 'GE', 0)
             coeff[uk + i] = -1
             coeff[vk + i] = 1
             lpsolve('add_constraint', lp, coeff, 'GE', 0)
             coeff[node_offset + e * self.__k + i] = 0
             coeff[uk + i] = 0
             coeff[vk + i] = 0
Example 15
 def __determine_max_clique_size(self):  # 2
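     # For every node i, column 0 must be at least the number of selected y(i, j); column 0 thus bounds the maximum clique size.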
     T, x, y = self.zeros, self.__x, self.__y
     T[0] = 1
     for i in self.g.nodes():
         for j in self.g.nodes():
             if i == j:
                 continue
             T[y(i, j)] = -1
         lpsolve('add_constraint', self.lp, T, 'GE', 0)
         for j in self.g.nodes():
             if i == j:
                 continue
             T[y(i, j)] = 0
         assert (T.count(1) == 1)
     T[0] = 0
     assert (self.zeros.count(1) == 0)
Example 16
    def set_bound(self, a, type):
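        # NOTE: the early return below disables bound detection, so every inequality falls back to an explicit constraint.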
        return False

        func = {'low': 'set_lowbo',
                'up':  'set_upbo'}.get(type, None)

        if func is None:
            assert 0, 'Bad type to set_bound'

        w = a.nonzero()[0]
        if w.size == 2 and w[0] == 0:
            bnd = -a[0] / a[w[1]]
            lpsolve(func, self.lp, w[1], bnd)
            self.bnd_count += 1
            return True

        return False
Example 17
def assign_placements():
    num_tas = len(all_tas)
    num_times = len(ID_TO_TIMES)
    upper = [1 for _ in range(num_tas * num_times)]  # max value must be 1
    obj_func = make_obj_func()
    constr_mat = make_constr_mat(num_tas, num_times)
    b_vector = make_b_vector()
    e_vector = make_e_vector(num_tas, num_times)
    lp = lpm.lp_maker(obj_func, constr_mat, b_vector, e_vector)
    lps.lpsolve('set_bb_depthlimit', lp, 0)  # set branch and bound depth limit
    lps.lpsolve('set_binary', lp, upper)  # all values must be either 0 or 1
    lps.lpsolve('solve', lp)
    vars = lps.lpsolve('get_variables', lp)[0]
    lps.lpsolve('delete_lp', lp)
    output_results(vars)
Example 18
    def start(self):

        Log( '=' * 80 )
        Log( 'SAMPLEX' )
        Log( '=' * 80 )

        Log( 'Using lpsolve %s' % lpsolve('lp_solve_version') )

        Log( "ncols         = %i" % self.ncols )
        Log( "random seed   = %s" % self.random_seed )
        Log( "threads       = %s" % self.nthreads )
        Log( "objf choice   = %s" % self.objf_choice )
        Log( "solution type = %s" % self.sol_type )
        Log( "add noise     = %s" % self.add_noise )

        Log( "%6s %6s %6s %6s\n%6i %6i %6i %6i" 
            % (">=", "<=", "=", "bnd", self.geq_count, self.leq_count, self.eq_count, self.bnd_count) )

        #lpsolve('set_verbose', self.lp, DETAILED)
        #lpsolve('set_verbose', self.lp, DETAILED)
        lpsolve('set_verbose', self.lp, IMPORTANT)
Example 19
 def __bounds(self, lp):
     nv, ne = self.__nv * self.__k, self.__ne * self.__k
     # lpsolve('set_binary', lp, [1] * nv + [0] * ne)
     lpsolve('set_upbo', lp, [1] * nv + [Infinite] * ne)
     lpsolve('set_lowbo', lp, [0] * nv + [0] * ne)
     for i, v in enumerate(self.__s):
         lpsolve('set_lowbo', lp, v * self.__k + i + 1, 1)
Example 20
def set_constraints(lp, n):
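    # Assignment rows: one incoming and one outgoing edge per node, plus subtour-elimination rows bounded by n - 1.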
    for v in incomming_constraints(n):
        lpsolve('add_constraint', lp, v, 'EQ', 1)
    for v in outgoing_constraints(n):
        lpsolve('add_constraint', lp, v, 'EQ', 1)
    for v in subtour_constraints(n):
        lpsolve('add_constraint', lp, v, 'LE', n - 1)
Example 21
def assign_placements():
    """
    Takes in an option for which to assign placements for.
    Outputs nothing, but calls output_results which prints all placements.
    """
    num_tutors = len(all_tutors)
    num_times = len(ID_TO_TIMES)
    upper = [1 for _ in range(num_tutors * num_times)]  # max value must be 1
    obj_func = make_obj_func()
    constr_mat = make_constr_mat(num_tutors, num_times)
    b_vector = make_b_vector()
    e_vector = make_e_vector(num_tutors, num_times)
    lp = lpm.lp_maker(obj_func, constr_mat, b_vector, e_vector)
    lps.lpsolve('set_bb_depthlimit', lp, 0)  # set branch and bound depth limit
    lps.lpsolve('set_binary', lp, upper)  # all values must be either 0 or 1
    lps.lpsolve('solve', lp)
    vars = lps.lpsolve('get_variables', lp)[0]
    lps.lpsolve('delete_lp', lp)
    output_results(vars)
Example 22
    def package_solution(self):
        objv  = array(lpsolve('get_objective', self.lp))
        vars  = array(lpsolve('get_variables', self.lp)[0])
        slack = array(lpsolve('get_constraints', self.lp)[0]) - array(lpsolve('get_rh', self.lp)[1:])

        slack[abs(slack) < 1e-5] = 0

        nvars = len(vars)
        nslack = len(slack)

        s = SamplexSolution()
        s.sol = empty(nvars + 1)
        s.sol[1:] = vars
        s.sol[0] = objv

        s.vertex = empty(nvars + nslack + 1)
        s.vertex[1:nvars+1] = vars
        s.vertex[1+nvars:1+nvars+nslack] = slack
        s.vertex[0] = objv

        assert all(s.vertex[1:] >= 0), s.vertex[s.vertex < 0]

        return s
Example 23
    def next_solution(self):

        while True:

            r = self.start_new_objective(kind=self.objf_choice)

            result = lpsolve('solve', self.lp)
            if   result in [OPTIMAL, TIMEOUT]:   break
            elif result == SUBOPTIMAL: continue
            elif result == INFEASIBLE: raise SamplexNoSolutionError()
            elif result == UNBOUNDED: raise SamplexUnboundedError()
            else:
                Log( result )
                raise SamplexUnexpectedError("unknown pivot result = %i" % result)

            objv = lpsolve('get_objective', self.lp)

            if self.objf_choice == 'facet' and abs(objv) > 1e-6:
                print 'BAD VARIABLE', objv
                self.ineqs.remove(r)  # r is the inequality itself, not an index
            else:
                break

        print 'Solution after %i steps.' % lpsolve('get_total_iter', self.lp)
Example 24
def get_mtz_ilp_tour(P):
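    # Miller-Tucker-Zemlin style ILP: the first N columns are edge variables, the last n columns give the node ordering used to read off the tour.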
    dmat = distance_matrix(P)
    n, N = len(P), len(dmat)

    lp = lpsolve('make_lp', 0, N + n)
    lpsolve('set_obj_fn', lp, dmat + [0] * len(P))
    set_constraints(lp, n)
    set_bounds(lp, N, n)
    config_lp(lp)

    lpsolve('solve', lp)

    tour = [0] * (n + 1)
    for i, v in enumerate(lpsolve('get_variables', lp)[0][N:]):
        tour[int(v)] = i
    return tour
Example 25
def get_mtz_ilp_tour(P):
    n, N = len(P), len(P)**2
    d = [np.linalg.norm(P[i] - P[j]) for i, j in product(range(n), range(n))]

    lp = lpsolve('make_lp', 0, N + n)
    lpsolve('set_obj_fn', lp, d + [0] * len(P))
    set_constraints(lp, n)
    set_bounds(lp, N, n)
    config_lp(lp)

    lpsolve('solve', lp)

    tour = [0] * (n + 1)
    for i, v in enumerate(lpsolve('get_variables', lp)[0][N:]):
        tour[int(v)] = i
    return tour
Example 26
 def eq(self, a):
     #if self.add_noise: a = noise(a)
     lpsolve('add_constraint', self.lp, a[1:], EQ, -a[0])
     self.eq_count += 1
Example 27
def solve_lp_knapsack_lpsolve(scores, costs, budget):
    import lpsolve55 as lps

    relax = True
    n = len(scores)

    lp = lps.lpsolve('make_lp', 0, n)
    # Set verbosity level. 3 = only warnings and errors.
    lps.lpsolve('set_verbose', lp, 3)
    lps.lpsolve('set_obj_fn', lp, -scores)

    lps.lpsolve('add_constraint', lp, costs, lps.LE, budget)

    lps.lpsolve('set_lowbo', lp, np.zeros(n))
    lps.lpsolve('set_upbo', lp, np.ones(n))

    if not relax:
        lps.lpsolve('set_int', lp, [True] * n)
    else:
        lps.lpsolve('set_int', lp, [False] * n)

    # Solve the ILP, and call the debugger if something went wrong.
    ret = lps.lpsolve('solve', lp)
    assert ret == 0

    # Retrieve solution and return
    x, _ = lps.lpsolve('get_variables', lp)
    x = np.array(x)

    return x
Example 28
def solve_lp(scp_gen):
    print "--- SOLVE LINEAR PROGRAMMING ---"
    n = scp_gen.universal_set_size

    lp = lpsolve('make_lp', 0, n)
    lpsolve('set_maxim', lp)
    lpsolve('set_obj_fn', lp, [1] * n)

    for i, v in enumerate(scp_gen.get_cols()):
        lpsolve('add_constraint', lp, v, 'LE', scp_gen.cost[i])
    lpsolve('set_lowbo', lp, [0] * n)

    lpsolve('write_lp', lp, 'd.lp')
    lpsolve('set_verbose', lp, IMPORTANT)

    lpsolve('solve', lp)
    res = lpsolve('get_objective', lp)
    print "solution of lp:", res
    V = lpsolve('get_variables', lp)[0]
    print "solution vector:", V

    ans = 0
    AV = []
    for i, v in enumerate(scp_gen.get_cols()):
        s = sum(V[j] for j, e in enumerate(v) if e != 0)
        if s == scp_gen.cost[i]:
            AV.append(1)
            ans += s
        else:
            AV.append(0)

    _, f = scp_gen.get_f()
    print "f:", f, "1/f", 1.0 / float(f)
    print "deterministic rounding:", AV
    print "result of linear relaxation:", ans
    print "alpha: ", ans / res, "\n"

    if scp_gen.is_covered([i for i, v in enumerate(AV) if v == 1]):
        print 'covered'
    else:
        print 'uncovered'

    lpsolve('delete_lp', lp)
Example 29
    def _find_error_canceling_reaction(self,
                                       reference_subset,
                                       milp_software=None):
        """
        Automatically find a valid error canceling reaction given a subset of the available benchmark species. This
        is done by solving a mixed integer linear programming (MILP) problem similar to
        Buerger et al. (https://doi.org/10.1016/j.combustflame.2017.08.013)

        Args:
            reference_subset (list): A list of indices from self.reference_species that can participate in the reaction
            milp_software (list, optional): Solvers to try in order. Defaults to ['lpsolve'] or if pyomo is available
                defaults to ['lpsolve', 'pyomo']. lpsolve is usually faster.

        Returns:
            tuple(ErrorCancelingReaction, np.ndarray)
            - Reaction with the target species (if a valid reaction is found, else ``None``)
            - Indices (of the subset) for the species that participated in the return reaction
        """
        if milp_software is None:
            milp_software = ['lpsolve']
            if pyo is not None:
                milp_software.append('pyomo')

        # Define the constraints based on the provided subset
        c_matrix = np.take(self.constraint_matrix, reference_subset, axis=0)
        c_matrix = np.tile(c_matrix, (2, 1))
        sum_constraints = np.sum(c_matrix, 1, dtype=int)
        targets = -1 * self.target_constraint
        m = c_matrix.shape[0]
        n = c_matrix.shape[1]
        split = int(m / 2)

        for solver in milp_software:
            if solver == 'pyomo':
                # Check that pyomo is available
                if pyo is None:
                    raise ImportError(
                        'Cannot import optional package pyomo. Either install this dependency with '
                        '`conda install -c conda-forge pyomo glpk` or set milp_software to `lpsolve`'
                    )

                # Setup the MILP problem using pyomo
                lp_model = pyo.ConcreteModel()
                lp_model.i = pyo.RangeSet(0, m - 1)
                lp_model.j = pyo.RangeSet(0, n - 1)
                lp_model.r = pyo.RangeSet(
                    0, split -
                    1)  # indices before the split correspond to reactants
                lp_model.p = pyo.RangeSet(
                    split,
                    m - 1)  # indices after the split correspond to products
                lp_model.v = pyo.Var(lp_model.i,
                                     domain=pyo.NonNegativeIntegers
                                     )  # The stoich. coef. we are solving for
                lp_model.c = pyo.Param(
                    lp_model.i,
                    lp_model.j,
                    initialize=lambda _, i_ind, j_ind: c_matrix[i_ind, j_ind])
                lp_model.s = pyo.Param(
                    lp_model.i,
                    initialize=lambda _, i_ind: sum_constraints[i_ind])
                lp_model.t = pyo.Param(
                    lp_model.j, initialize=lambda _, j_ind: targets[j_ind])

                lp_model.obj = pyo.Objective(rule=_pyo_obj_expression)
                lp_model.constraints = pyo.Constraint(
                    lp_model.j, rule=_pyo_constraint_rule)

                # Solve the MILP problem using the GLPK MILP solver (https://www.gnu.org/software/glpk/)
                opt = pyo.SolverFactory('glpk')
                results = opt.solve(lp_model, timelimit=1)

                # Return the solution if a valid reaction is found. Otherwise continue to next solver
                if results.solver.termination_condition == pyo.TerminationCondition.optimal:
                    # Extract the solution and find the species with non-zero stoichiometric coefficients
                    solution = lp_model.v.extract_values().values()
                    break

            elif solver == 'lpsolve':
                # Save the current signal handler
                sig = signal.getsignal(signal.SIGINT)

                # Setup the MILP problem using lpsolve
                lp = lpsolve('make_lp', 0, m)
                lpsolve('set_verbose', lp,
                        2)  # Reduce the logging from lpsolve
                lpsolve('set_obj_fn', lp, sum_constraints)
                lpsolve('set_minim', lp)

                for j in range(n):
                    lpsolve(
                        'add_constraint', lp,
                        np.concatenate(
                            (c_matrix[:split, j], -1 * c_matrix[split:, j])),
                        EQ, targets[j])

                lpsolve('add_constraint', lp, np.ones(m), LE,
                        20)  # Use at most 20 species (including replicates)
                lpsolve('set_timeout', lp,
                        1)  # Move on if lpsolve can't find a solution quickly

                # Constrain v_i to be 4 or less (lpsolve columns are 1-indexed)
                for i in range(m):
                    lpsolve('set_upbo', lp, i + 1, 4)

                # All v_i must be integers
                lpsolve('set_int', lp, [True] * m)

                status = lpsolve('solve', lp)

                # Reset signal handling since lpsolve changed it
                try:
                    signal.signal(signal.SIGINT, sig)
                except ValueError:
                    # This is not being run in the main thread, so we cannot reset signal
                    pass

                # Return the solution if a valid reaction is found. Otherwise continue to next solver
                if status == 0:
                    _, solution = lpsolve('get_solution', lp)[:2]
                    break

            else:
                raise ValueError(
                    f'Unrecognized MILP solver {solver} for isodesmic reaction generation'
                )

        else:
            return None, None

        reaction = ErrorCancelingReaction(self.target, dict())
        subset_indices = []
        for index, v in enumerate(solution):
            if v > 0:
                subset_indices.append(index % split)
                if index < split:
                    reaction.species.update(
                        {self.reference_species[reference_subset[index]]: -v})
                else:
                    reaction.species.update({
                        self.reference_species[reference_subset[index % split]]:
                        v
                    })

        return reaction, np.array(subset_indices)
Example 30
    def inner_point(self, newp):

        lp = lpsolve('make_lp', 0, self.nVars+1) # +1 for variable used to find the first inner point
        lpsolve('set_epsb', lp, 1e-14)
        lpsolve('set_epsd', lp, 1e-14)
        lpsolve('set_epsint', lp, 1e-14)
        lpsolve('set_epsel', lp, 1e-8)
        lpsolve('set_verbose', lp, FULL)
        lpsolve('set_sense', lp, False)

        for eq,a in self.eq_list:
            l = (a[1:]).tolist()
            if eq ==  'eq': l.append(0); lpsolve('add_constraint', lp, l, EQ, -a[0])
            if eq == 'leq': l.append(1); lpsolve('add_constraint', lp, l, LE, -a[0])
            if eq == 'geq': l.append(1); lpsolve('add_constraint', lp, l, GE, -a[0])

        for i in range(self.nVars):
            q = np.zeros(self.nVars+1)
            q[[i,-1]] = -1, 1
            lpsolve('add_constraint', lp, q.tolist(), LE, 0)

        o = np.zeros(self.nVars+1)
        o[-1] = 1
        lpsolve('set_obj_fn', lp, o.tolist())
        while True:
            result = lpsolve('solve', lp)
            if   result in [OPTIMAL, TIMEOUT]:   break
            elif result == SUBOPTIMAL: continue
            elif result == INFEASIBLE: raise SamplexNoSolutionError()
            elif result == UNBOUNDED: raise SamplexUnboundedError()
            else:
                Log( result )
                raise SamplexUnexpectedError("unknown pivot result %i from linear solver." % result)

        objv  = np.array(lpsolve('get_objective', lp))
        v1    = np.array(lpsolve('get_variables', lp)[0])
        assert len(v1) == lpsolve('get_Norig_columns', lp)
        assert len(v1) == self.nVars+1
        del lp

        v1 = v1[:-1] # Remove the temporary variable that tracks the distance from the simplex boundary
        v1[np.abs(v1) < 1e-14] = 0
        assert np.all(v1 >= 0), v1[v1 < 0]

        ok,fail_count = self.in_simplex(v1, eq_tol=1e-12, tol=0, verbose=1)
        ok,fail_count = self.in_simplex(v1, eq_tol=1e-12, tol=-1e-13, verbose=1)
        assert ok, len(fail_count)
        newp[:] = v1
        self.project(newp)
        ok,fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=0, verbose=1)
        ok,fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=-1e-5, verbose=1)
Example 31
			tmp += [1]
			if examinee in examinees:
				examinees[examinee] += [variable]
			else:
				examinees[examinee] = [variable]
		# one examination at most per timeslot
		constraints += [tmp + [0] * (len(function) - len(possibilities))]

for examinee in examinees:
	tmp = [0] * len(function)
	for variable in examinees[examinee]:
		tmp[variable] = 1
	# one examination at most per examinee
	constraints += [tmp]

lp = lpsolve('make_lp', len(constraints), len(function))
lpsolve('set_verbose', lp, 'IMPORTANT')
lpsolve('set_mat', lp, constraints)
lpsolve('set_rh_vec', lp, [1] * len(constraints))
lpsolve('set_constr_type', lp, ['LE'] * len(constraints))
lpsolve('set_obj_fn', lp, function)
lpsolve('set_maxim', lp)
lpsolve('set_binary', lp, [True] * len(function))
#lpsolve('write_lp', lp, 'a.lp')
lpsolve('solve', lp)
variables = lpsolve('get_variables', lp)[0]

'''
for date, times in examinations:
	for time, possibilities in times:
		for variable, examinee, examiners in possibilities:
Example 32
def _clarOptimization(mol, constraints=None, maxNum=None):
    """
    Implements linear programming algorithm for finding Clar structures. This algorithm maximizes the number
    of Clar sextets within the constraints of molecular geometry and atom valency.

    Returns a list of valid Clar solutions in the form of a tuple, with the following entries:
        [0] Molecule object
        [1] List of aromatic rings
        [2] List of bonds
        [3] Optimization solution

    The optimization solution is a list of boolean values with sextet assignments followed by double bond assignments,
    with indices corresponding to the list of aromatic rings and list of bonds, respectively.

    Method adapted from:
        Hansen, P.; Zheng, M. The Clar Number of a Benzenoid Hydrocarbon and Linear Programming.
            J. Math. Chem. 1994, 15 (1), 93–107.
    """
    cython.declare(molecule=Molecule, asssr=list, exo=list, l=cython.int, m=cython.int, n=cython.int,
                   a=list, objective=list, status=cython.int, solution=list, innerSolutions=list)

    from lpsolve55 import lpsolve

    # Make a copy of the molecule so we don't destroy the original
    molecule = mol.copy(deep=True)

    asssr = molecule.getAromaticSSSR()[0]

    if not asssr:
        return []

    # Get list of atoms that are in rings
    atoms = set()
    for ring in asssr:
        atoms.update(ring)
    atoms = list(atoms)

    # Get list of bonds involving the ring atoms, ignoring bonds to hydrogen
    bonds = set()
    for atom in atoms:
        bonds.update([atom.bonds[key] for key in atom.bonds.keys() if key.isNonHydrogen()])
    bonds = list(bonds)

    # Identify exocyclic bonds, and save their bond orders
    exo = []
    for bond in bonds:
        if bond.atom1 not in atoms or bond.atom2 not in atoms:
            if bond.isDouble():
                exo.append(1)
            else:
                exo.append(0)
        else:
            exo.append(None)

    # Dimensions
    l = len(asssr)
    m = len(atoms)
    n = l + len(bonds)

    # Connectivity matrix which indicates which rings and bonds each atom is in
    # Part of equality constraint Ax=b
    a = []
    for atom in atoms:
        inRing = [1 if atom in ring else 0 for ring in asssr]
        inBond = [1 if atom in [bond.atom1, bond.atom2] else 0 for bond in bonds]
        a.append(inRing + inBond)

    # Objective vector for optimization: sextets have a weight of 1, double bonds have a weight of 0
    objective = [1] * l + [0] * len(bonds)

    # Solve LP problem using lpsolve
    lp = lpsolve('make_lp', m, n)               # initialize lp with constraint matrix with m rows and n columns
    lpsolve('set_verbose', lp, 2)               # reduce messages from lpsolve
    lpsolve('set_obj_fn', lp, objective)        # set objective function
    lpsolve('set_maxim', lp)                    # set solver to maximize objective
    lpsolve('set_mat', lp, a)                   # set left hand side to constraint matrix
    lpsolve('set_rh_vec', lp, [1] * m)          # set right hand side to 1 for all constraints
    lpsolve('set_constr_type', lp, ['='] * m)   # set all constraints as equality constraints
    lpsolve('set_binary', lp, [True] * n)       # set all variables to be binary

    # Constrain values of exocyclic bonds, since we don't want to modify them
    for i in range(l, n):
        if exo[i - l] is not None:
            # NOTE: lpsolve indexes from 1, so the variable we're changing should be i + 1
            lpsolve('set_bounds', lp, i + 1, exo[i - l], exo[i - l])

    # Add constraints to problem if provided
    if constraints is not None:
        for constraint in constraints:
            lpsolve('add_constraint', lp, constraint[0], '<=', constraint[1])

    status = lpsolve('solve', lp)
    objVal, solution = lpsolve('get_solution', lp)[0:2]
    lpsolve('delete_lp', lp)  # Delete the LP problem to clear up memory

    # Check that optimization was successful
    if status != 0:
        raise ILPSolutionError('Optimization could not find a valid solution.')

    # Check that the result contains at least one aromatic sextet
    if objVal == 0:
        return []

    # Check that the solution contains the maximum number of sextets possible
    if maxNum is None:
        maxNum = objVal  # This is the first solution, so the result should be an upper limit
    elif objVal < maxNum:
        raise ILPSolutionError('Optimization obtained a sub-optimal solution.')

    if any([x != 1 and x != 0 for x in solution]):
        raise ILPSolutionError('Optimization obtained a non-integer solution.')

    # Generate constraints based on the solution obtained
    y = solution[0:l]
    new_a = y + [0] * len(bonds)
    new_b = sum(y) - 1
    if constraints is not None:
        constraints.append((new_a, new_b))
    else:
        constraints = [(new_a, new_b)]

    # Run optimization with additional constraints
    try:
        innerSolutions = _clarOptimization(mol, constraints=constraints, maxNum=maxNum)
    except ILPSolutionError:
        innerSolutions = []

    return innerSolutions + [(molecule, asssr, bonds, solution)]
Example 33
def assign_sections(tas, prioritize=False, analyze=False):
    """
    tas: a list of ta objects
    i = index of sections
    j = index of tas
    The columns, x_i_j, go as follows:
        x_0_0, x_1_0, x_2_0, ..., x_0_1, ..., x_M_N
    """

    M = len(tas[0].rankings) # number of sections
    N = len(tas)             # number of tas

    f = make_obj_f(tas, prioritize)
    A = make_coeff_m(M, N)
    b = make_b_v(tas, M, N)
    e = make_e_v(M, N)
    v = [1 for _ in range(M*N)]

    lp = lpm.lp_maker(f, A, b, e, None, v)

    # set branch and bound depth
    lps.lpsolve('set_bb_depthlimit', lp, 0)
    # set all variables to binary
    lps.lpsolve('set_binary', lp, v)
    # set lp to minimize the objective function
    lps.lpsolve('set_minim', lp)

    lps.lpsolve('write_lp', lp, LP_OUT)
    lps.lpsolve('solve', lp)
    res = lps.lpsolve('get_variables', lp)[0]
    lps.lpsolve('delete_lp', lp)
    parse_results(res, tas, M, analyze)
Example 34
    def next(self, nsolutions=None):

        Log( "Getting solutions" )

        self.start_new_objective('random')
        #lpsolve('set_simplextype', self.lp, lp.SIMPLEX_DUAL_DUAL)
        #lpsolve('set_simplextype', self.lp, lp.SIMPLEX_PRIMAL_PRIMAL)
        #lpsolve('set_pivoting', self.lp, PRICER_DANTZIG)
        #lpsolve('set_pivoting', self.lp, lp.PRICER_DANTZIG | lp.PRICE_ADAPTIVE)
        #lpsolve('set_pivoting', self.lp, lp.PRICER_STEEPESTEDGE | lp.PRICE_ADAPTIVE)
        #lpsolve('set_presolve', self.lp, PRESOLVE_LINDEP)
        #lpsolve('set_timeout', self.lp, 0)
        res = lpsolve('solve', self.lp)
        #lpsolve('set_presolve', self.lp, PRESOLVE_NONE)
        restxt = {NOMEMORY:          'NOMEMORY',
                  OPTIMAL:           'OPTIMAL',
                  SUBOPTIMAL:        'SUBOPTIMAL',
                  INFEASIBLE:        'INFEASIBLE',
                  UNBOUNDED:         'UNBOUNDED',
                  DEGENERATE:        'DEGENERATE',
                  NUMFAILURE:        'NUMFAILURE',
                  USERABORT:         'USERABORT',
                  TIMEOUT:           'TIMEOUT',
                  PRESOLVED:         'PRESOLVED'}[res]

        print 'solve result %s (%i)' % (restxt, res)

        if res != OPTIMAL: return

        #lpsolve('set_timeout', self.lp, 1)

        Log( "------------------------------------" )
        Log( "Found feasible" )
        Log( "------------------------------------" )

        self.curr_sol = self.package_solution()                
        self.prev_sol = self.curr_sol.vertex.copy()

        if self.sol_type in ['vertex', 'interior']:
            self.sum_ln_k = 0
            self.n_solutions = 0
            while self.n_solutions != nsolutions:
                self.iteration=0
                self.n_solutions += 1
                while True:
                    self.next_solution()
                    self.curr_sol = self.package_solution()                

                    p = self.prepare_return_sol()

                    if p is not None: 
                        break
                    
                    print 'SAME VERTEX!'

                yield p
        else:
            self.sum_ln_k = 0
            self.n_solutions = 0
            all = []
            while self.n_solutions != nsolutions*100:
                self.iteration=0
                self.n_solutions += 1
                while True:
                    self.next_solution()
                    self.curr_sol = self.package_solution()                

                    all.append(self.curr_sol)
                    break

            while nsolutions > 0:
                yield self.CLT(all)
                nsolutions -= 1
Example 35
    def test_lpsolve(self):
        """ test lpsolve

            (http://lpsolve.sourceforge.net)
        """

        try:
            from lpsolve55 import lpsolve
        except ImportError:
            raise SkipTest('lpsolve is not available')

        print '\n------------------------------ lpsolve ------------------------------'
        obj = self.f.tolist()
        lp = lpsolve('make_lp', 0, len(obj))
        lpsolve('set_verbose', lp, 'IMPORTANT')
        lpsolve('set_obj_fn', lp, obj)

        i = 0
        for con in self.A:
            lpsolve('add_constraint', lp, con.tolist(), 'LE', self.b[i])
            i = i + 1

        for i in range(len(self.lb)):
            lpsolve('set_lowbo', lp, i + 1, self.lb[i])
            lpsolve('set_upbo', lp, i + 1, self.ub[i])

        results = lpsolve('solve', lp)

        result_text = [
            'OPTIMAL      An optimal solution was obtained',
            'SUBOPTIMAL   The model is sub-optimal. Only happens if there are integer variables and there is already an integer solution found. The solution is not guaranteed the most optimal one.',
            'INFEASIBLE   The model is infeasible',
            'UNBOUNDED    The model is unbounded',
            'DEGENERATE   The model is degenerative',
            'NUMFAILURE   Numerical failure encountered',
            'USERABORT    The abort routine returned TRUE. See put_abortfunc',
            'TIMEOUT      A timeout occurred. A timeout was set via set_timeout',
            'N/A',
            'PRESOLVED    The model could be solved by presolve. This can only happen if presolve is active via set_presolve',
            'PROCFAIL     The B&B routine failed',
            'PROCBREAK    The B&B was stopped because of a break-at-first (see set_break_at_first) or a break-at-value (see set_break_at_value)',
            'FEASFOUND    A feasible B&B solution was found',
            'NOFEASFOUND  No feasible B&B solution found'
        ]

        print 'results: (%d)' % results, result_text[results]
        print 'f:', lpsolve('get_objective', lp)
        print 'x:', lpsolve('get_variables', lp)[0]

        lpsolve('delete_lp', lp)
Example 36
    def next(self, nsolutions=None):
    # this does the first part of the cpu intensive tasks

        Log( '=' * 80 )
        Log( 'Simplex Random Walk' )
        Log( '=' * 80 )

        Log( "    %i equations" % len(self.eq_list) )

        Log( "%6s %6s %6s\n%6i %6i %6i" 
            % (">=", "<=", "=", self.geq_count, self.leq_count, self.eq_count) )


        if nsolutions == 0: return

        assert nsolutions is not None

        dim = self.nVars
        dof = dim - self.eq_count

        burnin_len  = max(10, int(self.burnin_factor * dof))
        redo        = max(100,  int((dof ** self.redo_exp) * self.redo_factor))

        nmodels = nsolutions
        nthreads = self.nthreads

        self.stride = int(dim+1)

        n_stored = 0
        self.dim = dim
        self.dof = dof
        self.redo = redo

        self.burnin_len = burnin_len

        accept_rate     = self.accept_rate
        accept_rate_tol = self.accept_rate_tol

        store = np.zeros((dim, 1+burnin_len), order='Fortran', dtype=np.float64)
        newp = np.zeros(dim, order='C', dtype=np.float64)
        eval  = np.zeros(dim, order='C', dtype=np.float64)
        evec  = np.zeros((dim,dim), order='F', dtype=np.float64)

        self.eqs = np.zeros((self.eqn_count+dim,dim+1), order='C', dtype=np.float64)
        for i,[c,e] in enumerate(self.eq_list):
            self.eqs[i,:] = e
        for i in xrange(dim):
            self.eqs[self.eqn_count+i,1+i] = 1

        self.dist_eqs = np.zeros((self.eqn_count-self.eq_count,dim+1), order='C', dtype=np.float64)
        i=0
        for c,e in self.eq_list:
            if c == 'eq':
                continue
            elif c == 'leq':
                p = e
            elif c == 'geq':
                p = -e
            self.dist_eqs[i,:] = p
            i += 1

        Log( 'Using lpsolve %s' % lpsolve('lp_solve_version') )
        Log( "random seed = %s" % self.random_seed )
        Log( "threads = %s" % self.nthreads )
        Log( "acceptence rate = %s" % self.accept_rate )
        Log( "acceptence rate tolerance = %s" % self.accept_rate_tol )
        Log( "dof = %s" % self.dof)
        Log( "sample distance = max(100,%s * %s^%s) = %s" % (self.redo_factor, self.dof, self.redo_exp, redo) )
        Log( "starting twiddle = %s" % self.twiddle )
        Log( "burn-in length = %s" % burnin_len )

        time_begin_next = time.clock()

        #-----------------------------------------------------------------------
        # Create pseudo inverse matrix to reproject samples back into the
        # solution space.
        #-----------------------------------------------------------------------
        P = np.eye(dim) 
        if self.eq_count > 0:
            self.A = np.zeros((self.eq_count, dim), order='C', dtype=np.float64)
            self.b = np.zeros(self.eq_count, order='C', dtype=np.float64)
            for i,[c,e] in enumerate(self.eq_list[:self.eq_count]):
                self.A[i] = e[1:]
                self.b[i] = e[0]
            self.Apinv = pinv(self.A)
            P -= np.dot(self.Apinv, self.A)
        else:
            self.A = None
            self.B = None
            self.Apinv = None

        ev, evec = eigh(P)
        #-----------------------------------------------------------------------


        #-----------------------------------------------------------------------
        # Find a point that is completely inside the simplex
        #-----------------------------------------------------------------------
        Log('Finding first inner point')
        time_begin_inner_point = time.clock()
        self.inner_point(newp)
        time_end_inner_point = time.clock()
        ok,fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=0, verbose=1)
        assert ok

        self.avg0 = newp

#       eqs  = self.eqs.copy('A')
#       eqs[:,1:] = np.dot(self.eqs[:,1:], evec)

#       print newp

#       S = zeros(self.eqs.shape[0])
#       newp[:] = np.dot(evec.T, newp)
#       newp0 = newp.copy()
#       steps = newp.copy()
#       for q in range(100):
#           csamplex.refine_center(self, eqs, newp, ev, S, steps)
#           d = newp - newp0
#           #print d
#           print norm(d)
#           #print
#           newp0 = newp.copy()

#       #assert 0
#       newp[:] = np.dot(evec, newp)


        store[:,0] = newp
        n_stored = 1


        #-----------------------------------------------------------------------
        # Estimate the eigenvectors of the simplex
        #-----------------------------------------------------------------------
        Log('Estimating eigenvectors')
        time_begin_est_eigenvectors = time.clock()
        self.measured_ev(newp, ev, eval, evec)
        time_end_est_eigenvectors = time.clock()

        #-----------------------------------------------------------------------
        # Now we can start the random walk
        #-----------------------------------------------------------------------

        Log( "Getting solutions" )

        q = MP.Queue()

        #-----------------------------------------------------------------------
        # Launch the threads
        #-----------------------------------------------------------------------
        threads = []
        models_per_thread = nmodels // nthreads
        models_under      = nmodels - nthreads*models_per_thread
        id,N = 0,0
        while id < nthreads and N < nmodels:
            n = models_per_thread
            if id < models_under:
                n += 1
            assert n > 0
            Log( 'Thread %i gets %i' % (id,n) )
            cmdq = MP.Queue()
            ackq = MP.Queue()
            thr = MP.Process(target=rwalk_burnin, args=(id, n, int(np.ceil(burnin_len/nthreads)), self, q, cmdq, ackq, newp, self.twiddle, eval.copy('A'), evec.copy('A')))
            
            thr.daemon = False  # RK: make non daemonic threads. Make sure to
                                # shut them down afterwards! But this way, one
                                # can use glass inside a celery worker (which
                                # is a daemon. since daemonic processes are not
                                # allowed to start daemonic processes, this has
                                # to be non daemonic)
            
            threads.append([thr,cmdq,ackq])
            N += n
            id += 1

        assert N == nmodels

        for thr,cmdq,_ in threads:
            thr.start()
            cmdq.put(['CONT'])

        def drainq(q):
            try:
                while True:
                    q.get(block=False)
            except QueueEmpty:
                pass

        def pause_threads(threads):
            for _,cmdq,ackq in threads:
                cmdq.put(['WAIT'])
                assert ackq.get() == 'OK'

        def adjust_threads(i, cont_cmd):
            pause_threads(threads)
            drainq(q)
            Log( 'Computing eigenvalues... [%i/%i]' % (i, burnin_len) )
            self.compute_eval_evec(store, eval, evec, n_stored)

            # new twiddle <-- average twiddle
            t = 0
            for _,cmdq,ackq in threads:
                cmdq.put(['REQ TWIDDLE'])
                t += ackq.get()
            t /= len(threads)

            Log( 'New twiddle %f' % t )
            for _,cmdq,_ in threads:
                cmdq.put(['NEW DATA', [eval.copy('A'), evec.copy('A'), t]])
                cmdq.put([cont_cmd])

        #-----------------------------------------------------------------------
        # Burn-in
        #-----------------------------------------------------------------------
        Status("computing eigenvalues", i=0, of=burnin_len)
        time_begin_burnin = time.clock()
        compute_eval_window = 2 * self.dof
        j = 0
        for i in xrange(burnin_len):
            Status("computing eigenvalues", i=i, of=burnin_len)
            j += 1
            k,vec = q.get()

            store[:, n_stored] = vec
            n_stored += 1

            if j == compute_eval_window:
                j = 0
                adjust_threads(i+1,'CONT')
                compute_eval_window = int(0.1*burnin_len + 1)
            elif len(threads) < compute_eval_window:
                threads[k][1].put(['CONT'])
        time_end_burnin = time.clock()
        Status("computing eigenvalues", i=burnin_len, of=burnin_len)

        #-----------------------------------------------------------------------
        # Actual random walk
        #-----------------------------------------------------------------------
        Status("generating models", i=0, of=nmodels)
        
        time_begin_get_models = time.clock()
        adjust_threads(burnin_len, 'RWALK')
        i=0
        while i < nmodels:
            k,vec = q.get()
            t = np.zeros(dim+1, order='Fortran', dtype=np.float64)
            t[1:] = vec
            i += 1
            Log( '%i models left to generate' % (nmodels-i), overwritable=True)
            Status("generating models", i=i, of=nmodels)
            yield t

        time_end_get_models = time.clock()
        Status("generating models", i=nmodels, of=nmodels)

        #-----------------------------------------------------------------------
        # Stop the threads and get their running times.
        #-----------------------------------------------------------------------
        time_threads = []
        for thr,cmdq,ackq in threads:
            cmdq.put(['STOP'])
            m,t = ackq.get()
            assert m == 'TIME'
            time_threads.append(t)
            #thr.terminate()

        time_end_next = time.clock()

        max_time_threads = np.amax(time_threads) if time_threads else 0
        avg_time_threads = np.mean(time_threads) if time_threads else 0

        Log( '-'*80 )
        Log( 'SAMPLEX TIMINGS' )
        Log( '-'*80 )
        Log( 'Initial inner point    %.2fs' % (time_end_inner_point - time_begin_inner_point) )
        Log( 'Estimate eigenvectors  %.2fs' % (time_end_est_eigenvectors - time_begin_est_eigenvectors) )
        Log( 'Burn-in                %.2fs' % (time_end_burnin - time_begin_burnin) )
        Log( 'Modeling               %.2fs' % (time_end_get_models - time_begin_get_models) )
        Log( 'Max/Avg thread time    %.2fs %.2fs' % (max_time_threads, avg_time_threads) )
        Log( 'Total wall-clock time  %.2fs' % (time_end_next - time_begin_next) )
        Log( '-'*80 )
Example 37
from lpsolve55 import lpsolve, IMPORTANT

lp = lpsolve('make_lp', 0, 4)
lpsolve('set_obj_fn', lp, [1, 3, 6.24, 0.1])

lpsolve('add_constraint', lp, [0, 78.26, 0, 2.9], 'GE', 92.3)
lpsolve('add_constraint', lp, [0.24, 0, 11.31, 0], 'LE', 14.8)
lpsolve('add_constraint', lp, [12.68, 0, 0.08, 0.9], 'GE', 4)
lpsolve('set_lowbo', lp, 1, 28.6)
lpsolve('set_lowbo', lp, 4, 18)
lpsolve('set_upbo', lp, 4, 48.98)

lpsolve('set_col_name', lp, 1, 'x1')
lpsolve('set_col_name', lp, 2, 'x2')
lpsolve('set_col_name', lp, 3, 'x3')
lpsolve('set_col_name', lp, 4, 'x4')
lpsolve('set_row_name', lp, 1, 'CONST1')
lpsolve('set_row_name', lp, 2, 'CONST2')
lpsolve('set_row_name', lp, 3, 'CONST3')
lpsolve('write_lp', lp, 'a.lp')

lpsolve('set_verbose', lp, IMPORTANT)
lpsolve('solve', lp)
print lpsolve('get_objective', lp)
print lpsolve('get_variables', lp)
print lpsolve('get_constraints', lp)[0]
Example 38
def calculate_lpsolve(f, A, b, resolution):
    n=len(f)
    lp = lpsolve('make_lp', 0, len(f))
    if not debug:
        lpsolve('set_verbose', lp, IMPORTANT)
    lpsolve('set_obj_fn', lp, [ -x for x in f ])
    for i in range(len(A)):
        lpsolve('add_constraint', lp, A[i], LE, b[i])    
    lpsolve('set_lowbo', lp, [0]*n)
    lpsolve('set_upbo', lp, [slot_size(resolution)]*n)
    lpsolve('solve', lp)
    result = lpsolve('get_variables', lp)[0]
    if not type(result) == list:
        result = [ result ]
    lpsolve('delete_lp', lp)    
    return result
Example 39
 def geq(self, a):
     if not self.set_bound(a, 'low'):
         if self.add_noise: a = self.noise(a)
         lpsolve('add_constraint', self.lp, a[1:], GE, -a[0])
         self.geq_count += 1
         self.ineqs.append([a[1:], GE, -a[0]])
Example 40
def det_rounding(scp_gen):
    # linear relaxation
    n = scp_gen.universal_set_size
    lp = lpsolve('make_lp', 0, n)
    lpsolve('set_maxim', lp)
    lpsolve('set_obj_fn', lp, [1] * n)
    for i, v in enumerate(scp_gen.get_cols()):
        lpsolve('add_constraint', lp, v, 'LE', scp_gen.cost[i])
    lpsolve('set_lowbo', lp, [0] * n)
    lpsolve('set_verbose', lp, IMPORTANT)
    lpsolve('solve', lp)

    # deterministic rounding
    float_ans = lpsolve('get_variables', lp)[0]
    lpsolve('delete_lp', lp)
    I = []
    for i, v in enumerate(scp_gen.get_cols()):
        s = sum(float_ans[j] for j, e in enumerate(v) if e != 0)
        if  s == scp_gen.cost[i]:
            I.append(1)
        else:
            I.append(0)
    return I
Example 41
from lpsolve55 import lpsolve, LE #@UnresolvedImport

lp = lpsolve('make_lp', 0, 8)
#lpsolve('set_verbose', lp, IMPORTANT)
lpsolve('set_obj_fn', lp, [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0])
lpsolve('add_constraint', lp, [1.0, 1.0, 1.0, 0, 0, 0, 0, 0], LE, 8.0)
lpsolve('add_constraint', lp, [0, 0, 0, 1.0, 1.0, 0, 0, 0], LE, 4.0)
lpsolve('add_constraint', lp, [0, 0, 0, 0, 0, 1.0, 0, 0], LE, 8.0)
lpsolve('add_constraint', lp, [0, 0, 0, 0, 0, 0, 1.0, 1.0], LE, 12.0)
lpsolve('add_constraint', lp, [1.0, 0, 0, 1.0, 0, 0, 0, 0], LE, 10.0)
lpsolve('add_constraint', lp, [0, 1.0, 0, 0, 1.0, 1.0, 1.0, 0], LE, 10.0)
lpsolve('add_constraint', lp, [0, 0, 1.0, 0, 0, 0, 0, 1.0], LE, 10.0)
lpsolve('set_lowbo', lp, [0, 0.0, 0, 0, 0, 0, 0, 0])
lpsolve('set_upbo', lp, [10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])
lpsolve('solve', lp)
print lpsolve('get_variables', lp)[0]
lpsolve('delete_lp', lp)
Example 42
def solve_bp(scp_gen):
    print "--- SOLVE BINARY PROGRAMMING ---"
    lp = lpsolve('make_lp', 0, scp_gen.covering_set_num)
    lpsolve('set_obj_fn', lp, scp_gen.cost)
    lpsolve('set_binary', lp, [True] * scp_gen.covering_set_num)
    for v in scp_gen.get_rows():
        lpsolve('add_constraint', lp, v, 'GE', 1)
    lpsolve('set_verbose', lp, IMPORTANT)
    lpsolve('write_lp', lp, 'b.lp')

    lpsolve('solve', lp)
    print "solution of bp:", lpsolve('get_objective', lp)
    V = lpsolve('get_variables', lp)[0]
    print "solution vector:", V
    print scp_gen.is_covered([i for i, v in enumerate(V) if v != 0.0]), "\n"
    lpsolve('delete_lp', lp)
Example 43
    def next(self, nsolutions=None):

        Log("=" * 80)
        Log("Simplex Random Walk")
        Log("=" * 80)

        Log("    %i equations" % len(self.eq_list))

        Log("%6s %6s %6s\n%6i %6i %6i" % (">=", "<=", "=", self.geq_count, self.leq_count, self.eq_count))

        if nsolutions == 0:
            return

        assert nsolutions is not None

        dim = self.nVars
        dof = dim - self.eq_count

        burnin_len = max(10, int(self.burnin_factor * dof))
        redo = max(100, int((dof ** self.redo_exp) * self.redo_factor))

        nmodels = nsolutions
        nthreads = self.nthreads

        self.stride = int(dim + 1)

        n_stored = 0
        self.dim = dim
        self.dof = dof
        self.redo = redo

        self.burnin_len = burnin_len

        accept_rate = self.accept_rate
        accept_rate_tol = self.accept_rate_tol

        store = np.zeros((dim, 1 + burnin_len), order="Fortran", dtype=np.float64)
        newp = np.zeros(dim, order="C", dtype=np.float64)
        eval = np.zeros(dim, order="C", dtype=np.float64)
        evec = np.zeros((dim, dim), order="F", dtype=np.float64)

        self.eqs = np.zeros((self.eqn_count + dim, dim + 1), order="C", dtype=np.float64)
        for i, [c, e] in enumerate(self.eq_list):
            self.eqs[i, :] = e
        for i in xrange(dim):
            self.eqs[self.eqn_count + i, 1 + i] = 1

        self.dist_eqs = np.zeros((self.eqn_count - self.eq_count, dim + 1), order="C", dtype=np.float64)
        i = 0
        for c, e in self.eq_list:
            if c == "eq":
                continue
            elif c == "leq":
                p = e
            elif c == "geq":
                p = -e
            self.dist_eqs[i, :] = p
            i += 1

        Log("Using lpsolve %s" % lpsolve("lp_solve_version"))
        Log("random seed = %s" % self.random_seed)
        Log("threads = %s" % self.nthreads)
        Log("acceptence rate = %s" % self.accept_rate)
        Log("acceptence rate tolerance = %s" % self.accept_rate_tol)
        Log("dof = %s" % self.dof)
        Log("sample distance = max(100,%s * %s^%s) = %s" % (self.redo_factor, self.dof, self.redo_exp, redo))
        Log("starting twiddle = %s" % self.twiddle)
        Log("burn-in length = %s" % burnin_len)

        time_begin_next = time.clock()

        # -----------------------------------------------------------------------
        # Create pseudo inverse matrix to reproject samples back into the
        # solution space.
        # -----------------------------------------------------------------------
        P = np.eye(dim)
        if self.eq_count > 0:
            self.A = np.zeros((self.eq_count, dim), order="C", dtype=np.float64)
            self.b = np.zeros(self.eq_count, order="C", dtype=np.float64)
            for i, [c, e] in enumerate(self.eq_list[: self.eq_count]):
                self.A[i] = e[1:]
                self.b[i] = e[0]
            self.Apinv = pinv(self.A)
            P -= np.dot(self.Apinv, self.A)
        else:
            self.A = None
            self.B = None
            self.Apinv = None

        ev, evec = eigh(P)
        # -----------------------------------------------------------------------

        # -----------------------------------------------------------------------
        # Find a point that is completely inside the simplex
        # -----------------------------------------------------------------------
        Log("Finding first inner point")
        time_begin_inner_point = time.clock()
        self.inner_point(newp)
        time_end_inner_point = time.clock()
        ok, fail_count = self.in_simplex(newp, eq_tol=1e-12, tol=0, verbose=1)
        assert ok

        self.avg0 = newp

        #       eqs  = self.eqs.copy('A')
        #       eqs[:,1:] = np.dot(self.eqs[:,1:], evec)

        #       print newp

        #       S = zeros(self.eqs.shape[0])
        #       newp[:] = np.dot(evec.T, newp)
        #       newp0 = newp.copy()
        #       steps = newp.copy()
        #       for q in range(100):
        #           csamplex.refine_center(self, eqs, newp, ev, S, steps)
        #           d = newp - newp0
        #           #print d
        #           print norm(d)
        #           #print
        #           newp0 = newp.copy()

        #       #assert 0
        #       newp[:] = np.dot(evec, newp)

        store[:, 0] = newp
        n_stored = 1

        # -----------------------------------------------------------------------
        # Estimate the eigenvectors of the simplex
        # -----------------------------------------------------------------------
        Log("Estimating eigenvectors")
        time_begin_est_eigenvectors = time.clock()
        self.measured_ev(newp, ev, eval, evec)
        time_end_est_eigenvectors = time.clock()

        # -----------------------------------------------------------------------
        # Now we can start the random walk
        # -----------------------------------------------------------------------

        Log("Getting solutions")

        q = MP.Queue()

        # -----------------------------------------------------------------------
        # Launch the threads
        # -----------------------------------------------------------------------
        threads = []
        models_per_thread = nmodels // nthreads
        models_under = nmodels - nthreads * models_per_thread
        id, N = 0, 0
        while id < nthreads and N < nmodels:
            n = models_per_thread
            if id < models_under:
                n += 1
            assert n > 0
            Log("Thread %i gets %i" % (id, n))
            cmdq = MP.Queue()
            ackq = MP.Queue()
            thr = MP.Process(
                target=rwalk_burnin,
                args=(
                    id,
                    n,
                    int(np.ceil(burnin_len / nthreads)),
                    self,
                    q,
                    cmdq,
                    ackq,
                    newp,
                    self.twiddle,
                    eval.copy("A"),
                    evec.copy("A"),
                ),
            )
            threads.append([thr, cmdq, ackq])
            N += n
            id += 1

        assert N == nmodels

        for thr, cmdq, _ in threads:
            thr.start()
            cmdq.put(["CONT"])

        def drainq(q):
            try:
                while True:
                    q.get(block=False)
            except QueueEmpty:
                pass

        def pause_threads(threads):
            for _, cmdq, ackq in threads:
                cmdq.put(["WAIT"])
                assert ackq.get() == "OK"

        def adjust_threads(i, cont_cmd):
            pause_threads(threads)
            drainq(q)
            Log("Computing eigenvalues... [%i/%i]" % (i, burnin_len))
            self.compute_eval_evec(store, eval, evec, n_stored)

            # new twiddle <-- average twiddle
            t = 0
            for _, cmdq, ackq in threads:
                cmdq.put(["REQ TWIDDLE"])
                t += ackq.get()
            t /= len(threads)

            Log("New twiddle %f" % t)
            for _, cmdq, _ in threads:
                cmdq.put(["NEW DATA", [eval.copy("A"), evec.copy("A"), t]])
                cmdq.put([cont_cmd])

        # -----------------------------------------------------------------------
        # Burn-in
        # -----------------------------------------------------------------------
        time_begin_burnin = time.clock()
        compute_eval_window = 2 * self.dof
        j = 0
        for i in xrange(burnin_len):
            j += 1
            k, vec = q.get()

            store[:, n_stored] = vec
            n_stored += 1

            if j == compute_eval_window:
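                # Pause the workers, re-estimate the simplex eigenvectors from the
                # stored points, broadcast them together with the averaged twiddle,
                # and resume; after this first update the window widens to ~10% of
                # burnin_len (see adjust_threads above).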
                j = 0
                adjust_threads(i + 1, "CONT")
                compute_eval_window = int(0.1 * burnin_len + 1)
            elif len(threads) < compute_eval_window:
                threads[k][1].put(["CONT"])
        time_end_burnin = time.clock()

        # -----------------------------------------------------------------------
        # Actual random walk
        # -----------------------------------------------------------------------
        time_begin_get_models = time.clock()
        adjust_threads(burnin_len, "RWALK")
        i = 0
        while i < nmodels:
            k, vec = q.get()
            t = np.zeros(dim + 1, order="Fortran", dtype=np.float64)
            t[1:] = vec
            i += 1
            Log("%i models left to generate" % (nmodels - i), overwritable=True)
            yield t

        time_end_get_models = time.clock()

        # -----------------------------------------------------------------------
        # Stop the threads and get their running times.
        # -----------------------------------------------------------------------
        time_threads = []
        for thr, cmdq, ackq in threads:
            cmdq.put(["STOP"])
            m, t = ackq.get()
            assert m == "TIME"
            time_threads.append(t)
            # thr.terminate()

        time_end_next = time.clock()

        max_time_threads = np.amax(time_threads) if time_threads else 0
        avg_time_threads = np.mean(time_threads) if time_threads else 0

        Log("-" * 80)
        Log("SAMPLEX TIMINGS")
        Log("-" * 80)
        Log("Initial inner point    %.2fs" % (time_end_inner_point - time_begin_inner_point))
        Log("Estimate eigenvectors  %.2fs" % (time_end_est_eigenvectors - time_begin_est_eigenvectors))
        Log("Burn-in                %.2fs" % (time_end_burnin - time_begin_burnin))
        Log("Modeling               %.2fs" % (time_end_get_models - time_begin_get_models))
        Log("Max/Avg thread time    %.2fs %.2fs" % (max_time_threads, avg_time_threads))
        Log("Total wall-clock time  %.2fs" % (time_end_next - time_begin_next))
        Log("-" * 80)
Example n. 44
0
def assign_sections(students, prioritize=False, debug=False):
    """
    students: a list of student objects
    i = index of sections
    j = index of students
    The columns, x_i_j, go as follows:
        x_0_0, x_1_0, x_2_0, ..., x_0_1, ..., x_M_N
    """

    M = len(students[0].rankings)  # number of sections
    N = len(students)  # number of students

    f = make_obj_f(students, prioritize)
    A = make_coeff_m(M, N)
    b = make_b_v(students, M, N)
    e = make_e_v(M, N)
    v = [1 for _ in range(M * N)]

    lp = lpm.lp_maker(f, A, b, e, None, v)

    # set branch and bound depth to be unlimited
    lps.lpsolve("set_bb_depthlimit", lp, 0)
    # set all variables to binary
    lps.lpsolve("set_binary", lp, v)
    # set lp to minimize the objective function
    lps.lpsolve("set_minim", lp)

    lps.lpsolve("write_lp", lp, LP_OUT)
    lps.lpsolve("solve", lp)
    res = lps.lpsolve("get_variables", lp)[0]
    lps.lpsolve("delete_lp", lp)
    parse_results(res, students, M, debug)
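
The helper functions used above (make_obj_f, make_coeff_m, make_b_v, make_e_v, parse_results) are not shown. As a hedged illustration of the column ordering described in the docstring, where x_i_j sits at column j * M + i, the sketch below builds only the "each student is assigned exactly one section" rows; the function name and the choice of constraints are assumptions made for illustration, not the original make_coeff_m.

def one_section_per_student_rows(M, N):
    """Illustrative sketch only: one row per student j with a 1 in every
    column x_i_j (i = 0..M-1), meant to be paired with right-hand side 1
    and an equality constraint so that sum_i x_i_j == 1 for each student."""
    rows = []
    for j in range(N):
        row = [0] * (M * N)
        for i in range(M):
            row[j * M + i] = 1   # column index of x_i_j in the ordering above
        rows.append(row)
    return rows
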
Example n. 45
0
 def __del__(self):
     if self.lp:
         lpsolve('delete_lp', self.lp)
Example n. 46
0
def det_rounding(scp_gen):
    # linear relaxation
    lp = lpsolve('make_lp', 0, scp_gen.covering_set_num)
    lpsolve('set_obj_fn', lp, scp_gen.cost)
    for v in scp_gen.get_rows():
        lpsolve('add_constraint', lp, v, 'GE', 1)
    for i in range(1, scp_gen.covering_set_num + 1):
        lpsolve('set_lowbo', lp, i, 0)
    lpsolve('set_verbose', lp, IMPORTANT)
    lpsolve('solve', lp)

    # deterministic rounding
    float_ans = lpsolve('get_variables', lp)[0]
    _, f = scp_gen.get_f()
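    # Deterministic 1/f rounding (assuming f is the maximum number of covering
    # sets any element belongs to, the standard choice for this scheme): each
    # covering constraint sums at most f LP variables to >= 1, so at least one
    # of them is >= 1/f, and rounding at that threshold keeps every element
    # covered. This gives the classic f-approximation for set cover.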
    return [1 if x >= 1.0 / float(f) else 0 for x in float_ans]
Example n. 47
0
    def test_lpsolve(self):
        """ test lpsolve

            (http://lpsolve.sourceforge.net)
        """

        try:
            from lpsolve55 import lpsolve
        except ImportError:
            raise SkipTest('lpsolve is not available')

        print '\n------------------------------ lpsolve ------------------------------'
        obj = self.f.tolist()
        lp = lpsolve('make_lp', 0, len(obj))
        lpsolve('set_verbose', lp, 'IMPORTANT')
        lpsolve('set_obj_fn', lp, obj)

        i = 0
        for con in self.A:
            lpsolve('add_constraint', lp, con.tolist(), 'LE', self.b[i])
            i = i+1

        for i in range (len(self.lb)):
            lpsolve('set_lowbo', lp, i+1, self.lb[i])
            lpsolve('set_upbo',  lp, i+1, self.ub[i])

        results = lpsolve('solve', lp)

        result_text = [
            'OPTIMAL      An optimal solution was obtained',
            'SUBOPTIMAL   The model is sub-optimal. Only happens if there are integer variables and there is already an integer solution found. The solution is not guaranteed the most optimal one.',
            'INFEASIBLE   The model is infeasible',
            'UNBOUNDED    The model is unbounded',
            'DEGENERATE   The model is degenerative',
            'NUMFAILURE   Numerical failure encountered',
            'USERABORT    The abort routine returned TRUE. See put_abortfunc',
            'TIMEOUT      A timeout occurred. A timeout was set via set_timeout',
            'N/A',
            'PRESOLVED    The model could be solved by presolve. This can only happen if presolve is active via set_presolve',
            'PROCFAIL     The B&B routine failed',
            'PROCBREAK    The B&B was stopped because of a break-at-first (see set_break_at_first) or a break-at-value (see set_break_at_value)',
            'FEASFOUND    A feasible B&B solution was found',
            'NOFEASFOUND  No feasible B&B solution found'
        ]

        print 'results: (%d)' % results, result_text[results]
        print 'f:', lpsolve('get_objective', lp)
        print 'x:', lpsolve('get_variables', lp)[0]

        lpsolve('delete_lp', lp)