def method_1(self):
		# Build a distance matrix: a list of lists where matrix[i][j] is the distance between point i and point j.
		t0 = dt.time()  # start timing the method
		matrix = []
		meth1sol = []
		for i in li:
			r = []
			for j in li:
				r.append(i.distance(j))
			matrix.append(r)
		n = len(matrix)
		V = range(n)
		E = [(i,j) for i in V for j in V if i!=j]
		# build the subtour-elimination model
		pm.begin('subtour elimination')
		x = pm.var('x', E, bool)
		# minimize the total distance over the selected edges
		pm.minimize(sum(matrix[i][j]*x[i,j] for i,j in E), 'dist')
		# each node needs exactly one outgoing and one incoming edge
		for k in V:
			sum( x[k,j] for j in V if j!=k ) == 1
			sum( x[i,k] for i in V if i!=k ) == 1
		# configure the LP and MIP solvers, suppress their messages, and solve
		pm.solver(float, msg_lev=pm.glpk.GLP_MSG_OFF)
		pm.solver(int, msg_lev=pm.glpk.GLP_MSG_OFF)
		pm.solve()
		global subtourg
		# follow the selected edges from node 0 to extract one subtour
		def subtourl(x):
			succ = 0
			subt = [succ] #start from node 0
			while True:
				succ=sum(x[succ,j].primal*j for j in V if j!=succ)
				if succ == 0: break #tour found
				subt.append(int(succ+0.5))
			return subt
		subtourg = subtourl
		while True:
			# extract the subtour through node 0; if it visits every node we have a
			# full tour, otherwise forbid this subtour and re-solve
			subt = subtourg(x)
			if len(subt) == n:
				#print("Optimal tour length: %g"%pm.vobj())
				#print("Optimal tour:"); print(subt)
				break
			print("New subtour: %r" % subt)
			if len(subt) == 1: break  # something went wrong
			# add a subtour elimination constraint: at least one edge must leave the subtour
			nots = [j for j in V if j not in subt]
			sum(x[i,j] for i in subt for j in nots) >= 1
			pm.solve()  # solve the IP again with the new constraint
		pm.end()
		#print(subt)
		# convert the solution indices back into points for the connect method
		for i in subt:
			meth1sol.append(li[i])
		print(len(meth1sol))
		self.connect(meth1sol, method1_colour, 1)
		t1 = dt.time()
		t = t1 - t0
		t = round(t, 2)  # elapsed time, rounded for convenience
		self.t1.set("time:\n{}s".format(t))
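# The method above assumes a module-level list `li` of point objects exposing a
# .distance(other) method, plus `dt`, `pm` (pymprog) and `method1_colour`
# defined elsewhere in the program.  A hypothetical minimal Point class that
# would satisfy that interface:
import math

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def distance(self, other):
        # Euclidean distance between two points
        return math.hypot(self.x - other.x, self.y - other.y)

# li = [Point(0, 0), Point(3, 4), Point(6, 0), Point(3, -4)]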
Example #2
import numpy as np
import pymprog as mp


def mp_pd_01():

    P = mp.begin('Primal of PD_01')
    x = P.var('x', 3)
    # Max c.x
    # s.t.
    # Ax <= b

    # Set up obj coef c
    c = (5, 4.5, 6)
    # Set up constraints
    A = [(6, 5, 8), (10, 20, 10), (1, 0, 0)]
    # Set up RHS of the constraints
    b = (60, 150, 8)

    P.verbose(True)
    # Plug in objective fn
    P.maximize(sum(c[i] * x[i] for i in range(len(x))))
    # Plug in constraints
    for i in range(len(b)):
        sum(A[i][j] * x[j] for j in range(len(x))) <= b[i]

    P.solve()

    # Dual part
    D = mp.begin('Dual of PD_01')
    D.verbose(True)
    # Min b.y
    # s.t.
    # Transpose(A)*y >= c
    y = D.var('y', len(b))

    # Transpose A with numpy and convert back to nested lists for pymprog
    # (np.array avoids the deprecated np.matrix class)
    B = np.array(A).T.tolist()

    # now we have b, c, y, transpose(A) to run the Dual
    D.minimize(sum(b[i] * y[i] for i in range(len(y))))
    # constraints part
    for i in range(len(c)):
        sum(B[i][j] * y[j] for j in range(len(y))) >= c[i]

    D.solve()

    print("")
    print("Report of Primal_01")
    P.sensitivity()
    print("Report of Dual_01")
    D.sensitivity()

    P.end()
    D.end()

    return
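# Follow-up note (a sketch, not part of the original function): before the
# P.end()/D.end() calls above, the optima can also be read programmatically
# with the accessors used elsewhere on this page, e.g.
#
#     print(P.vobj(), [v.primal for v in x])   # primal optimum and solution
#     print(D.vobj(), [v.primal for v in y])   # dual optimum and solution
#
# By LP strong duality the two objective values coincide at the optimum.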
Example #3
import pymprog as mp


def mp_solve_lp(model_name, obj, c, vars_cnt, A, b, b_condition):
    P = mp.begin(model_name)
    P.verbose(True)

    x = P.var('x', vars_cnt)
    
    if (obj == 'max'):
        P.maximize(sum(c[i]*x[i] for i in range(len(x))))
    else:
        P.minimize(sum(c[i]*x[i] for i in range(len(x))))

    for i in range(len(b)):
        if (b_condition[i] == 'gt'):
            sum(A[i][j]*x[j] for j in range(len(x))) >= b[i]
        elif (b_condition[i] == 'eq'):
            sum(A[i][j]*x[j] for j in range(len(x))) == b[i]
        else:
            sum(A[i][j]*x[j] for j in range(len(x))) <= b[i]

    P.solve()
    P.sensitivity()
    coef_range_lb = {}
    coef_range_ub = {}
    cons_range_lb = {}
    cons_range_ub = {}
    cols = P.get_num_cols()
    idx = 0
    for val in P._viter(range(1, cols+1)):
        coef_range_lb[idx] = val[5]
        coef_range_ub[idx] = val[6]
        idx = idx + 1

    rows = P.get_num_rows()
    idx = 0
    for val in P._citer(range(1, rows+1)):
        cons_range_lb[idx] = val[6]
        cons_range_ub[idx] = val[7]
        idx = idx + 1

    #print(coef_range_lb, coef_range_ub)
    #print(cons_range_lb, cons_range_ub)
    obj_val = P.get_obj_val()

    P.end()
    return obj_val, coef_range_lb, coef_range_ub
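# A usage sketch for mp_solve_lp with hypothetical toy data (not from the
# original project): maximize 3*x0 + 2*x1 subject to x0 + x1 <= 4 and
# x0 + 3*x1 <= 6.  Any b_condition entry other than 'gt' or 'eq' is treated
# as '<=' by the function above.
opt, coef_lb, coef_ub = mp_solve_lp('toy_lp', 'max',
                                    c=[3, 2], vars_cnt=2,
                                    A=[[1, 1], [1, 3]],
                                    b=[4, 6],
                                    b_condition=['lte', 'lte'])
print(opt, coef_lb, coef_ub)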
Example #4
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = [
    'This is the first document.',
    'This document is the second document.',
    'And this is the third one.',
    'Is this the first document?',
]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
print(vectorizer.get_feature_names())

print(X.shape)

print(X)

import pymprog as mp
mp.begin('bike production')
x, y = mp.var('x, y')  # create the two decision variables
mp.maximize(15 * x + 10 * y, 'profit')
x <= 3  # mountain bike limit
y <= 4  # racer production limit
x + y <= 5  # metal finishing limit
mp.solve()
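# Follow-up sketch (not part of the original snippet): read back the optimal
# production plan and profit.  x.primal / y.primal and mp.vobj() are the same
# accessors used in the other examples on this page.
print("produce %g mountain bikes and %g racers" % (x.primal, y.primal))
print("maximum profit: %g" % mp.vobj())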

print("#####################")

import spacy
import spacy_kenlm

nlp = spacy.load('en_core_web_sm')

kenlm_model = spacy_kenlm.spaCyKenLM(
Example #5
File: solver.py  Project: viniciusjk/Tcc
"""
Created on Thu Mar 29 23:09:54 2018

@author: ViniciusJokubauskas
"""


import numpy as np
import pymprog as pp
from variables import *

rNumberClients = range(numberClients)
rNumberChannels = range(numberChannels)
rNumberProducts = range(numberProducts)

t = pp.iprod(rNumberClients,rNumberChannels,rNumberProducts)
pp.begin('basic') # begin modelling
pp.verbose(True)  # be verbose

x = pp.var('choice', t, bool)  # one binary variable per (client, channel, product) triple

pp.maximize(sum(x[i,j,k]*expectedReturn[i][j][k] for i in rNumberClients\
             for j in rNumberChannels for k in rNumberProducts))

  
#channelLimitConstraint:
for j in rNumberChannels:
    sum(x[i,j,k] for i in rNumberClients for k in rNumberProducts)\
    <=channelCap[j]
    
#maxOfferProductConstraint:    
Example #6
def ppSolver(expectedReturn, numberClients, numberChannels, numberProducts,
             cost, budget, channelCap, minOfferProduct, maxOfferProduct,
             rurdleRate):

    startTime = timeit.default_timer()
    rNumberClients = range(numberClients)
    rNumberChannels = range(numberChannels)
    rNumberProducts = range(numberProducts)

    t = pp.iprod(rNumberClients, rNumberChannels, rNumberProducts)
    pp.begin('basic')  # begin modelling
    pp.verbose(False)  # keep the solver output quiet

    x = pp.var('choice', t, bool)

    pp.maximize(sum(x[i,j,k]*expectedReturn[i][j][k] for i in rNumberClients\
                 for j in rNumberChannels for k in rNumberProducts))

    #channelLimitConstraint:
    for j in rNumberChannels:
        sum(x[i,j,k] for i in rNumberClients for k in rNumberProducts)\
        <=channelCap[j]

    #maxOfferProductConstraint:
    for k in rNumberProducts:
        sum(x[i,j,k] for i in rNumberClients for j in rNumberChannels)\
        <=maxOfferProduct[k]

    #minOfferProductConstraint:


#    for k in rNumberProducts:
#        sum(x[i,j,k] for i in rNumberClients for j in rNumberChannels)\
#        >=minOfferProduct[k]

    #budgetConstraint:

    pp.st(sum(x[i,j,k]*cost[j] for i in rNumberClients for j in\
        rNumberChannels for k in rNumberProducts)<=budget,"Budget Constr.")

    #clientLimitConstraint:

    for i in rNumberClients:
        pp.st(sum(x[i,j,k] for j in rNumberChannels for k in rNumberProducts)\
              <=1,"Client "+str(i)+" limit")

    #rurdleRateConstraint:

    pp.st(sum(x[i,j,k]*expectedReturn[i][j][k] for i in rNumberClients for j \
          in rNumberChannels for k in rNumberProducts)>= (1+rurdleRate)\
            *sum(x[i,j,k]*cost[j] for i in rNumberClients for j in\
                rNumberChannels for k in rNumberProducts),"Rurdle Rate Constr")

    pp.solve()  # solve the model

    #    pp.sensitivity() # sensitivity report
    endTime = timeit.default_timer() - startTime
    print("Objetivo encontrado: ", round(pp.vobj(), 2), " em ",
          round(endTime, 3), " segundos")

    print("\n\n\n")
    appendCsv(numberClients, "Solver method", endTime, True,
              round(pp.vobj(), 2))
    pp.end()  #Good habit: do away with the model
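# A minimal standalone sketch of the pp.st() pattern used in ppSolver above
# (toy numbers, not the project's data): pp.st() attaches a readable name to a
# constraint, which makes solver and sensitivity output easier to trace.
import pymprog as pp

pp.begin('st-sketch')
v = pp.var('v', 2)                       # two continuous variables >= 0
pp.maximize(3*v[0] + 2*v[1], 'profit')
pp.st(v[0] + v[1] <= 4, 'capacity')      # named constraint
pp.st(v[0] <= 3, 'v0 limit')
pp.solve()
print(round(pp.vobj(), 2), v[0].primal, v[1].primal)
pp.end()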
Example #7
np.set_printoptions(precision = 2, linewidth = 400)
from random import Random

## Model Data ##
rand = Random()
roads = 5
time = 1
# initializing indices
M = [(i,t) for i in range(roads) for t in range(time+1)]
age_i_t_DF = makeDataFrames_age(roads, time)
XLnXS_i_t_DF = makeDataFrames_Activities(roads, time,age_i_t_DF)



## Begin Model ##
PYM.begin(p)
# p.solver('interior', msg_lev=PYM.glpk.GLP_MSG_OFF)
p.solver(int, br_tech=PYM.glpk.GLP_BR_PCH)
# begin("5 roads and 1 period")
#action variables: xl is the large action, xs the small action
#(M[1::2] uses slice notation a[start:end:step] to take every second index pair)
xl = p.var('xl', M[1::2], bool)
xs = p.var('xs', M[1::2], bool)
#age variables not above 10 years old
# age_i_t = p.var('age',M, bounds = (0,10))
print("\n*Variables*\n",xl,"\n**\n",xs,"\n**\n")#,age_i_t

##Setting objective function
p.minimize(sum(xl[i]*200+xs[i]*75 for i in M[1::2]),'Cost')
print("\nprint(p.get_obj_name()) = ",p.get_obj_name(),"\n")

##FIRST Condition Set##
Example #8
n = 3
N = range(n)
M = [(i, j) for i in N for j in N if i < j]

D = (3, 4, 2)  #duration of each job
L = (0, 2, 0)  #earliest start
U = (9, 7, 8)  #latest finish

# from pymprog import *
import pymprog as PYM

js = PYM.model("job-scheduling")
PYM.begin(js)
x = js.var('x', N)  #start time
#MD[i,j] = (D[i]+D[j])/2.0
#T[i] = x[i] + D[i]
#y[i,j]<= |T[i]-x[j]-MD[i,j]|
#y[i,j] < MD[i,j] <==> overlap betw jobs i,j
y = js.var('y', M)
#w[i,j]: the 'OR' for |T[i]-x[j]-MD[i,j]|
w = js.var('w', M, kind=bool)
# z[i,j] >= MD[i,j] - y[i,j]
z = js.var('z', M)

js.minimize(sum(z[i, j] for i, j in M))

for i, j in M:
    ((D[i] + D[j]) / 2.0 - (x[i] + D[i] - x[j]) +
     (U[i] - L[j]) * w[i, j] >= y[i, j])

    ((x[i] + D[i] - x[j]) - (D[i] + D[j]) / 2.0 + (U[j] - L[i]) *
Example #9
def summerize(tweets_df):
    print(len(tweets_df))
    #print(tweets_df['tweet_texts'][1])

    tf_idf.compute_tf_idf(tweets_df)
    term_matrix = np.load('term_matrix.npy')
    vocab_to_idx = np.load('vocab_to_idx.npy', allow_pickle=True).item()
    content_vocab = list(np.load('content_vocab.npy'))
    # tfidf_dict = np.load('tfidf_dict.npy', allow_pickle=True).item()

    print("1 ##################")

    spacy_tweets = []

    for doc in nlp.pipe(tweets_df['tweet_texts'].astype('unicode'),
                        n_threads=-1):
        spacy_tweets.append(doc)
    spacy_tweets = [tweet for tweet in spacy_tweets if len(tweet) > 1]
    # spacy_tweets = np.random.choice(spacy_tweets, 10, replace=False)
    # spacy_tweets = spacy_tweets[:20]
    print(len(spacy_tweets))
    print(spacy_tweets[0])

    print("2 ##################")

    all_bigrams = [
        list(bigrams([token.lemma_ for token in tweets]))
        for tweets in spacy_tweets
    ]
    starting_nodes = [single_bigram[0] for single_bigram in all_bigrams]
    end_nodes = [single_bigram[-1] for single_bigram in all_bigrams]
    all_bigrams = [
        node for single_bigram in all_bigrams for node in single_bigram
    ]
    all_bigrams = list(set(all_bigrams))
    print("all_bigrams len=", len(all_bigrams))
    print(all_bigrams[0])

    print("3 ##################")

    # bigram_graph = make_bigram_graph(all_bigrams, starting_nodes[1])
    # print(len(bigram_graph))
    # print(bigram_graph)
    # path = breadth_first_search(bigram_graph, starting_nodes[1], end_nodes[2])
    # print(path)

    bigram_paths = []

    for single_start_node in tqdm(starting_nodes):
        bigram_graph = make_bigram_graph(all_bigrams, single_start_node)
        for single_end_node in end_nodes:
            possible_paths = breadth_first_search(bigram_graph,
                                                  single_start_node,
                                                  single_end_node)
            for path in possible_paths:
                bigram_paths.append(path)
    print("bigram_paths len=", len(bigram_paths))
    # print(bigram_paths[10])

    # for tweet in spacy_tweets:
    #     bigram_paths.append(list(bigrams([token.lemma_ for token in tweets])))
    word_paths = []
    for path in tqdm(bigram_paths):
        word_paths.append(make_list(path))
    print(word_paths[0])

    print("4 ##################")

    mp.begin('COWABS')
    # Defining my first variable, x
    # This defines whether or not a word path is selected
    x = mp.var(str('x'), len(word_paths), bool)
    # Also defining the second variable, which defines
    # whether or not a content word is chosen
    y = mp.var(str('y'), len(content_vocab), bool)

    mp.maximize(
        sum([
            linguistic_quality(word_paths[i]) *
            informativeness(word_paths[i], term_matrix, vocab_to_idx) * x[i]
            for i in range(len(x))
        ]) + sum(y))
    # hiding the output of this line since it's a very long sum
    # sum([x[i] * len(word_paths[i]) for i in range(len(x))]) <= 150

    for j in range(len(y)):
        sum([
            x[i]
            for i in paths_with_content_words(j, word_paths, content_vocab)
        ]) >= y[j]

    for i in range(len(x)):
        sum(y[j] for j in content_words(i, word_paths, content_vocab)) >= len(
            content_words(i, word_paths, content_vocab)) * x[i]
    mp.solve()
    result_x = [value.primal for value in x]
    result_y = [value.primal for value in y]
    mp.end()

    chosen_paths = np.nonzero(result_x)
    chosen_words = np.nonzero(result_y)
    print("*** Total = ", len(chosen_paths[0]))

    min_cosine_sim = 999
    final_sentence = None
    for i in chosen_paths[0]:
        print('--------------')
        print(str(" ").join([token for token in word_paths[i]]))
        cosine_sim = informativeness(word_paths[i], term_matrix, vocab_to_idx)
        print(cosine_sim)
        if min_cosine_sim > cosine_sim:
            min_cosine_sim = cosine_sim
            final_sentence = str(" ").join([token for token in word_paths[i]])

    # print("####### Summary ###########")
    # print(final_sentence)

    return final_sentence
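# A toy standalone sketch of the selection/coverage pattern used in the COWABS
# model above (hypothetical data, not the tweet corpus): x[i] selects a path
# and y[j] may only be 1 when some selected path contains content word j.
import pymprog as mp

covers = {0: [0, 1], 1: [1, 2]}          # path i contains these word indices
mp.begin('coverage-sketch')
x = mp.var('x', 2, bool)                 # path selection variables
y = mp.var('y', 3, bool)                 # content-word indicator variables
mp.maximize(sum(y[j] for j in range(3)))
sum(x[i] for i in range(2)) <= 1         # select at most one path
for j in range(3):
    # word j only counts as covered if a selected path contains it
    sum(x[i] for i in range(2) if j in covers[i]) >= y[j]
mp.solve()
print([v.primal for v in x], [v.primal for v in y])
mp.end()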
Example #10
File: lp1997ug.py  Project: Breccia/pyLP
#!/usr/bin/python3

import pymprog as MP
from lp_cfg import *
from solve_lp import solve_lp


if __name__ == '__main__':
    # Max/Min W = Cx
    # s.t.
    # Ax [lte, gte, e] b

    lp = MP.begin('LP1997UG')
    lp.verbose(True)

    c = [(1, 1), 30 - 75 + 90 - 95]
    A = [(50, 24),
            (30 , 33),
            (1, 0),
            (0, 1)]
    b = [(40 * 60, 'lte'), 
            (35 * 60, 'lte'),
            (75 - 30, 'gte'),
            (95 - 90, 'gte')]

    solve_lp(MAXIMIZE, lp, c, A, b)

    lp.sensitivity()
    lp.end()
    
Example #11
File: lp1994ug.py  Project: Breccia/pyLP
#!/usr/bin/python3

import pymprog as MP
from solve_lp import solve_lp
from lp_cfg import *

if __name__ == '__main__':
    lp = MP.begin('LP1994UG')
    c = [(20 - 130 / 60 - 40 / 60, 30 - 190 / 60 - 58 / 60), 0]
    A = [(13, 19), (20, 29), (1, 0)]
    b = [(40 * 60, 'lte'), (35 * 60, 'lte'), (10, 'gte')]
    lp.verbose(True)
    solve_lp(MAXIMIZE, lp, c, A, b)
    lp.sensitivity()
    lp.end()