def pre_process(self) -> dict:
    """Compute the D2 dominating set on the full graph, then solve a local
    subproblem around every node of D2.

    Returns:
        dict: maps each node u in self.D2 to the Greedy result computed on
        the subgraph around u (Greedy stores that result in its D2 field,
        though it plays the role of D1 here).
    """
    # Obtain D2 on the whole graph; rho2 is passed for both radius
    # parameters of the project Greedy solver.
    greedy = Greedy.Greedy(self.G, INF, self.rho2, self.rho2, self.heuristic)
    greedy.run()
    self.D2 = greedy.D2
    # print (self.D2)

    # Generate a subgraph for each node in D2 and run Greedy locally,
    # this time with rho1 for both radius parameters.
    dic = {}
    for u in self.D2:
        sub = self.sub_graph(u)
        g = Greedy.Greedy(sub, INF, self.rho1, self.rho1, self.heuristic)
        g.run()
        # obtain D1 (actually D2 in Greedy output)
        dic[u] = g.D2
    return dic
def output_2(self, heu):
    """Benchmark Greedy against ReplacementA/B on five random-graph
    families with a fixed degree bound, sweeping rho1 and rho2 over
    1..10, and write the average |D1| per family to three CSV files.

    Args:
        heu: heuristic identifier forwarded to the solvers; also used in
            the output file names.
    """
    header = ['ER07', 'ER47', 'SF', 'NSM2', 'NSM10']
    datas_greedy = [list(header)]
    datas_R1 = [list(header)]
    datas_R2 = [list(header)]
    d = 10       # fixed degree bound for all solvers
    n_runs = 5   # independent random graphs per (rho1, rho2) pair
    for rho1 in range(1, 11):
        for rho2 in range(1, 11):
            avg_greedy = [0.0] * 5
            avg_R1 = [0.0] * 5
            avg_R2 = [0.0] * 5
            for i in range(n_runs):
                # One fresh random instance of each graph family per run.
                graphs = [
                    self.ER(1000, 0.01),
                    self.ER(1000, 0.04),
                    self.SF(1000),
                    self.SM(2),
                    self.SM(10),
                ]
                for j, G in enumerate(graphs):
                    g = Greedy.Greedy(G, d, rho1, rho2, heu)
                    g.run()
                    avg_greedy[j] += len(g.D1)
                    r1 = Replacement.ReplacementA(G, d, rho1, rho2, heu)
                    r1.run()
                    avg_R1[j] += len(r1.D1)
                    r2 = Replacement.ReplacementB(G, d, rho1, rho2, heu)
                    r2.run()
                    avg_R2[j] += len(r2.D1)
                print(i)  # progress indicator
            for j in range(5):
                # BUG FIX: the original divided by 10 although only
                # n_runs (= 5) samples were accumulated, halving every
                # reported average. Divide by the actual sample count.
                avg_greedy[j] /= n_runs
                avg_R1[j] /= n_runs
                avg_R2[j] /= n_runs
            datas_greedy.append(avg_greedy)
            datas_R1.append(avg_R1)
            datas_R2.append(avg_R2)
    for suffix, rows in (('greedy', datas_greedy),
                         ('r1', datas_R1),
                         ('r2', datas_R2)):
        with open('2_' + suffix + '_' + heu + '.csv', 'w', newline='') as f:
            csv.writer(f).writerows(rows)
def runGreedy(nbmanchots, run, iterations):
    """Run the greedy bandit strategy `run` times on freshly created slot
    machines, appending the run index and resulting gain to the
    module-level lists, then return both lists."""
    for run_idx in range(run):
        machines = creerManchots(nbmanchots)
        nbiterations2.append(run_idx + 1)
        gains2.append(g.Greedy(iterations, machines))
    print(gains2)
    return nbiterations2, gains2
def ejecuta(self):
    """Run the greedy solver on every input file, recording the wall-clock
    time and evaluated solution value for each, then print the report."""
    for fichero in self.ficheros:
        print(fichero)
        solver = Greedy(fichero, self.semilla)
        inicio = time.time()
        solucion = solver.greedy()
        fin = time.time()
        self.tiempos.append(fin - inicio)
        self.valores.append(solver.evalua_sol(solucion))
    self.imprime()
def pre_process(self) -> dict:
    """Compute D1 via a Greedy run over the whole graph, then map every
    node of G to the set of nodes it covers.

    Returns:
        dict: node -> result of self.cover_nodes(node), for all nodes in G.
    """
    solver = Greedy.Greedy(self.G, INF, self.rho1, self.rho1, self.heuristic)
    solver.run()
    # obtain D1 (actually D2 in Greedy output)
    self.D1 = solver.D2
    # Generate the coverage map for every node in V.
    return {v: self.cover_nodes(v) for v in self.G.nodes()}
def run_test(fileName, max_k):
    # Build (or load from a local cache) the reconciliation graph for a
    # newick file, find cluster representatives via greedy point-collecting,
    # then run K-Means for k = 1..max_k, printing radii at each step.
    # NOTE: this is Python 2 code (print-statement syntax, xrange).
    cache_dir = './cache'
    D = 2.  # duplication cost
    T = 3.  # transfer cost
    L = 1.  # loss cost
    host, paras, phi = newickFormatReader.getInput(fileName)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
        f = open('%s/README' % cache_dir, 'w')
        f.write(
            'This directory holds a cache of reconciliation graph for the TreeLife data set'
        )
        f.close()
    cache_location = '%s/%s.graph' % (cache_dir, os.path.split(fileName)[1])
    if not os.path.isfile(cache_location):
        print >> sys.stderr, 'A reconciliation graph has not been built yet for this newick file'
        print >> sys.stderr, 'Doing so now and caching it in {%s}...' % cache_location
        DictGraph, numRecon = DP.DP(host, paras, phi, D, T, L)
        f = open(cache_location, 'w+')
        f.write(repr(DictGraph))
        f.close()
    print >> sys.stderr, 'Loading reonciliation graph from cache'
    # NOTE(review): eval() on the cached file executes arbitrary code if the
    # cache is tampered with; only acceptable for trusted local caches.
    f = open(cache_location)
    DictGraph = eval(f.read())
    f.close()
    scoresList, dictReps = Greedy.Greedy(DictGraph, paras)
    print >> sys.stderr, 'Found cluster representatives using point-collecting'
    graph = ReconGraph.ReconGraph(DictGraph)
    setReps = [
        ReconGraph.dictRecToSetRec(graph, dictRep) for dictRep in dictReps
    ]
    # Seed fixed so template generation is reproducible across runs.
    random.seed(0)
    extra_reps = [KMeans.get_template(graph) for i in xrange(max_k)]
    representatives = setReps + extra_reps
    print >> sys.stderr, 'Starting K Means algorithm ... '
    print >> sys.stderr, 'Printing Average and Maximum cluster radius at each step'
    for i in xrange(1, max_k + 1):
        print 'k = %d' % i
        # Use the first i representatives as the initial cluster centers.
        KMeans.k_means(graph, 10, i, 0, representatives[:i])
def output_4(self, heu):
    """Benchmark Greedy against ReplacementA/B on the College real-world
    graph with a fixed degree bound, sweeping rho1 and rho2 over 1..10,
    and write the average |D1| per solver to three CSV files.

    Args:
        heu: heuristic identifier forwarded to the solvers; also used in
            the output file names.
    """
    datas_greedy = [['Real1']]
    datas_R1 = [['Real1']]
    datas_R2 = [['Real1']]
    d = 10       # fixed degree bound for all solvers
    n_runs = 5   # independent runs per (rho1, rho2) pair
    for rho1 in range(1, 11):
        for rho2 in range(1, 11):
            avg_greedy = 0.0
            avg_R1 = 0.0
            avg_R2 = 0.0
            for i in range(n_runs):
                G = self.College()
                g = Greedy.Greedy(G, d, rho1, rho2, heu)
                g.run()
                avg_greedy += len(g.D1)
                r1 = Replacement.ReplacementA(G, d, rho1, rho2, heu)
                r1.run()
                avg_R1 += len(r1.D1)
                r2 = Replacement.ReplacementB(G, d, rho1, rho2, heu)
                r2.run()
                avg_R2 += len(r2.D1)
                print(i)  # progress indicator
            # BUG FIX: the original divided by 10 although only n_runs (= 5)
            # samples were accumulated, halving every reported average.
            avg_greedy /= n_runs
            avg_R1 /= n_runs
            avg_R2 /= n_runs
            # BUG FIX: csv.writer.writerow requires an iterable row; the
            # original appended bare floats, which crashed at write time.
            datas_greedy.append([avg_greedy])
            datas_R1.append([avg_R1])
            datas_R2.append([avg_R2])
    # BUG FIX: the original wrote to '2_*' file names, clobbering the
    # results produced by output_2; use the '4_*' prefix for this method.
    for suffix, rows in (('greedy', datas_greedy),
                         ('r1', datas_R1),
                         ('r2', datas_R2)):
        with open('4_' + suffix + '_' + heu + '.csv', 'w', newline='') as f:
            csv.writer(f).writerows(rows)
def freqSummation(argList):
    """Takes as input an argument list containing a newick file of host and
    parasite trees as well as their phi mapping, duplication, transfer, and
    loss costs, the type of frequency scoring to be used, as well as switch
    and loss cost ranges for xscape scoring, and writes a file containing
    the list of scores for each individual reconciliation, the sum of those
    scores, the total cost of those reconciliations and the number of
    reconciliations of those trees.

    Raises:
        ValueError: if the frequency-type argument is not one of
            "Frequency", "xscape", or "unit".
    """
    newickFile = argList[1]
    D = float(argList[2])        # duplication cost
    T = float(argList[3])        # transfer cost
    L = float(argList[4])        # loss cost
    freqType = argList[5]        # frequency-scoring variant
    switchLo = float(argList[6])  # xscape switch range, lower bound
    switchHi = float(argList[7])  # xscape switch range, upper bound
    lossLo = float(argList[8])    # xscape loss range, lower bound
    lossHi = float(argList[9])    # xscape loss range, upper bound
    fileName = newickFile[:-7]    # strip the ".newick" extension

    host, paras, phi = newickFormatReader.getInput(newickFile)
    DTL, numRecon = DP.DP(host, paras, phi, D, T, L)
    if freqType == "Frequency":
        newDTL = DTL
    elif freqType == "xscape":
        newDTL = calcCostscapeScore.newScoreWrapper(newickFile, switchLo,
                                                    switchHi, lossLo, lossHi,
                                                    D, T, L)
    elif freqType == "unit":
        newDTL = MasterReconciliation.unitScoreDTL(host, paras, phi, D, T, L)
    else:
        # BUG FIX: an unrecognized freqType previously left newDTL unbound
        # and crashed later with a NameError; fail fast with a clear message.
        raise ValueError("unknown frequency type: %r" % (freqType,))

    scoresList, reconciliation = Greedy.Greedy(newDTL, paras)
    totalSum = sum(scoresList)
    # NOTE(review): totalCost is reset for every reconciliation, so only the
    # LAST reconciliation's cost is written below — confirm whether a grand
    # total across all reconciliations was intended.
    totalCost = 0
    for index in reconciliation:
        totalCost = 0
        for key in index:
            if index[key][0] == "L":
                totalCost += L
            elif index[key][0] == "T":
                totalCost += T
            elif index[key][0] == "D":
                totalCost += D

    # Write results; the context manager guarantees the file is closed.
    with open(fileName + "freqFile.txt", 'w') as f:
        f.write(str(scoresList) + '\n')
        f.write(str(totalSum) + '\n')
        f.write(str(totalCost) + '\n')
        f.write(str(numRecon))
def Reconcile(argList):
    """Takes command-line arguments of a .newick file, duplication, transfer,
    and loss costs, the type of scoring desired and possible switch and loss
    ranges. Creates Files for the host, parasite, and reconciliations.

    NOTE: Python 2 code (print-statement syntax).
    """
    fileName = argList[1]  # .newick file
    D = float(argList[2])  # Duplication cost
    T = float(argList[3])  # Transfer cost
    L = float(argList[4])  # Loss cost
    freqType = argList[5]  # Frequency type
    # Optional inputs if freqType == xscape
    switchLo = float(argList[6])  # Switch lower boundary
    switchHi = float(argList[7])  # Switch upper boundary
    lossLo = float(argList[8])  # Loss lower boundary
    lossHi = float(argList[9])  # Loss upper boundary
    host, paras, phi = newickFormatReader.getInput(fileName)
    hostRoot = cycleCheckingGraph.findRoot(host)
    hostv = cycleCheckingGraph.treeFormat(host)
    Order = orderGraph.date(hostv)
    # Default scoring function (if freqtype == Frequency scoring)
    DTLReconGraph, numRecon = DP.DP(host, paras, phi, D, T, L)
    print DTLReconGraph, numRecon
    # Uses the xScape scoring function instead of the default frequencies.
    if freqType == "xscape":
        DTLReconGraph = calcCostscapeScore.newScoreWrapper(fileName, switchLo, \
            switchHi, lossLo, lossHi, D, T, L)
    # Uses the unit scoring function.
    elif freqType == "unit":
        DTLReconGraph = unitScoreDTL(host, paras, phi, D, T, L)
    # Deep copy so Greedy can consume the graph without mutating the
    # DTLReconGraph still needed for the conversion step below.
    DTLGraph = copy.deepcopy(DTLReconGraph)
    scoresList, rec = Greedy.Greedy(DTLGraph, paras)
    for n in range(len(rec)):
        graph = cycleCheckingGraph.buildReconciliation(host, paras, rec[n])
        currentOrder = orderGraph.date(graph)
        # "timeTravel" flags a temporally infeasible reconciliation; try to
        # repair it by breaking the detected cycles, then re-date it.
        if currentOrder == "timeTravel":
            rec[n], currentOrder = detectCycles.detectCyclesWrapper(
                host, paras, rec[n])
            currentOrder = orderGraph.date(currentOrder)
        hostOrder = hOrder(hostv, currentOrder)
        hostBranchs = branch(hostv, hostOrder)
        # The final flag differs only for the first reconciliation —
        # presumably it controls (re)creating the output file; confirm
        # against newickToVis.convert.
        if n == 0:
            newickToVis.convert(fileName, hostBranchs, n, 1)
        else:
            newickToVis.convert(fileName, hostBranchs, n, 0)
        # fileName[:-7] is the file name minus the .newick extension
        reconConversion.convert(rec[n], DTLReconGraph, paras, fileName[:-7], n)
def Reconcile(argList):
    """Takes command-line arguments of a .newick file, duplication, transfer,
    and loss costs, the type of scoring desired and possible switch and loss
    ranges. Creates Files for the host, parasite, and reconciliations.

    Returns:
        tuple: (infeasible_recs, recs) — the reconciliations that failed the
        temporal-feasibility check, and the full list from Greedy.
    """
    fileName = argList[1]  # .newick file
    D = float(argList[2])  # Duplication cost
    T = float(argList[3])  # Transfer cost
    L = float(argList[4])  # Loss cost
    freqType = argList[5]  # Frequency type
    # Optional inputs if freqType == xscape
    switchLo = float(argList[6])  # Switch lower boundary
    switchHi = float(argList[7])  # Switch upper boundary
    lossLo = float(argList[8])  # Loss lower boundary
    lossHi = float(argList[9])  # Loss upper boundary
    host, paras, phi = newickFormatReader.getInput(fileName)
    hostRoot = ReconciliationGraph.findRoot(host)
    # Default scoring function (if freqtype == Frequency scoring)
    DTLReconGraph, numRecon = DP.DP(host, paras, phi, D, T, L)
    # xScape scoring path is currently disabled in this version.
    # if freqType == "xscape":
    #     DTLReconGraph = calcCostscapeScore.newScoreWrapper(fileName, switchLo, \
    #            switchHi, lossLo, lossHi, D, T, L)
    # Uses the unit scoring function.
    if freqType == "unit":
        DTLReconGraph = unitScoreDTL(host, paras, phi, D, T, L)
    # Deep copy so Greedy can consume the graph independently.
    DTLGraph = copy.deepcopy(DTLReconGraph)
    scoresList, recs = Greedy.Greedy(DTLGraph, paras)
    infeasible_recs = []
    for rec in recs:
        # NOTE(review): this compares the dating result against False with
        # ==; if orderGraph.date can return other falsy values (None, {}),
        # those would NOT be flagged — confirm its return contract.
        if orderGraph.date(ReconciliationGraph.buildReconciliation(host, paras, rec)) == False:
            infeasible_recs.append(rec)
    return infeasible_recs, recs
def run():
    """Read scoring parameters from the UI widgets, run the alignment
    algorithm selected in `v`, display the aligned sequences in t5/t6
    and the elapsed wall-clock time in t7.

    Shows an error dialog and aborts if any numeric field is invalid.
    """
    t6.delete('1.0', END)
    t7.delete('1.0', END)
    print("value is: " + v.get())
    try:
        score = int(t3.get("1.0", 'end-1c'))
    except ValueError:
        messagebox.showerror(
            "Error", "A number was not entered or there was an empty field.")
        # BUG FIX: the original fell through after the dialog and later
        # crashed with a NameError on the unset variable; abort instead.
        return
    try:
        mismatch = int(t4.get("1.0", 'end-1c'))
    except ValueError:
        messagebox.showerror(
            "Error", "A number was not entered or there was an empty field.")
        return  # BUG FIX: abort on invalid input (see above)
    try:
        # NOTE(review): gap is read from t4, the same widget as mismatch —
        # this looks like a copy-paste slip; confirm which widget actually
        # holds the gap penalty before changing it.
        gap = int(t4.get("1.0", 'end-1c'))
    except ValueError:
        messagebox.showerror(
            "Error", "A number was not entered or there was an empty field.")
        return  # BUG FIX: abort on invalid input (see above)

    # Timer around the algorithm run only.
    t0 = time.time()
    algo = v.get()
    if algo == "Needleman Wunsch":
        # NOTE(review): this calls the brute-force aligner, identical to the
        # "Brute Force" branch — confirm whether a dedicated NW routine was
        # intended here.
        temp = bru.brutForce(file1, file2, score, mismatch, gap)
        t5.insert(END, temp[0])
        t6.insert(END, temp[1])
    if algo == "Brute Force":
        temp = bru.brutForce(file1, file2, score, mismatch, gap)
        t5.insert(END, temp[0])
        t6.insert(END, temp[1])
    if algo == "Divide and Conquer":
        temp = DV.DivideConquer(file1, file2, score, mismatch, gap)
        t5.insert(END, temp[0])
        t6.insert(END, temp[1])
    if algo == "Random":
        # file1, file2, match, mismatch, gap
        temp = r.Random(file1, file2, score, mismatch, gap)
        t5.insert(END, temp[0])
        t6.insert(END, temp[1])
    if algo == "Greedy":
        temp = g.Greedy(file1, file2, score, mismatch, gap)
        t5.insert(END, temp[0])
        t6.insert(END, temp[1])
    t1 = time.time()
    t7.insert(END, t1 - t0)
import Read
import Greedy
import time

# Run the greedy facility-location solver on problem instances p1..p71,
# timing each run, and write all results to Result.txt.
# Accumulate output chunks in a list and join once at the end instead of
# repeated string concatenation (which is quadratic).
chunks = []
for i in range(1, 72):
    chunks.append('p' + str(i) + ':\n')
    FacilityNum, CustomerNum, Capacity, OpeningCost, Demand, Assignment = Read.Read(i)
    test = Greedy.Greedy(FacilityNum, CustomerNum, Capacity, OpeningCost, Demand, Assignment)
    time_start = time.time()
    chunks.append(test.start())
    time_end = time.time()
    chunks.append("Time cost: " + str(time_end - time_start) + 's\n\n')

# BUG FIX: the original called file.close() inside the with-block, which is
# redundant (the context manager closes the file) and shadowed the builtin
# name `file`; the handle is renamed and the explicit close removed.
with open(r'Result.txt', 'w') as out:
    out.write(''.join(chunks))