def feasibilitycheck(solution):
    readfile.read(call)  # load the problem instance
    feasible = readfile.check_feasibility(solution)
    print("Feasible: ", feasible)

# feasibilitycheck()
def exam(calls):
    for i in range(len(calls)):
        score = 0
        readfile.read(calls[i])
        start = dt.datetime.now()
        before = readfile.gen_dummy_solution()
        score_before = readfile.objective_function(before)
        # -------
        after = algorithms.simulated_annealing_new(before, call_max_time[i])
        # -------
        score = readfile.objective_function(after)
        end = dt.datetime.now()
        time = (end - start).total_seconds()
        improvement = 100 * ((score_before - score) / score_before)
        print("Stats for ", calls[i])
        print(f"Objective: {score:.2f}")
        print(f"Improvement%: {improvement:.2f}")
        print(f"Running time: {time:.2f}")
        print(f"Solution: {after}")
        print("------------------------------------------------------------")
def results(filename='Call_7_Vehicle_3.txt'):
    readfile.read(filename)
    score_sum = 0
    before = readfile.gen_dummy_solution()
    score_before = readfile.objective_function(before)
    best = before
    best_score = score_before
    time_sum = 0
    iterations = 10
    for _ in range(iterations):
        start = dt.datetime.now()
        before = readfile.gen_dummy_solution()
        score_before = readfile.objective_function(before)
        # ----------
        after = algorithms.simulated_annealing_new(before, call_max_time[4])
        # ----------
        after_score = readfile.objective_function(after)
        end = dt.datetime.now()
        time = (end - start).total_seconds()
        score_sum += after_score
        time_sum += time
        if after_score < best_score:
            best = after
            best_score = after_score
        print("this improvement: ", 100 * ((score_before - after_score) / score_before))
    average_score = score_sum / iterations
    average_time = time_sum / iterations
    improvement = 100 * ((score_before - average_score) / score_before)
    improvement_best = 100 * ((score_before - best_score) / score_before)
    print(f"Average objective: {average_score:.2f}")
    print(f"Best objective: {best_score:.2f}")
    print(f"Improvement%: {improvement:.2f}")
    print(f"Improvement best%: {improvement_best:.2f}")
    print(f"Running time: {average_time:.2f}")
    print(f"Best solution: {best}")
def create_data(filename):
    # Input types json and log accepted.
    fn = filename.split('.')
    fout = fn[0] + '_report.txt'
    if fn[1] == 'json':
        with open(filename) as json_data:
            data = json.load(json_data)
    elif fn[1] == 'log':
        data = read(filename)
    else:
        sys.exit("Unsupported file type (expected .json or .log)")
    return data, fout
def __init__(self, *args):
    # Hash table with 101 buckets; collision statistics are tracked while loading contacts.
    self.table = [[] for i in range(101)]
    self.max_num_of_collisions = 0
    self.all_num_of_collisions = 0
    temparr = read('./ContactList.txt')
    for i in temparr:
        h = self.__gethash__(i['name'])
        if self.table[h] != []:
            self.all_num_of_collisions += 1
        self.table[h].append(i)
        if len(self.table[h]) > self.max_num_of_collisions:
            self.max_num_of_collisions = len(self.table[h])
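The `__gethash__` helper used above is not shown in this extract; the following is a minimal sketch, assuming a simple character-sum hash of the contact name folded into the 101 buckets (the actual scheme may differ).

def __gethash__(self, name):
    # Assumed hash: sum the character codes of the name and fold into the table size.
    return sum(ord(ch) for ch in name) % 101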
def siman(filename):
    distances = readfile.read(filename)  # read the distance matrix from readfile.py
    size = len(distances)
    e = math.e
    cities = np.zeros(size, np.int64)  # initiate the cities
    for i in range(0, size):
        cities[i] = i + 1
    current = np.zeros(size + 1, np.int64)
    current[0] = 0
    current[1:] = np.random.permutation(cities)
    # these are variables for the other scheduling algorithms
    schedule = []
    schedule.append(500)  # this is user defined
    a = 0.95
    T = 10000
    best_fit = np.zeros(size + 1, np.int64)
    best_fit[0] = 9999999
    start = time.time()
    for t in range(1, 10000000, 4):
        # schedule.append(schedule[t-1]/beta)
        # T = a * T
        T = 1 / (math.log(1 + t))
        succ = np.zeros(size + 1)
        temp = np.zeros(size + 1)
        if T < 0.1:
            break
        temp[1:] = current[1:]
        succ[1:] = permutate(temp[1:])
        current = fitness(current, distances)
        succ = fitness(succ, distances)
        difference = succ[0] - current[0]
        if difference <= 0:
            # accept an improving (or equal) neighbour outright
            current = succ
        else:
            # accept a worse neighbour with probability e^(-difference / T)
            if r.random() < e ** (-difference / T):
                current[:] = succ[:]
        if current[0] < best_fit[0]:
            best_fit = current
    end = time.time()
    print(end - start)
    # print("The simulated annealing has produced a tour of length:", current[0])
    # print("The actual tour is:", current[1:])
    return current  # best_fit
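The `permutate` neighbour move called above is likewise not shown in this extract; the following is a minimal sketch, assuming it swaps two randomly chosen positions of the partial tour it receives.

def permutate(tour):
    # Assumed neighbour move: swap two distinct, randomly chosen cities and return the tour.
    i, j = r.sample(range(len(tour)), 2)
    tour[i], tour[j] = tour[j], tour[i]
    return tour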
def worker(self, cpu, tmpdir, sn):
    print 'Process: ' + str(cpu) + ' started.'

    # Initiate metadata tracking
    meta = metadata.meta_check()
    cf = cfscan.cf()

    # Initialise counters
    kk = 0
    fileErr = []

    # Create tmp log for this process
    tmplog = output.tmplog(cpu, tmpdir)

    '''
    -------------------------------------------------
    While the fileList queue still has files: check
    -------------------------------------------------
    '''
    while self.fileQueue.empty() == False:
        try:
            # Try to get a file off the file queue, if successful continue
            ncfile = self.fileQueue.get()
            print "{:<14}{:^5}{:<15}{:>15}{:^5}".format('[PROCESS: **', cpu, ' **]', 'Checking file: ', kk + 1)
            tmplog.header(ncfile)

            # Open/read file and extract global attributes, netCDF format
            #gatts, ncformat, conv, vars = readfile.read(ncfile)
            f = readfile.read(ncfile)
            tmplog.meta(f.atts, f.ncformat, f.conv)

            # Initialise CF and ACDD lists on first loop, then check
            # whether the variables are the same between files on subsequent loops.
            cf.newvars(f.vars)

            # ACDD Compliance Check
            # Check list against global file attributes, track sum of missing ACDD attrs
            meta.acddCheck(f.atts)

            # Also keep track of file netCDF format and conventions used (if any)
            meta.fileFormat(f.ncformat)
            meta.conventions(f.conv)

            '''
            -------------------------------------------------------
            Wrapper for CF-Convention 'cfchecks.py'
            -------------------------------------------------------
            '''
            # Run the 'cfchecks.py' script on the individual file, redirect output to tmpfile
            tmpfile = tmpdir + '/tmp' + str(cpu) + '.out'

            # Check for a new standard name table first
            if not sn:
                standardName = STANDARDNAME
            else:
                standardName = sn

            # This line initialises cfchecks.py
            inst = CFChecker(uploader=None, useFileName="yes", badc=None, coards=None,
                             cfStandardNamesXML=standardName, cfAreaTypesXML=areaTypes,
                             udunitsDat=udunits, version=version)

            # This part executes the checker part of cfchecks.py and redirects output to tmpfile.
            # If for any reason a file can't be read, keep track of that.
            rc = []
            with redirected(stdout=tmpfile):
                try:
                    inst.checker(ncfile)
                except:
                    rc = 0
            if rc == 0:
                print 'Unexpected error: ', sys.exc_info()[0], sys.exc_info()[1]
                print '\nFILE: ' + ncfile + ' [SKIPPING FILE]\n'
                fileErr.append(ncfile)

            cf.wrapper(tmplog, tmpfile)
            print >>tmplog.fn, ' \n'
            kk += 1
        except:
            break

    # If results from this process, add to local result queue
    #try:
    self.meta.put(meta)
    self.cf.put(cf)
    self.fileErr.put(fileErr)
    print 'Process: ' + str(cpu) + ' completed.'
import svm
import readfile
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import accuracy_score

# ===============================================================
print("-- KNN Classifier --")
filename = 'telepathology2.csv'
reader = readfile.read(filename)
X_train, y_train, X_test, y_test = reader.get_data()

# training a KNN classifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=7).fit(X_train, y_train)

# evaluate predictions on the test set
y_pred = knn.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
precision, recall, fscore, support = score(y_test, y_pred)
print('accuracy = %.2f' % (accuracy * 100), '%')
print('Precision:', precision)
print('Recall:', recall)
print('F-Measure:', fscore)
print('Support-score:', support)
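To also report a confusion matrix alongside these summary metrics, one option (assuming scikit-learn, which the snippet already uses) is:

from sklearn.metrics import confusion_matrix
# Rows are true classes, columns are predicted classes.
print('Confusion matrix:\n', confusion_matrix(y_test, y_pred))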
def main():
    grid, pins = readfile.read()
    path.maze(grid, pins)
    drawing.click()
import hmm
import readfile
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import accuracy_score

# ===============================================================
filename = 'telepathology1.csv'

# Get HMM parameters, train_data (for training) and test_data (for decoding)
readFile = readfile.read(filename)
(states, symbols, trans_prob, emis_prob, x_train, x_test, y_test) = readFile.get_data()

model = hmm.HmmScaled(states, symbols, trans_prob, emis_prob, 'init_model.json')

# Train the HMM
model.train(x_train)
model.check_prob()  # Check its probabilities
model.print_hmm()   # Print it out
print('')

# -------------------------------------------------------------
# Find the best hidden state sequence Q for the given observation sequence - (DECODING)
# Given an observation at time t, predict the next state at t+1
import math
import json
import readfile

with open('Boy-Girl-3424-3424.json', encoding='utf-8-sig') as data_file:
    data = json.load(data_file)

def list2freqdict(mylist):
    mydict = dict()
    for ch in mylist:
        mydict[ch] = mydict.get(ch, 0) + 1
    return mydict

# handle stopwords
stopword = []
pttstop = readfile.read('ptt_words.txt')
special = readfile.read('specialMarks.txt')
chinesestop = readfile.read('chinese_sw.txt')
stopword = pttstop + special + chinesestop

num = len(data["articles"])
for i in range(num):
    response = []
    contentmain = []
    datatmp = data["articles"][i]
    # print(i, data["articles"][i]["article_title"])
    title = data["articles"][i]["article_title"]
    content = data["articles"][i]["content"]
    # print('body text')
    # print(data["articles"][i]["content"])
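For reference, a small usage example of `list2freqdict` (the input list is illustrative only):

print(list2freqdict(['a', 'b', 'a']))  # -> {'a': 2, 'b': 1}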
import time
import math
import random as r
import numpy as np
import readfile
import sys

distances = readfile.read()
size = len(distances)

def findduplicates(tour):
    # Replace duplicated cities in a crossover child with the cities that went missing,
    # so the result is a valid tour again.
    missing = []
    seen = []
    for i in range(1, len(tour) + 1):
        if i not in tour:
            missing.append(i)
    counter = len(missing)
    for city in range(0, len(tour)):
        if counter == 0:
            break
        if tour[city] in seen:
            tour[city] = missing[counter - 1]
            counter -= 1
        else:
            seen.append(tour[city])
    return tour

def fitness(population, distances):
    for i in range(len(population)):
# Supporting programs
import readfile
import timefunction as tm
import order

# Library
from threading import Thread

# Variables
order_time = []
threads = []

# Attempt to read the input file
try:
    f = readfile.read(input('Enter the path: '))
    number_of_rows = len(list(f))
except:
    print("Sorry, the input file couldn't be opened.")
    exit(0)

# Place Order
def placeOrder():
    for cell in f:
        # Place order
        # Cell number starts from zero
        order.place(cell[5], cell[7], cell[4], 123, 1234)
        # Records the order time in timestamp format
        order_time.append(tm.now())
    print("\n")
    print(order_time)
def genetic(filename):
    distances = readfile.read(filename)
    size = len(distances)
    mutateprob = 0.4       # probability that the child is mutated
    generations = 400      # number of populations created
    population_size = 250  # number of tours in each population
    population = np.zeros((population_size, size + 1), np.int64)

    # If elitism is true, the fittest tour of each population is carried over to the new
    # population, which improves the performance of the genetic algorithm. Here the elite
    # replaces a randomly chosen tour; replacing the least fit individual instead would give
    # better results, but it interferes with the random selection and its probabilities.
    elitism = True

    # initiate the population
    cities = np.zeros(size, np.int64)
    for i in range(size):
        cities[i] = i + 1

    # randomly shuffle all the cities of the initial population
    for i in range(population_size):
        r.shuffle(cities)
        for j in range(size):
            population[i][j + 1] = cities[j]

    start = time.time()
    while True:
        # initiate the new population, calculate the fitness and probability of the old one
        newP = np.zeros((population_size, size + 1), np.int64)
        population = fitness(population, distances)
        probs = calculateprob(population)
        for i in range(len(population)):
            # initiate the parents
            newX = np.zeros((size + 1), np.int64)
            newY = np.zeros((size + 1), np.int64)

            # choose the parents based on the probabilities
            x = np.random.choice(len(population), p=probs)
            y = np.random.choice(len(population), p=probs)

            # choose a random position to split the tours
            crossover = r.randint(1, size - 1)

            # add the prefixes to the parents
            newX[1:crossover + 1] = population[x][1:crossover + 1]
            newY[1:crossover + 1] = population[y][1:crossover + 1]

            # add the suffixes to the opposite parents
            newX[crossover + 1:size + 1] = population[y][crossover + 1:size + 1]
            newY[crossover + 1:size + 1] = population[x][crossover + 1:size + 1]

            # replace duplicate cities with missing cities so that a valid tour exists
            newX[1:] = findduplicates(newX[1:])
            newY[1:] = findduplicates(newY[1:])

            # find fitness of children and choose the fittest one (the one with the smallest length)
            children = fitness((newX, newY), distances)
            if children[0][0] < children[1][0]:
                z = children[0]
            else:
                z = children[1]

            # mutate the child z with fixed probability by swapping two random cities
            if r.random() < mutateprob:
                city1 = r.randint(1, len(z) - 1)
                city2 = r.randint(1, len(z) - 1)
                temp = z[city1]
                z[city1] = z[city2]
                z[city2] = temp
            newP[i][:] = z

        # if elitism is set to true, pick a random position to place the elite in the new population
        if elitism:
            position = r.randint(0, len(population) - 1)
            elit = find_fittest(population)
            newP[position][:] = population[elit[1]][:]

        population = newP
        if generations == 0:
            break
        else:
            generations -= 1

    end = time.time()
    print("Total Time Taken: ", end - start)
    best_tour = find_fittest(population)
    print("Best tour that was found has length: ", best_tour[0])
    pointer = best_tour[1]
    print("The actual tour is: ", population[pointer][1:])
    final = population[pointer]
    return final
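A hypothetical call (the file name is illustrative; `readfile.read` must return the distance matrix for it):

best = genetic('city_distances.txt')  # fittest tour found; its length is stored at index 0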
import readfile
import gan
import utils
import random
import hmm
import time
import numpy as np
from matplotlib import pyplot
from sklearn.metrics import classification_report

util = utils.Utils()

# Read and normalize the dataset according to the model
reader = readfile.read('telepathology1.csv')
trainX, trainY, testX, testY = reader.get_data()

# Define GAN parameters
epochs = len(trainX)
latent_dim = 1
feature_num = 1
batch_size = 1

# Build GAN
gan = gan.GAN(latent_dim, feature_num)
gan.load_data(trainX, trainY)

# Train GAN
start = time.time()
generator = gan.train(epochs, batch_size)
stop = time.time()
print("Training time: %.2f s" % (stop - start))
import os
import readfile

def compareTime(new, old):
    content_new = open("download/" + new, 'r', encoding='latin-1').read()
    content_old = open("download/" + old, 'r', encoding='latin-1').read()
    pos_new = content_new.find("Time8")
    pos_old = content_old.find("Time8")
    i = 0
    while content_new[pos_new + i] == content_old[pos_old + i]:
        # check whether 30 matching characters is a proper flag
        if i >= 30:
            return 0
        i += 1
    return 1

csv_list = []
# needs an initial file containing "Time8"
filetxt = open('filename_new.txt', 'r')
newfile = filetxt.readline()[:-1]
oldfile = filetxt.readline()[:-1]
if compareTime(newfile, oldfile):  # implemented by reading the content of the files
    os.remove("download/" + oldfile)
    csv_list = readfile.read("download/" + newfile)
    filetxt.close()
    # filename from bash script here ^^^^^^^
    print("update success")
    add_csv(csv_list)
else:
    print("no update")
    os.remove("download/" + oldfile)
    filetxt.close()