def read_dxf(self):
    """Import geometry from '<title>.dxf'.

    LINE entities with equal start/end Z are classified as beams, all other
    LINEs as columns; every 3DFACE entity becomes a shell.  Returns a dict
    with keys 'b' (beams), 'c' (columns), 's' (shells) and 'f' (empty list).
    """
    started = sec()
    print(out(outpth, 'Reading DXF geometry...'), end='\r')
    drawing = dxf.readfile('{}.dxf'.format(self.title))
    print(out(outpth, '[OK] DXF geometry imported ({} s)'.format(round(sec() - started, 2))))

    beams, columns = [], []
    total = len(drawing.entities)

    # classify LINE entities into beams (horizontal) and columns (everything else)
    for no, ent in enumerate(drawing.entities):
        progressBar('Converting lines', no, total)
        if ent.dxftype == 'LINE':
            (beams if ent.start[2] == ent.end[2] else columns).append(ent)
    print(out(outpth, '[OK] Lines converted               '))

    # every 3DFACE entity is treated as a shell
    shells = [e for e in drawing.entities if e.dxftype == '3DFACE']

    return {'b': beams, 'c': columns, 's': shells, 'f': []}
def showResult():
    """Search view: times sequential, intersect, binary-tree and hash lookups
    over the submitted query words and renders 'result.html'.
    """
    word = []
    dateStart = asc(loc(sec()))
    if request.method == 'POST':
        word = request.form['word'].lower().split(" ")
    result = []

    # sequential search timing
    timeStart = sec()
    for i in word:
        temp = search(i)
    timeEnd = sec()
    timeSeq = timeEnd - timeStart

    # per-word intersection
    inter2 = []
    timeStart = sec()
    for i in word:
        x = list(intersect([i]))
        # BUG FIX: the original tested len(inter) != 0, but `inter` was the
        # one-element query list and therefore never empty, so the
        # "not found" branch was unreachable.  Test the intersect result.
        if len(x) != 0:
            inter2.append(x)
        else:
            inter2.append("No intersect because word not found!!")
    timeEnd = sec()
    timePos = timeEnd - timeStart

    # binary-tree lookup timing
    timeStart = sec()
    for i in word:
        temp = binary.get(i)
    timeEnd = sec()
    timeBi = timeEnd - timeStart

    # hash-table lookup timing (also builds the displayed result list)
    timeStart = sec()
    for i in word:
        temp = hashTable.search(i)
        if temp is not None:
            result.append(Result(i, temp.value, temp.file))
        else:
            result.append(Result(i))
    timeEnd = sec()
    timeHash = timeEnd - timeStart

    dateEnd = asc(loc(sec()))
    return render_template('result.html', result=result, inter=inter2,
                           timeSeq=timeSeq, timeBi=timeBi, timeHash=timeHash,
                           dateStart=dateStart, dateEnd=dateEnd, size=hashTable.size)
def __init__(self, t_end, title, fire_type, fuelconfig):
    """Store the simulation settings and load the fuel configuration.

    `fuelconfig` selects the fuel source: 'stp' (STEP + FUL files),
    'obj' (OBJ + FUL files) or anything else (plain '<title>.ful' CSV).
    """
    self.t_end = t_end          # simulation duration time
    self.title = title          # simulation title
    self.f_type = fire_type     # type of fire
    self.fire_coords = []       # to export to Single class

    print(out(outpth, 'Reading fuel configuration files...'), end='\r')
    started = sec()
    # dispatch table: configured geometry source -> fuel loader
    loaders = {
        'stp': lambda: fires.Fuel(title).read_fuel(),
        'obj': lambda: fires.FuelOBJ(title).read_fuel(),
    }
    self.fuel = loaders.get(fuelconfig, lambda: rcsv('{}.ful'.format(title)))()
    print(out(outpth, '[OK] Fuel configuration imported ({} s)'.format(round(sec() - started, 2))))
def generate_sim(data_path):
    """Prepare T2D simulation input files for every scenario in the CSV set.

    Scenarios whose 'profile' column is NaN get an .err file instead of
    simulation input.  Returns a summary string with the elapsed time.
    """
    started = sec()
    chdir(config['results_path'])
    scenarios = rcsv(data_path)
    for _, row in scenarios.iterrows():
        # NaN != NaN, so this detects scenarios with no element profile assigned
        if row['profile'] != row['profile']:
            mess = '[WARNING] There are no elements above the fire base in scenario {}'.format(row['ID'])
            with open('{0}\{0}.err'.format(row['ID']), 'w') as err:
                err.write('{}\nMax element temperature in te scenario is equal to the ambient temperature'.format(mess))
            print(out(outpth, mess))
            continue
        chdir(str(row['ID']))
        MultiT2D(config['time_end']).prepare(row)
        chdir('..')
    return '[OK] {} simulation files created ({} s)'.format(len(scenarios.index), round(sec() - started, 2))
def showResult():
    """Search view: times sequential, binary-tree and hash lookups for each
    query word and renders 'result.html'.
    """
    word = []
    dateStart = asc(loc(sec()))
    if request.method == 'POST':
        word = request.form['word'].lower().split(" ")
    result = []
    result2 = []

    # sequential search
    tick = sec()
    for w in word:
        temp = search(w)
    timeSeq = sec() - tick

    # binary search tree
    tick = sec()
    for w in word:
        temp = binary.get(w)
    timeBi = sec() - tick

    # hash table (collects the displayed results)
    tick = sec()
    for w in word:
        hit = hashTable.search(w)
        if hit != None:
            result.append(Result(w, hit.value, hit.file))
        else:
            result.append(Result(w))
    timeHash = sec() - tick

    dateEnd = asc(loc(sec()))
    return render_template('result.html', result=result, timeSeq=timeSeq,
                           timeBi=timeBi, timeHash=timeHash,
                           dateStart=dateStart, dateEnd=dateEnd, size=hashTable.size)
def generate_set(n, title, t_end, fire_type, config_path, results_path, fuelconfig):
    """Draw `n` Monte-Carlo fire scenarios (one beam + one column simulation
    each), create a locafi.txt fire file per simulation and append all rows to
    '<results_path>\\<title>_set.csv'.  Returns a summary string.
    """
    def create_df():
        # empty scenario DataFrame with the fixed result-set schema
        return df(columns=('ID', 'element_type', 'time', 'x_f', 'y_f', 'z_f', 'x_s', 'y_s', 'z_s', 'distance', 'ceiling_lvl', 'profile', 'u_x', 'u_y', 'u_z', 'HRRPUA', 'alpha'))

    # append DataFrame to CSV file
    def df2csv(df, path='{}\{}_set.csv'.format(results_path, title)):
        # write the header row only when the CSV does not exist yet
        try:
            with open(path):
                header = False
        except FileNotFoundError:
            header = True
        df.to_csv(path, mode='a', header=header)

    # create locafi.txt file
    def locafi(row, fire):
        chdir(config_path)
        # create simulation directory
        try:
            makedirs('{}\{}'.format(results_path, str(row['ID'])))
        except FileExistsError:
            pass
        chdir('{}\{}'.format(results_path, str(row['ID'])))
        # create locafi.txt fire file
        gen.locafitxt((row['x_f'], row['y_f'], row['z_f']), *fire, row['ceiling_lvl'])
        chdir(config_path)

    csvset = create_df()
    df2csv(csvset)
    # simulation IDs start at an even epoch second: beam rows get even IDs,
    # column rows the following odd ones
    simid_core = int(sec())
    if simid_core % 2 != 0:  # check if odd
        simid_core += 1
    print(out(outpth, '[OK] User configuration imported'))
    sing = Single(title)
    gen = Generator(t_end, title, fire_type, fuelconfig)
    t = sec()
    # draw MC input samples
    for i in range(0, int(n) * 2, 2):
        progressBar('Preparing fire scenarios', i, n * 2)
        fire = list(gen.fire())  # draw fire
        # draw localization of the most exposed beam
        csvset.loc[i] = [simid_core + i, 'b', ctime(sec())] + sing.generate(gen.fire_coords, 'b') + fire[2:]
        locafi(csvset.loc[i], fire[:2])  # generate locafi.txt
        # draw localization of the most exposed column
        csvset.loc[i + 1] = [simid_core + i + 1, 'c', ctime(sec())] + sing.generate(gen.fire_coords, 'c') + fire[2:]
        locafi(csvset.loc[i + 1], fire[:2])  # generate locafi.txt
        # write rows every 8 records (4 fire scenarios)
        if (i + 2) % 8 == 0:
            df2csv(csvset)
            del csvset
            csvset = create_df()
    # write unwritten rows
    try:
        df2csv(csvset)
        del csvset
    except ValueError:
        pass
    return '[OK] {} scenarios (2 simulations each) generated ({} s)'.format(int(n), round(sec() - t, 2))
# NOTE(review): the loop and `return` below duplicate the tail of
# generate_sim() and sit outside any function here — a bare `return` at this
# level is a syntax error, so this chunk looks like a mis-pasted fragment.
# TODO confirm against the full file before relying on it.
for i, r in data_set.iterrows():
    # NaN != NaN detects scenarios with no profile assigned
    if r['profile'] != r['profile']:
        with open('{0}\{0}.err'.format(r['ID']), 'w') as err:
            mess = '[WARNING] There are no elements above the fire base in scenario {}'.format(r['ID'])
            err.write('{}\nMax element temperature in te scenario is equal to the ambient temperature'.format(mess))
        print(out(outpth, mess))
        continue
    chdir(str(r['ID']))
    MultiT2D(config['time_end']).prepare(r)
    chdir('..')
return '[OK] {} simulation files created ({} s)'.format(len(data_set.index), round(sec() - t, 2))


if __name__ == '__main__':
    # log file lives next to the current working directory; '+w' truncates it
    outpth = getcwd() + '\mc.log'
    with open(outpth, '+w') as f:
        f.write(ctime(sec()) + '\n')
    print(out(outpth, 'Reading user configuration...'), end='\r')
    config = user_config(sys.argv[1])  # import multisimulation config
    try:
        makedirs(config['results_path'])  # create results directory
    except FileExistsError:
        pass
    chdir(config['config_path'])  # change to config
    # stage 1: draw the Monte-Carlo scenario set, stage 2: build sim inputs
    print(out(outpth, generate_set(config['max_iterations'], config['case_title'], config['time_end'], config['fire_type'], config['config_path'], config['results_path'], config['fuel'])))
    print(out(outpth, generate_sim('{}\{}_set.csv'.format(config['results_path'], config['case_title']))))
# NOTE(review): this chunk starts in the middle of an if/elif chain — the
# opening `if jotters == ...:` branch is outside this view, so the leading
# statements and the `elif` below cannot stand alone.  TODO locate the full
# script before editing.
print("Put in your Memo:")
input()
elif jotters == 3:
    print("put in your memo:")
    input()
else:
    print("Invalid Syntax")
print("Do you want to save Your Memo")
print("1.Yes \n 2.No\n")
answer = int(input())
if answer == 1:
    print(".......Saving")
    print("saved")
# NOTE(review): the `else` below pairs with `answer == 2` only, so when
# answer == 1 the program prints both "saved" and "invalid input" — presumably
# this was meant to be `elif`; confirm intent.
if answer == 2:
    print("Exiting....")
    time.sec(5)  # NOTE(review): `time` has no `sec` — presumably time.sleep(5); confirm
else:
    print("invalid input")
# NOTE(review): variable is `jotter` here but `jotters` above — likely a typo
# in one of the two; confirm against the full script.
if jotter == 3:
    jotter3 = str(input("Put in your memo:"))
    print("Click to save")
    input()
    jotter3Demo = [jotter3]
    print("1.Save \n 2.Dont Save")
    veris = int(input())
    if veris == 1:
        print("Saved")
    elif veris == 2:
        jotter3Demo.pop()
print("Which jotter do u want to use:")
def result():
    """Search endpoint: answers a query via four index structures (inverted
    index, positional index, hash map, binary search tree) and reports the
    lookup time of each; wildcard queries are pattern-matched and returned
    early via 'test.html'.

    NOTE(review): relies on module-level globals (df, word_list, wordhash,
    wordtree, arrtree, textinput, count* counters) and helpers (splitword,
    checkword, checkposition, intersechash, intersec, chdic, Node, insert,
    inorder, HashMap) defined elsewhere — confirm against the full module.
    """
    if request.method == "POST":
        result = request.args
        word = request.form["Search"]
        list_text = []
        list_text = splitword(word)
        # count how many query terms exist in the vocabulary
        count = 0
        for i in list_text:
            if i in word_list.keys():
                count += 1
        # wildcard query: fnmatch each pattern against the vocabulary, return early
        if word.count('*') != 0:
            global list_fil
            urlfil = []
            dict_fil = {}
            list_fil = []
            list_filtered = splitword(word)
            print(list_filtered)
            for i in list_filtered:
                filtered = fnmatch.filter(word_list, i)
                if len(filtered) > 0:
                    for j in filtered:
                        list_fil.append(j)
            print(list_fil)
            return render_template("test.html", listword=list_fil)
        global countin
        global countpo
        global counthash
        global counttree
        countin = 0
        countpo = 0
        counthash = 0
        counttree = 0
        ###--inverted---###
        timeStart = sec()
        dictin = {}
        urlin1 = []
        # all query terms must be known, otherwise report NOT FOUND
        if count == len(list_text):
            for i in list_text:
                urlin = []
                text = checkword(i)
                for j in text[1]:
                    urlin.append(df['url'][j])
                urlin1.append(urlin)
            urlin2 = intersechash(urlin1)
            dictin[word] = urlin2
        else:
            dictin[word] = ['NOT FOUND']
        timeEnd = sec()
        timein = timeEnd - timeStart
        ###---position---###
        dictpo = {}
        url = []
        timeStart = sec()
        # multi-word phrase: positional check; single word: direct posting list
        if len(list_text) > 1 and count == len(list_text):
            listindex = checkposition(list_text, count)
            if listindex != "NOT FOUND":
                for i in listindex:
                    url.append(df['url'][i])
                dictpo[word] = url
            else:
                dictpo[word] = ["NOT FOUND"]
        elif len(list_text) == 1 and list_text[0] in word_list.keys():
            for i in range(0, len(word_list[list_text[0]])):
                countpo += 1
                url.append(df['url'][word_list[list_text[0]][i][0]])
            dictpo[word] = url
        else:
            dictpo[word] = ["NOT FOUND"]
        timeEnd = sec()
        timepo = timeEnd - timeStart
        ####--hash--##
        urlhash = []
        h = HashMap()
        dicthash = {}
        # build the hash map from the word->urls table (untimed setup)
        for j in wordhash:
            h.add(j, wordhash[j])
        timeStart = sec()
        if count == len(list_text):
            for i in list_text:
                if i in wordhash:
                    urlhash.append(list(h.get(i)))
            urlhashin = intersechash(urlhash)
            dicthash[word] = urlhashin
        else:
            dicthash[word] = ["NOT FOUND"]
        timeEnd = sec()
        timehash = timeEnd - timeStart
        ##--tree--###
        global textinput
        dicttree = {}
        lendic = len(wordtree)
        mid = lendic // 2
        # root the BST at the middle vocabulary entry, then insert everything
        for i, w in enumerate(wordtree):
            #counttree+=1
            if i == mid:
                datanode = chdic(wordtree, w)
                r = Node(datanode)
        for i, w in wordtree.items():
            #counttree+=1
            array = chdic(wordtree, i)
            insert(r, Node(array))
        timeStart = sec()
        if count == len(list_text):
            # inorder() presumably accumulates matches for `textinput` into
            # arrtree — TODO confirm against the helper's definition
            for i in list_text:
                textinput = i
                inorder(r)
            link = intersec(arrtree)
            dicttree[word] = list(link)
        else:
            dicttree[word] = ["NOT FOUND"]
        timeEnd = sec()
        timetree = timeEnd - timeStart
        return render_template("result.html", resultin=dictin, resultin1=countin, retimein=timein, resultpo=dictpo, resultpo1=countpo, retimepo=timepo, resulthash=dicthash, resulthash1=counthash, retimehash=timehash, resulttree=dicttree, resulttree1=counttree, retimetree=timetree, word=word)
def showResult():
    """Search view timing five lookup strategies over the query words and
    rendering 'result.html'.
    """
    word = []
    dateStart = asc(loc(sec()))
    if request.method == 'POST':
        text = request.form['word']
        word = request.form['word'].lower().split(" ")

    ######------Time Check-------###############
    result = []

    ###---TimeInvert---###
    tick = sec()
    for w in word:
        temp = search(w)
    timeInV = sec() - tick

    ###---TimePosition---###
    tick = sec()
    for w in word:
        temp = search(w)
    timePos = sec() - tick

    ###---TimeIntersect---###
    tick = sec()
    inter = intersect(word)
    timeInS = sec() - tick

    ###---TimeBinary---###
    tick = sec()
    for w in word:
        temp = binary.get(w)
    timeBi = sec() - tick

    ###---TimeHash---###
    tick = sec()
    for w in word:
        hit = hashTable.search(w)
        if hit != None:
            result.append(Result(w, hit.value, hit.file))
        else:
            result.append(Result(w))
    timeHash = sec() - tick

    dateEnd = asc(loc(sec()))
    return render_template('result.html', result=result, word=text, inter=inter,
                           timePos=timePos, timeInV=timeInV, timeInS=timeInS,
                           timeBi=timeBi, timeHash=timeHash,
                           dateStart=dateStart, dateEnd=dateEnd, size=hashTable.size)
def result():
    """Search endpoint: classifies the query's sentiment with a Naive-Bayes
    model trained on three labelled review datasets, then answers the query
    via inverted/positional/hash/tree indexes with per-structure timings.
    Wildcard queries short-circuit to 'test.html'.
    """
    if request.method == "POST":
        result = request.args
        word = request.form["Search"]
        word2 = request.form["Search"]
        list_text = []
        list_text = splitword(word)

        # --- sentiment model -----------------------------------------------
        # BUG FIX: the original re-assigned `lines` for each file, silently
        # discarding the imdb and amazon datasets; accumulate all three.
        lines = []
        for path in ("imdb_labelled.txt", "amazon_cells_labelled.txt", "yelp_labelled.txt"):
            with open(path, "r") as text_file:
                lines.extend(text_file.read().split("\n"))
        # BUG FIX: the original filtered on line.split("t") (the letter 't'),
        # not the tab character, letting malformed lines through the filter.
        newLines = [line.split("\t") for line in lines
                    if len(line.split("\t")) == 2 and line.split("\t")[1] != ""]
        train_documents = [line[0] for line in newLines]
        train_labels = [int(line[1]) for line in newLines]
        count_vectorizer = CountVectorizer(binary="true")
        train_documents = count_vectorizer.fit_transform(train_documents)
        classifier = BernoulliNB().fit(train_documents, train_labels)
        sss = []

        def predictionOutput(sentence):
            # append "Positive"/"Negative" according to the predicted label
            p = "Positive"
            n = "Negative"
            prediction = classifier.predict(count_vectorizer.transform([sentence]))
            if prediction[0] == 1:
                sss.append(p)
            elif prediction[0] == 0:
                sss.append(n)

        predictionOutput(word2)
        str1 = ''.join(sss)

        # count how many query terms exist in the vocabulary
        count = 0
        for i in list_text:
            if i in word_list.keys():
                count += 1

        # wildcard query: fnmatch each pattern against the vocabulary, return early
        if word.count('*') != 0:
            global list_fil
            urlfil = []
            dict_fil = {}
            list_fil = []
            list_filtered = splitword(word)
            print(list_filtered)
            for i in list_filtered:
                filtered = fnmatch.filter(word_list, i)
                print(filtered)
                if len(filtered) > 0:
                    for j in filtered:
                        list_fil.append(j)
            print(list_fil)
            return render_template("test.html", listword=list_fil, word2=word2)

        global countin
        global countpo
        global counthash
        global counttree
        countin = 0
        countpo = 0
        counthash = 0
        counttree = 0

        ###--inverted---###
        timeStart = sec()
        dictin = {}
        urlin1 = []
        if count == len(list_text):
            for i in list_text:
                urlin = []
                text = checkword(i)
                for j in text[1]:
                    urlin.append(df['url'][j])
                urlin1.append(urlin)
            urlin2 = intersechash(urlin1)
            dictin[word] = urlin2
        else:
            dictin[word] = ['NOT FOUND']
        timeEnd = sec()
        timein = timeEnd - timeStart

        ###---position---###
        dictpo = {}
        url = []
        timeStart = sec()
        if len(list_text) > 1 and count == len(list_text):
            listindex = checkposition(list_text, count)
            if listindex != "NOT FOUND":
                for i in listindex:
                    url.append(df['url'][i])
                dictpo[word] = url
            else:
                dictpo[word] = ["NOT FOUND"]
        elif len(list_text) == 1 and list_text[0] in word_list.keys():
            for i in range(0, len(word_list[list_text[0]])):
                countpo += 1
                url.append(df['url'][word_list[list_text[0]][i][0]])
            dictpo[word] = url
        else:
            dictpo[word] = ["NOT FOUND"]
        timeEnd = sec()
        timepo = timeEnd - timeStart

        ####--hash--##
        urlhash = []
        h = HashMap()
        dicthash = {}
        # build the hash map from the word->urls table (untimed setup)
        for j in wordhash:
            h.add(j, wordhash[j])
        timeStart = sec()
        if count == len(list_text):
            for i in list_text:
                if i in wordhash:
                    urlhash.append(list(h.get(i)))
            urlhashin = intersechash(urlhash)
            dicthash[word] = urlhashin
        else:
            dicthash[word] = ["NOT FOUND"]
        timeEnd = sec()
        timehash = timeEnd - timeStart

        ##--tree--###
        global textinput
        dicttree = {}
        lendic = len(wordtree)
        mid = lendic // 2
        # root the BST at the middle vocabulary entry, then insert everything
        for i, w in enumerate(wordtree):
            if i == mid:
                datanode = chdic(wordtree, w)
                r = Node(datanode)
        for i, w in wordtree.items():
            array = chdic(wordtree, i)
            insert(r, Node(array))
        timeStart = sec()
        if count == len(list_text):
            for i in list_text:
                textinput = i
                inorder(r)
            link = intersec(arrtree)
            dicttree[word] = list(link)
        else:
            dicttree[word] = ["NOT FOUND"]
        timeEnd = sec()
        timetree = timeEnd - timeStart

        return render_template("result.html", resultin=dictin, resultin1=countin, retimein=timein,
                               resultpo=dictpo, resultpo1=countpo, retimepo=timepo,
                               resulthash=dicthash, resulthash1=counthash, retimehash=timehash,
                               resulttree=dicttree, resulttree1=counttree, retimetree=timetree,
                               word=word, str1=str1)
def __init__(self, filename: str):
    """Tee-style logger: keep a handle to the real stdout and open a log file
    whose name is prefixed with the current epoch second so runs don't collide.
    """
    log_name = '{}_{}'.format(int(sec()), filename)
    self.terminal = stdout
    self.log = open(log_name, 'w')
def result():
    """Search view: wildcard queries render 'wildcard.html'; otherwise several
    lookup strategies are timed over the query words and 'result.html' is
    rendered with the results and timings.
    """
    word = []
    dateStart = asc(loc(sec()))
    if request.method == 'POST':
        text = request.form['word']
        word = request.form['word'].lower().split(" ")
        # Wildcard: fnmatch each pattern against the dictionary, return early
        if text.count("*") != 0:
            global list_fil
            urlfil = []
            dict_fil = {}
            list_fil = []
            list_filtered = splitword(text)
            for i in list_filtered:
                filtered = fnmatch.filter(mydict2, i)
                if len(filtered) > 0:
                    for j in filtered:
                        list_fil.append(j)
            return render_template('wildcard.html', listword=list_fil)

    result = []

    ###---TimeInvert---###
    # NOTE(review): both the "invert" and "position" timings call
    # hashTable.search — presumably placeholders; confirm the intended lookups.
    timeStart = sec()
    for i in word:
        temp = hashTable.search(i)
    timeEnd = sec()
    dateEnd = asc(loc(sec()))
    timeInV = timeEnd - timeStart

    ###---TimePosition---###
    timeStart = sec()
    for i in word:
        temp = hashTable.search(i)
    timeEnd = sec()
    dateEnd = asc(loc(sec()))
    timePos = timeEnd - timeStart

    ###---TimeIntersect---###
    timeStart = sec()
    inter = intersect(word)
    if not inter:
        inter = "No Intersect"
    timeEnd = sec()
    dateEnd = asc(loc(sec()))
    timeInS = timeEnd - timeStart

    ###---TimeBinary---###
    timeStart = sec()
    for i in word:
        temp = binary.get(i)
    timeEnd = sec()
    dateEnd = asc(loc(sec()))
    timeBi = timeEnd - timeStart

    ###---TimeHash---###
    timeStart = sec()
    for i in word:
        temp = hashTable.search(i)
        if temp is not None:
            result.append(Result(i, temp.value, temp.file))
        else:
            result.append(Result(i))
    timeEnd = sec()
    dateEnd = asc(loc(sec()))
    timeHash = timeEnd - timeStart

    # BUG FIX: timeInS was computed but never passed to the template; include
    # it so 'result.html' can show the intersect timing like the sibling view.
    return render_template('result.html', result=result, word=text, inter=inter,
                           timePos=timePos, timeInV=timeInV, timeInS=timeInS,
                           timeBi=timeBi, timeHash=timeHash,
                           dateStart=dateStart, dateEnd=dateEnd, size=hashTable.size)