def make_Histogram(self):
    """Build the 2x2 averaged IQ-histogram figure (IIc/QQc/IQc/QIc) and display it on page 1.

    Reads the table, runs the processing pipeline, refreshes the correlation
    and TMS figures, then renders the four averaged IQ maps.
    """
    self.read_table()
    functions.process(self.dispData, self.dicData)
    self.make_CorrFigs()
    self.make_TMSFig()
    on = self.dicData['hdf5_on']  # this one contains all the histogram axis
    res = self.dicData['res']     # this contains the calculation results
    fig1 = Figure(facecolor='white', edgecolor='white')
    ax1 = fig1.add_subplot(2, 2, 1)
    ax2 = fig1.add_subplot(2, 2, 2)
    ax3 = fig1.add_subplot(2, 2, 3)
    ax4 = fig1.add_subplot(2, 2, 4)
    # BUG FIX: matplotlib's imshow only accepts origin='upper' or 'lower';
    # the original origin='low' raises ValueError on modern matplotlib.
    ax1.imshow(res.IQmapM_avg[0], interpolation='nearest', origin='lower',
               extent=[on.xII[0], on.xII[-1], on.yII[0], on.yII[-1]], aspect='auto')
    ax2.imshow(res.IQmapM_avg[1], interpolation='nearest', origin='lower',
               extent=[on.xQQ[0], on.xQQ[-1], on.yQQ[0], on.yQQ[-1]], aspect='auto')
    ax3.imshow(res.IQmapM_avg[2], interpolation='nearest', origin='lower',
               extent=[on.xIQ[0], on.xIQ[-1], on.yIQ[0], on.yIQ[-1]], aspect='auto')
    ax4.imshow(res.IQmapM_avg[3], interpolation='nearest', origin='lower',
               extent=[on.xQI[0], on.xQI[-1], on.yQI[0], on.yQI[-1]], aspect='auto')
    fig1.tight_layout()
    ax1.set_title('IIc')
    ax2.set_title('QQc')
    ax3.set_title('IQc')
    ax4.set_title('QIc')
    self.update_page_1(fig1)  # send figure to the show_figure terminal
    # NOTE(review): read_table() is also called at the top of this method;
    # this second call re-reads after plotting — confirm it is intentional.
    self.read_table()
def test_one_file_processing(conn, dmp_file_path, date_stamp):
    """Process one DMP file and check its output against the stored test results.

    Returns the boolean outcome of comparing the freshly processed rows with
    the pre-recorded expected rows for the same station and date.
    """
    # Look up which station this file belongs to.
    aws_id, scm = functions.get_station_details(conn, dmp_file_path)

    # Run the production processing path on the file.
    functions.process(dmp_file_path)

    # Pull the expected and the freshly produced rows for this station/date.
    expected = get_results_from_tbls(conn, aws_id, 'tbl_15min_test', date_stamp)
    actual = get_results_from_tbls(conn, aws_id, 'tbl_data_minutes', date_stamp)

    return results_are_same(compare_table_results(expected, actual))
def main():
    """Display HTML template and retrieve user input from locally hosted site,
    before disaggregating values as arguments for the process() function."""
    url = request.args.get('url')
    text_type = request.args.get('text_type')
    choice = f"{url}, {text_type}"
    # Split "url, text_type" back into whitespace-trimmed tokens.
    tokens = [part.split() for part in choice.split(',')]
    if url:
        process(str(tokens[0][0]), str(tokens[1][0]))
    else:
        print("ERROR: No URL entered")
    return render_template("form.html", choice=choice)
def main(dictionary, tfidf_model, tfidf_corpus, matrix_sim, lsi_matrix,
         lsi_model, paragraphs):
    """Answer a fixed query against the TF-IDF and LSI similarity indices,
    printing the three most relevant paragraphs/topics for each model."""
    # Processes the query with the same function as used in task 1,
    # and converts it to a BOW representation.
    #q = "What is the function of money?"
    query = "How taxes influence Economics?"
    query = functions.process(query)
    query = dictionary.doc2bow(query)

    # Convert BOW to TF-IDF representation.
    tfidf_index = tfidf_model[query]

    # Three most relevant paragraphs for the query according to TF-IDF.
    ranked = sorted(enumerate(matrix_sim[tfidf_index]), key=lambda kv: -kv[1])[:3]
    print("# 3 most relevant paragraph for q according to TF-IDF model for q:")
    for par_id, _score in ranked:
        print("\n" + "[paragraph " + str(par_id) + "]")
        print(paragraphs[par_id])

    # Compare TF-IDF representation for the query with LSI-topic representation.
    print(
        "Comparing TF-IDF representation for the query with LSI-topic representation:\n"
    )
    lsi_query = lsi_model[query]
    top_topics = sorted(lsi_query, key=lambda kv: -abs(kv[1]))[:3]
    for topic_id, _weight in top_topics:
        print("\n[Topic #" + str(topic_id) + "]")
        print(lsi_model.show_topics()[topic_id])

    ranked = sorted(enumerate(lsi_matrix[lsi_query]), key=lambda kv: -kv[1])[:3]
    for par_id, _score in ranked:
        print("\n[Paragraph #" + str(par_id) + "]")
        print(paragraphs[par_id])
def upload():
    """Receive uploaded image files, save them, classify the last one, and
    render the result page.

    Saves every file in the "file" field of the request to APP_ROOT/images/,
    then runs functions.process / functions.input_leaf on the last saved file
    and maps the classifier output onto display strings for final.html.
    """
    target = os.path.join(APP_ROOT, 'images/')
    # target = os.path.join(APP_ROOT, 'static/')
    print(target)
    if not os.path.isdir(target):
        # BUG FIX: the original printed "Couldn't create upload directory"
        # when the directory ALREADY existed; report failure only when the
        # mkdir call itself fails.
        try:
            os.mkdir(target)
        except OSError:
            print("Couldn't create upload directory: {}".format(target))

    print(request.files.getlist("file"))
    # Renamed loop variable: the original `upload` shadowed this function.
    for uploaded_file in request.files.getlist("file"):
        print(uploaded_file)
        print("{} is the file name".format(uploaded_file.filename))
        filename = uploaded_file.filename
        destination = "/".join([target, filename])
        print("Accept incoming file:", filename)
        print("Save it to:", destination)
        uploaded_file.save(destination)

    global List
    # Classify the last uploaded file (filename retains the loop's final value).
    target2 = os.path.join(APP_ROOT, 'images/', filename)
    List = functions.process(target2)
    Output = functions.input_leaf(List)

    # NOTE(review): these comparison values (0, 'a', 'aa', 'aaa', ...) look
    # like placeholders — verify against functions.input_leaf's actual outputs.
    if Output == 0:
        result = 'apple scabe'
    elif Output == 'a':
        result = 'Angel man Syndrome founded'
    elif Output == 'aa':
        result = 'Williams Syndrome founded'
    else:
        result = 'No Syndrome founded'

    if Output == 'aaa':
        description = '======='
        frec = '========='
    elif Output == 'aaaa':
        description = '=============='
        frec = '=================='
    elif Output == 'aaaaa':
        description = '===========.'
        frec = '===================='
    else:
        description = '====== '
        frec = '==========='

    # return send_from_directory("images", filename, as_attachment=True)
    return render_template("final.html", name=result, desc=description,
                           desc2=frec, image_name=filename)
def main(dictionary, tfidf_model, tfidf_corpus, matrix_sim, lsi_matrix,
         lsi_model, paragraphs):
    """Run tasks 4.1-4.4: rank paragraphs for a fixed query under the TF-IDF
    model, then under the LSI model, printing the top three of each."""
    # Task 4.1 -- normalise the query and convert it to bag-of-words.
    bow = dictionary.doc2bow(functions.process("How taxes influence economics".lower()))

    # Task 4.2 -- TF-IDF weighting of the query.
    tfidf_index = tfidf_model[bow]

    # Task 4.3 -- three best paragraphs under the TF-IDF similarity index.
    top3 = sorted(enumerate(matrix_sim[tfidf_index]), key=lambda kv: -kv[1])[:3]
    print(top3)

    # Task 4.4 -- dominant LSI topics and best paragraphs under LSI.
    lsi_query = lsi_model[bow]
    strongest_topics = sorted(lsi_query, key=lambda kv: -abs(kv[1]))[:3]
    functions.show_topics(strongest_topics, lsi_model)

    top3 = sorted(enumerate(lsi_matrix[lsi_query]), key=lambda kv: -kv[1])[:3]
    functions.show_docs(top3, paragraphs)
import pandas as pd
# BUG FIX: Tk and LabelFrame are used below but were never imported --
# as written this script raises NameError at `root = Tk()`.
from tkinter import Tk, LabelFrame, filedialog, ttk
from functions import process
from tqdm import tqdm
import time

# Initialize window
root = Tk()
root.title("DrugPred-NN")
root.iconbitmap("icon.ico")
root.configure(bg='#FFFFFF')
#root.geometry("400x475")

# Initialize object
obj = process()

# Initialize frames
frame1 = LabelFrame(root, text="Step 1: Import file", padx=20, pady=20,
                    bg='#FFFFFF')
frame1.grid(row=0, column=0, padx=10, pady=10, sticky="NSEW")

frame2 = LabelFrame(root, text="Step 2: Feature Extraction", padx=20, pady=20,
                    bg='#FFFFFF')
frame2.grid(row=1, column=0, padx=10, pady=10, sticky="NSEW")
import functions as app


def _run():
    """Drive the application life-cycle: initialise, process, shut down."""
    app.init()
    app.process()
    app.end()


if __name__ == '__main__':
    _run()
def main():
    """Print the corrected weight for the unbalanced node of puzzle 7."""
    raw_lines = get_input_by_lines(7)
    tree = process(raw_lines)
    # Same evaluation order as before: find the wrong node, locate where to
    # go, then weigh it.
    wrong_node = find_wrong(raw_lines)[0]
    target = where_to_go(tree, wrong_node)[0]
    print(get_weight(tree, target))
# BUG FIX: logging and functions are used below but were never imported in
# this block; the script would fail with NameError on the first call.
import logging
import os

import functions
import settings

if __name__ == "__main__":
    # set up test logging
    logging.basicConfig(
        filename=settings.HOME_DIR + 'test/test.log',
        format='%(asctime)s %(levelname)s\n%(message)s\n\n',
        level=logging.DEBUG,
    )

    # get test data files and run each one through the processing pipeline
    dmp_files = []
    dmp_files_dir = settings.HOME_DIR + 'test/DMP_files/'
    for f in os.listdir(dmp_files_dir):
        dmp_files.append(dmp_files_dir + f)
        functions.process(dmp_files_dir + f)
#print('Number of features:', len(data['features'])) with open(path.join(output_dir, file_name), 'w') as data_file: json.dump(data, data_file) # return file path to saved data return path.join(output_dir, file_name) def import_file(file_name): """ Convert a .geojson file, or similar, into a geo-dataframe """ df = gpd.read_file(file_name, encoding='UTF-8') return df def create_df(json_data): """ Convert raw geojson into a geo-dataframe """ return gpd.read_geojson(json_data) ############################################## # fetch data and save to file data_path = get_data() # run path width calculations df_1, df_2 = process(import_file(data_path))
from functions import process


def _prompt(message):
    # Show one prompt message and read a single line of user input.
    print(message)
    return input('> ')


# Collect the URL and the word-type preference, then hand both to process().
url_raw = _prompt('''\nPlease enter the URL of the site you'd like to process\n''')
text_pref = _prompt('''\nPlease select type of words to rank: all, bold or italic text\n''')
process(url_raw, text_pref)