def main():
    # Create Stack Pointer
    stack_ptr = register()
    # Create Instruction Register
    inst_reg = register()

    ALU_in = None   # Input to ALU
    ALU_out = None  # Output of ALU
    MEM_out = None  # Result of reading from MEM
    WB_addr = None  # Address to write back to

    # Create registers
    data_reg = []
    for it in range(NUM_OF_REG):
        data_reg.append(register())

    # Create memory
    inst_mem = mem_collection("inst", SIZE_OF_INST_MEM)
    data_mem = mem_collection("data", SIZE_OF_DATA_MEM)
    print("Done initializing mem and reg")

    # All data registers
    print("\nData Registers")
    for it in range(NUM_OF_REG):
        print(data_reg[it].read())

    # Instruction memory
    print("\nInstruction Memory")
    print(inst_mem.load(2))

    # Data memory
    print("\nData Memory")
    print(data_mem.load(2))

    print("\n\nRead assembly file and convert to binary")
    f = FileToBin(SOURCE_FILE, BIN_FILE)
    # Read source file
    f.read()
    # Write binary to bin file and return binary
    inst_binary_array = f.write()
    # Print entire inst_binary_array
    print(inst_binary_array)
    # Store all encoded binary to instruction memory
    inst_mem.save_all(inst_binary_array)

    '''
    Pipeline starts here
    '''
    pipeline(stack_ptr, inst_reg, data_reg, data_mem, inst_mem,
             ALU_in, ALU_out, MEM_out, WB_addr)
def main():
    i = 0
    n = 0
    for fp in xmlpath:
        try:
            pipeline(fp, session)
            i += 1  # count a file as successful only after pipeline() completes
        except Exception:
            n += 1
            print("Failed filename: ", fp)
    session.commit()
    session.close()
    print("Number of Total Failed Files: ", n)
    print("Number of Total Successful Files: ", i)
def output():
    # Pull 'TITLE' and 'DEPTH' from the input fields and store them
    try:
        title = request.args.get('TITLE')
    except Exception:
        title = ' NO TITLE GIVEN'
    try:
        max_depth = int(request.args.get('DEPTH'))
    except Exception:
        max_depth = 0

    # Dataframes to dicts
    if title == '':
        success = False
        ld_rated = {}
        s_all = {}
    else:
        success, ld_rated = ppl.pipeline(title, max_depth)
        s_all = ppl.get_searches_dict(ld_rated, success)

    if success:
        bg_image_covering = 'contain'
    else:
        bg_image_covering = 'cover'

    return render_template("output_app.html",
                           ldrated=ld_rated,
                           searchesAll=s_all,
                           title0=title,
                           depth0=max_depth,
                           successfull_search=success,
                           bg_image_covering=bg_image_covering)
def controller():
    pl = pipeline(outputdirectory=outDir,
                  prefix=baseName,
                  pathtorefgenome=REF_GENOME_HG38,
                  pathtocosmic=COSMIC,
                  pathtogatk=GATK,
                  pathtopicard=PICARD,
                  pathtobwa=BWA,
                  pathtobamsurgeondir=BAMSURGEON_DIR,
                  pathtoart=ART_ILLUMINA,
                  pathtobedtools=BEDTOOLS,
                  pathtosamtools=SAMTOOLS,
                  pathtopullcosm=PULLCOSM,
                  pathtomutect=MUTECT,
                  bedfile=bedFile)
    create_reads(pl)
    addMuts(pl)
    pre_processing(pl)
    detect_variants(pl)
def runPipeline(bucket, s3dir, keys):
    getData(bucket, s3dir, keys, '../../data')
    if len(glob.glob('../../data/*.tif')) != 0:
        fileList = sorted(glob.glob('../../data/*.tif'))
    else:
        sys.exit("FILE NOT FOUND")

    # Generate results from pipeline
    print 'Starting Pipeline'
    results = pipe.pipeline(loadTiff(fileList[0]), loadTiff(fileList[1]))
    result_key = keys[0].split('_')[0]

    # Generate visualization
    # mv.generatePlotlyLineGraph(myID, results)

    # Generate csv
    generateCSV(results)
    uploadResults(bucket, s3dir, result_key, '../../results/results.csv')
    return
def lane_detection(img, size=(100, 100)):
    time_start = time.time()
    img = cv2.resize(img, size)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    time_1 = time.time()

    img_ = pipeline(img)
    time_2 = time.time()

    img_warp = perspective_warp(img_)
    time_3 = time.time()

    out_img, curves, lanes, ploty = sliding_window(img_warp, draw_windows=False)
    time_4 = time.time()

    print("\n time_color_channel_conv = " + str(int((time_1 - time_start) * 1000))
          + "\n pipeline_time = " + str(int((time_2 - time_1) * 1000))
          + "\n perspective_warp_time = " + str(int((time_3 - time_2) * 1000))
          + "\n sliding_window_time = " + str(int((time_4 - time_3) * 1000)))
    print("\n Overall Time = " + str(int((time_4 - time_start) * 1000)))

    return curves, lanes, ploty, out_img
def videoProcess(motorq, videoq):
    global cam_width, cam_height
    cam = Camera()
    ReCal = True  # recalibration on

    # Generate the lane detector
    # HACK: feed it a pre-done calibration image
    if ReCal:
        # ld.getCalibImage(cam), ColorProfile.lanes)
        ld.calibrateKmeans(ld.getCalibImage(cam), ColorProfile.lanes)
    else:
        ld.loadSvm("../test/model.pkl")  # pre-trained svm

    while True:
        # We are now in the video loop; check if we should exit
        msg = None
        # Get the most recent message
        while not videoq.empty():
            msg = videoq.get(block=False)

        # Check if the message is None or "exit"
        if msg is None:
            pass
        elif msg == "exit":
            # "exit" is the indicator to stop this function
            return

        # Read a frame from the camera
        frame = cam.image
        if (frame is None) or (not np.any(frame)):
            # Stop when the camera retrieves no frame
            return

        # Try calling the pipeline function
        frame = pipeline.pipeline(frame, motorq, ld)
        # cv2.imshow("test", frame)
        # k = cv2.waitKey(1)

    # Release the camera
    cam.release()
def take_one_cut():
    '''Simple working program to take the first cut'''
    a = pipeline()
    print('pipeline ok')
    b = filmin(a)
    print('filmin ok')
    bright = bright_frame(a)
    bis = bis_frame(a)
    play = play_film(a)
    tl = takeliner(a, line_catcher(a))
    sm = smoother(a, 'c_line')
    ct = cut(a, 'c_line')
    msl = smoother(a, 'l_line')
    ctl = cut(a, 'l_line')
    smr = smoother(a, 'r_line')
    ctr = cut(a, 'r_line')
    # ovl = overliner(a, 'c_line')

    for i in range(500):
        try:
            b.stp()
        except Exception:
            break
        bright.run()
        tl.run()
        bis.run()
        play.get_frame()
        sm.run()
        ct.run()
        msl.run()
        ctl.run()
        smr.run()
        ctr.run()
        # print(a.c_line)

    plt.plot(a.c_line[0], a.c_line[1], '.')
    plt.plot(a.l_line[0], a.l_line[1], '.')
    plt.plot(a.r_line[0], a.r_line[1], '.')
    plt.show()
    a.GO = False
    b.end()
def predict(jobID):
    numApp = int(request.args["numApp"])
    allCandidates = bool(request.args["numApp"])  # NOTE: re-reads "numApp", so this is True whenever numApp is non-empty
    results, jd = pipeline.pipeline(jobID, numApp,
                                    root_file_path=root_path,
                                    all_resumes=allCandidates)
    jd = jd[0].encode('ascii', errors='ignore').decode('ascii')
    # jd = 'Job Description: ' + jd[:300] + '...'

    responseData = []
    for i in range(numApp):
        result = {
            "Rank": i + 1,
            "Candidate ID": results.iloc[i][0],
            "Similarity": round(results.iloc[i][1], 4)
        }
        responseData.append(result)

    return render_template('candidateResults.html',
                           candidates=responseData,
                           job_description=jd)
def runPipeline(bucket, key):
    print 'Getting Data'
    getData(bucket, key, '../../data')
    if len(glob.glob('../../data/*.mat')) != 0:
        fileList = sorted(glob.glob('../../data/*.mat'))
    else:
        sys.exit("MAT File not found, Exiting...")

    data = np.array(io.loadMat('../../data/' + key))

    print 'Starting Pipeline'
    results = pipe.pipeline(data)
    result_key = key + '_results.dat'

    print 'Generating Results'
    # generateCSV(results)
    pickle.dump(results, open('../../results/out.dat', 'w'))

    print 'Uploading Results'
    uploadResults(bucket, result_key, '../../results/out.dat')
    # ADD other result formats (visuals, etc)

    print 'Synapsys finished'
    return
def run_training(config):
    """Train the model."""
    # print(os.getcwd())
    current_path = utils.get_original_cwd() + "/"

    # Read training data
    data = pd.read_csv(current_path + config.dataset.data,
                       encoding=config.dataset.encoding)

    # Divide train and test
    X_train, X_test, y_train, y_test = train_test_split(
        data.drop(config.target.target, axis=1),
        data[config.target.target],
        test_size=0.1,
        random_state=0)  # we are setting the seed here
    X_train.reset_index(inplace=True, drop=True)
    X_test.reset_index(inplace=True, drop=True)

    # Transform the target
    # y_train = np.log(y_train)
    # y_test = np.log(y_test)

    match_pipe = pipeline(config)
    match_pipe.fit(X_train, y_train)

    joblib.dump(match_pipe, utils.to_absolute_path(config.pipeline.pipeline01))
def runExperimentSeries(start, end):
    for i in range(int(start), int(end)):
        config = parseConfig(i)
        pipeline(config[0], config[1], config[2], config[3],
                 config[4], config[5], config[6])
def runExperiment(num):
    config = parseConfig(num)
    pipeline(config[0], config[1], config[2], config[3],
             config[4], config[5], config[6])
def process_frame(frame):
    camera = Camera.get_calibrated_camera()
    pv = pipeline(frame, camera)  # type: PipelineVars
    return pv.img_processed
sender = ('', match.group(1).strip())
gpx = gmail.get_attachment(msg, 'gpx')
mail_time = time.mktime(email.utils.parsedate(msg['Date']))
userid = user.userid_from_sender(sender)
doc = {'mailid': mailid,
       'time': mail_time,
       'sender': sender,
       'gpx_complete': False,
       'userid': userid}
db.tracks.update({'mailid': mailid}, doc, upsert=True)
track_id = db.tracks.find_one({'mailid': mailid})['_id']

gpx_path = 'data/gpx/%s.gpx' % track_id
with open(gpx_path, 'w') as f:
    f.write(gpx.get_payload(decode=True))

# kmz = gmail.get_attachment_by_ext(msg, 'kmz')
# with open('%d.kmz' % mailid, 'w') as f:
#     f.write(kmz.get_payload(decode=True))

track_result = pipeline.pipeline(track_id, db)
db.tracks.update({'mailid': mailid}, {'$set': {'gpx_complete': True}})
if userid:
    db.users.update({'_id': userid},
                    {'$inc': {'total_duration': track_result['duration']}})
yes.close()

# if get_payload(decode=True) barfs on equal signs, it may be a CRLF issue. Look at
# http://stackoverflow.com/questions/787739/python-email-get-payload-decode-fails-when-hitting-equal-sign
def main():
    args = parser.parse_args()
    pipeline(args.config)
import os
import sys
import json

import pandas as pd

# Add the path to utilities
f = os.path.dirname(os.path.realpath(__file__))
par_dir = os.path.abspath(os.path.join(f, os.pardir))
sys.path.append(par_dir)

import pipeline as pp
import utilities.sys_utils as su

if __name__ == '__main__':
    args = su.get_pars([sys.argv[1]])

    # Pipeline parameters
    top_list = args['top_list']
    pattern = args['pattern']
    filepat_list = args['filepat_list']
    ex_list = args['ex_list']
    out_file = args['out_file']

    # Pipeline
    pl = pp.pipeline(filepat_list, top_list, ex_list, pattern,
                     cat_type='avro', print_file=False)
    d_out = (json.loads(d) for d in pl)
    df = pd.DataFrame(d_out)
    if len(df) == 0:
        print 'no data'
        sys.exit(0)
    su.df_to_json_lines(df, out_file)
    print(f"hit @{i}: {hit}, MRR: {mrr}")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--dataset', help='name of the dataset', default='WN18RR')
    args = parser.parse_args()
    dataset = args.dataset

    train_dir = os.path.join("data", dataset, "train.txt")
    valid_dir = os.path.join("data", dataset, "valid.txt")

    print(f'load dataset {dataset}')
    train = pd.read_csv(train_dir, sep='\t', names=['e1', 'r', 'e2'])
    valid = pd.read_csv(valid_dir, sep='\t', names=['e1', 'r', 'e2'])

    relations_kb = memory.make_relations_kb(train, valid)
    G = create_graph(train, add_inverse_name=False)
    # Filter out nodes that are not present in the training graph
    valid = valid.loc[valid.apply(lambda x: x.e1 in G and x.e2 in G, axis=1)]

    print('start cases')
    cases = memory.create_memory_cases(G, cutoff=cutoff,
                                       max_relations=max_relations, cores=cores)
    print('start similarity')
    sim_mat, node_ids = memory.create_similarity(G, sparse=True)
    print('start inference')
    ranks_tail = pipeline(valid, G, sim_mat, node_ids, cases,
                          relations_kb=relations_kb, top_k=top_k, type='tail')
    ranks_head = pipeline(valid, G, sim_mat, node_ids, cases,
                          relations_kb=relations_kb, top_k=top_k, type='head')
    print_scores(ranks_tail, ranks_head)
def process_image(image):
    global line_left
    global line_right
    img = pp.pipeline(image, line_left, line_right, cal_mtx, cal_dist)
    return img
import os
import sys

sys.path.append(os.getcwd() + "/src")

import pipeline
import task


def main(args):
    print(u'OK Lets GO')
    print(args)


if __name__ == '__main__':
    main(sys.argv)
    p = pipeline.pipeline()
    p.clean()
    p.processApp()
    p.pj_initialize()
    # p.run()
from register_methods import *  # I need to change this so I'm not wildcard importing
from pipeline import pipeline
import numpy as np

### For Testing ###
from skimage import io
from skimage.color import rgb2gray
from matplotlib import pyplot as plt

"""The transform pipeline outputs a transform"""
register_pipeline = pipeline(
    # downsample_image,
    # subtract_background,
    # create_binary,
    # orb_extractor,
    estimate_transform
)


def registration(stack):
    # TODO: put in a good docstring. I need to figure out what the input type on stack is.
    # assert isinstance(stack, list) == True, "stack is not a list: %r" % stack
    stack = [img for img in stack]  # TODO: temp solution for taking in a multidimensional ndarray of images
    transform = register_pipeline(stack)
    # show_images(apply_transform(stack, transform))
    return transform


if __name__ == "__main__":
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from build_menu import Console_interface
from pipeline import pipeline

ci = Console_interface()
p = pipeline()

main_prompt = '\n\t\033[35mBLAST-TNG Readout Pipeline\033[0m\n\t\033[33mChoose a number from the list and press Enter:\033[0m'
plot_opts = ['IQ Loops', 'Phase scatter', 'Frequency noise', 'Phase noise']
opt = ci.mk_menu(main_prompt, plot_opts)

if opt == 0:
    chan = input('Channel = ? ')
    p.plot_loop_rotated(chan)
if opt == 1:
    chan = input('Channel = ? ')
    p.phase_scatter(chan)
if opt == 2:
    chan = input('Channel = ? ')
    p.delta_f(chan)
if opt == 3:
def runExperiments(list):
    for num in list:
        config = parseConfig(num)
        pipeline(config[0], config[1], config[2], config[3],
                 config[4], config[5], config[6])
from register_methods import *  # Is this a bad idea? In this case I don't see risk of collision.
from pipeline import pipeline

src = "../img/test043_TL/p1-D1-01b.jpg"
dst = "../img/test043_TL/p1-D1-01b.jpg"

register_pipeline = pipeline(
    show_images,
    read_file
)
# This currently only works for functions that take one input (so func(src, dst) wouldn't work). Needs fixing.

if __name__ == "__main__":
    register_pipeline(src, dst)
import os
import sys
import json

import pandas as pd

f = os.path.dirname(os.path.realpath(__file__))
par_dir = os.path.abspath(os.path.join(f, os.pardir))
sys.path.append(par_dir)

import pipeline as pp
import utilities.sys_utils as su

if __name__ == '__main__':
    args = su.get_pars([sys.argv[1]])

    # Pipeline parameters
    top_list = args['top_list']
    pattern = args['pattern']
    filepat_list = args['filepat_list']
    ex_list = args['ex_list']
    out_file = args['out_file']

    # Pipeline
    pl = pp.pipeline(filepat_list, top_list, ex_list, pattern,
                     cat_type='avro', print_file=False)
    d_out = (json.loads(d) for d in pl)
    df = pd.DataFrame(d_out)
    if len(df) == 0:
        print 'no data'
        sys.exit(0)
    su.df_to_json_lines(df, out_file)
import pymongo

import pipeline
import user

db = pymongo.Connection().spoked
user.db = db

for track in db.tracks.find({'userid': None}):
    userid = user.userid_from_sender(track['sender'])
    if userid:
        track['userid'] = userid
        track_result = pipeline.pipeline(track['_id'], db)
        print track_result
        db.users.update({'_id': userid},
                        {'$inc': {'total_duration': track_result['duration']}})
        db.tracks.save(track)
    print track['sender'], userid
""""Run the whole pipeline """ import pandas as pd from pprint import pprint from pipeline import pipeline file_location = 'csv.pubmed19n1034.csv' abstract_df = pd.read_csv(file_location) abstract_df = abstract_df.dropna() result_dict = dict() for _, row in abstract_df.iterrows(): temp_result = pipeline(row['abstract']) temp_title = row['title'] result_dict[temp_title] = temp_result # pprint(abstract_df.isnull())
def framework(environ, start_response):
    """
    framework handles gluing together functions that handle WebOb request and
    response objects.

    This is just a slightly messy example of how stage functions could be
    layered to build a web framework. None of the actual stage implementations
    are complete.
    """
    settings = Settings()

    @expand_args
    def attach_settings(request, response):
        request.settings = settings
        return request, response

    @expand_args
    def get_cookies_user(request, response):
        if not hasattr(request, 'user') or request.user is None:
            if 'user' in request.cookies:
                request.user = User(request.cookies['user'])
        if not hasattr(request, 'user'):
            request.user = None
        return request, response

    @expand_args
    def get_basicauth_user(request, response):
        if not hasattr(request, 'user') or request.user is None:
            if request.authorization:
                scheme, credential = request.authorization
                if scheme == 'Basic':
                    credential = credential.encode('utf-8')
                    login, password = b64decode(credential).decode('utf-8').split(':')
                    if (login, password) in settings.basicauth_credentials:
                        request.user = User(login)
                        response.set_cookie('user', login)
        if not hasattr(request, 'user'):
            request.user = None
        return request, response

    @expand_args
    def dispatch(request, response):
        for exp, view_func in settings.urls:
            if re.match(exp, request.path_url):
                # TODO: parse arguments
                return view_func(request, response)
        raise exc.HTTPNotFound

    @expand_args
    def controller(request, response, **kwargs):
        return request, response

    @expand_args
    def render(request, response):
        if hasattr(response, 'jinja2_template') and hasattr(response, 'context'):
            response.text = response.jinja2_template.render(**response.context)
        return request, response

    request = Request(environ)
    response = Response()

    framework_pipeline = pipeline([
        Stage(attach_settings),
        Stage(get_cookies_user),
        Stage(get_basicauth_user),
        Stage(dispatch),
        Stage(controller),
        Stage(render)
    ], initial_data=[(request, response)])
    framework_pipeline.join()

    request, response = framework_pipeline.values[0]
    return response(environ, start_response)
dist_pickle = pickle.load(open("calibration.p", "rb"))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# file = 'straight_lines1.jpg'

# Perform thresholding before perspective transformation
thresh = True
file = 'test2/167.png'
source_files = 'test_images'

# Perspective warping source and destination points
src = np.float32([[600, 451], [680, 451], [243, 720], [1057, 720]])
dst = np.float32([[350, 0], [930, 0], [350, 720], [930, 720]])

for file in glob.glob(os.path.join(source_files, "*.jpg")):
    base_filename = os.path.basename(file)
    print(base_filename)
    # Read in an image
    img = cv2.imread(file)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    pipeline.pipeline(img, mtx, dist, src, dst, base_filename,
                      output_files="output_images", debug=True)
def process_image(image):
    img = ppl.pipeline(image)
    return img
from pipeline import pipeline

##### Initialise pipeline objects ################################
pipelinesList = {}

#### Main pipelines ###################
pipelinesList['eyetrack'] = pipeline()
pipelinesList['eyetrack'].config(prepearEyeGaze=True, undistortCamera=True, Trim2=True,
                                 perspective=True, perspeRender=True, drawEyeGaze=True,
                                 BGR2RGB=True)

pipelinesList['TopCamera'] = pipeline()
pipelinesList['TopCamera'].config(flip=True, Trim=True, blur=True, hsv=True, bckSub=False,
                                  hand=True, blueMask=True, redMask=True, greenMask=True,
                                  yellowMask=True, blocksRepres=True, renderROIs=True,
                                  renderTrsfGaze=True, BGR2RGB=True)

##### Masks configuration pipelines ###################################
pipelinesList['pipelineYellow'] = pipeline()
pipelinesList['pipelineYellow'].config(flip=True, Trim=True, blur=True, hsv=True,
                                       yellowMask=True, BGR2RGB=True)

pipelinesList['pipelineBlue'] = pipeline()
pipelinesList['pipelineBlue'].config(flip=True, Trim=True, blur=True, hsv=True,
                                     blueMask=True, BGR2RGB=True, gray=True, thresh=True)

pipelinesList['pipelineRed'] = pipeline()
pipelinesList['pipelineRed'].config(flip=True, Trim=True, blur=True, hsv=True,
                                    redMask=True, BGR2RGB=True)

pipelinesList['pipelineGreen'] = pipeline()
import sys

import gevent

# Assumed import: the Stage and pipeline primitives used below come from the
# project's own pipeline module.
from pipeline import Stage, pipeline


def integerify(x):
    try:
        return int(x)
    except Exception:
        return None


def triple(x):
    try:
        return 3 * x
    except Exception:
        return None


if __name__ == "__main__":
    integerify_stage = Stage(integerify, n_workers=2)
    triple_stage = Stage(triple, n_workers=2)

    print(pipeline([integerify_stage, triple_stage], sys.stdin).join().values)

    p = pipeline([integerify_stage, triple_stage], sys.stdin)
    for x in p.out_q:
        print(x)

    p = pipeline([integerify_stage, triple_stage], sys.stdin)
    while 1:
        sys.stdin.flush()
        p = pipeline([integerify_stage, triple_stage], sys.stdin)
        if p.out_q:
            print(p.out_q.get())
        gevent.sleep(0)
def child():
    if frames_cnt == 0:
        time.sleep(0)
    else:
        pipeline.pipeline(path, frames_cnt)
def main(args):
    print(create_args_str(args))
    demo_files, target_dir, keras_path, ds_path, ds_alpha, ds_trie, lm_path, vocab_path, normalize, gpu = setup(args)
    num_files = len(demo_files)
    print(f'Processing {num_files} audio/transcript samples. All results will be written to {target_dir}')

    lm = load_lm(lm_path) if lm_path else None
    vocab = load_vocab(vocab_path) if vocab_path else None

    stats_keras, stats_ds = [], []
    for i, (audio, transcript) in enumerate(demo_files):
        print('-----------------------------------------------------------------')
        print(f'{i + 1}/{num_files}: Evaluating pipeline on {audio}')
        print('-----------------------------------------------------------------')
        demo_id = splitext(basename(audio))[0]
        target_dir_ds = join(target_dir, demo_id + '_ds')
        target_dir_keras = join(target_dir, demo_id + '_keras')

        audio_bytes, sample_rate, transcript, language = preprocess(audio, transcript, 'en',
                                                                    norm_transcript=normalize)
        voiced_segments = vad(audio_bytes, sample_rate)

        df_alignments_ds = pipeline(voiced_segments=voiced_segments,
                                    sample_rate=sample_rate,
                                    transcript=transcript,
                                    language='en',
                                    ds_path=ds_path,
                                    ds_alpha_path=ds_alpha,
                                    ds_trie_path=ds_trie,
                                    lm_path=lm,
                                    force_realignment=args.force_realignment,
                                    align_endings=args.align_endings,
                                    target_dir=target_dir_ds)
        df_stats_ds = calculate_stats(df_alignments_ds, ds_path, transcript)

        df_alignments_keras = pipeline(voiced_segments=voiced_segments,
                                       sample_rate=sample_rate,
                                       transcript=transcript,
                                       language='en',
                                       keras_path=keras_path,
                                       lm=lm,
                                       vocab=vocab,
                                       force_realignment=args.force_realignment,
                                       align_endings=args.align_endings,
                                       target_dir=target_dir_keras)
        df_stats_keras = calculate_stats(df_alignments_keras, keras_path, transcript)

        # average similarity between Keras and DeepSpeech alignments
        av_similarity = np.mean([levenshtein_similarity(al_keras, al_ds)
                                 for (al_keras, al_ds) in zip(df_alignments_keras['alignment'],
                                                              df_alignments_ds['alignment'])])
        df_stats_ds['similarity'] = av_similarity
        df_stats_keras['similarity'] = av_similarity
        stats_ds.append(df_stats_ds)
        stats_keras.append(df_stats_keras)

        create_demo_files(target_dir_ds, audio, transcript, df_alignments_ds, df_stats_ds)
        create_demo_files(target_dir_keras, audio, transcript, df_alignments_keras, df_stats_keras)

    df_keras = pd.concat(stats_keras)
    csv_keras = join(target_dir, 'performance_keras.csv')
    df_keras.to_csv(csv_keras)

    df_ds = pd.concat(stats_ds)
    csv_ds = join(target_dir, 'performance_ds.csv')
    df_ds.to_csv(csv_ds)
    print(f'summary saved to {csv_keras}')

    visualize_pipeline_performance(csv_keras, csv_ds, silent=True)
    update_index(target_dir,
                 lang='en',
                 num_aligned=len(demo_files),
                 df_keras=df_keras, keras_path=keras_path,
                 df_ds=df_ds, ds_path=ds_path,
                 lm_path=lm_path, vocab_path=vocab_path)
    print(f'Done! Demos have been saved to {target_dir}')
def main(args):
    print(create_args_str(args))
    target_dir, keras_path, lm_path, vocab_path, gpu = setup(args)
    print(f'all results will be written to {target_dir}')

    lm = load_lm(lm_path) if lm_path else None
    vocab = load_vocab(vocab_path) if vocab_path else None

    corpus = get_corpus('rl', 'de')
    corpus.summary()
    test_entries = list(set((segment.entry for segment in corpus.test_set())))

    # add 6 entries from PodClub corpus
    corpus = get_corpus('pc', 'de')
    corpus.summary()
    test_entries += [corpus['record1058'], corpus['record1063'], corpus['record1076'],
                     corpus['record1523'], corpus['record1548'], corpus['record1556']]

    stats = []
    for i, entry in enumerate(test_entries):
        print(f'entry {i + 1}/{len(test_entries)}')
        audio_file = entry.audio_path
        sample_rate = entry.rate
        with open(entry.transcript_path, encoding='utf-8') as f:
            transcript = f.read()
            if args.norm_transcript:
                transcript = normalize(transcript, 'de')

        demo_id = splitext(basename(audio_file))[0]
        target_dir_entry = join(target_dir, demo_id)
        if not exists(target_dir_entry):
            makedirs(target_dir_entry)

        voiced_segments = [Voice(s.audio, s.rate, s.start_frame, s.end_frame) for s in entry]
        df_alignments = pipeline(voiced_segments=voiced_segments,
                                 sample_rate=sample_rate,
                                 transcript=transcript,
                                 language='de',
                                 keras_path=keras_path,
                                 lm=lm,
                                 vocab=vocab,
                                 force_realignment=args.force_realignment,
                                 align_endings=args.align_endings,
                                 target_dir=target_dir_entry)

        df_stats = calculate_stats(df_alignments, keras_path, transcript)

        # calculate average similarity between Keras alignment and original alignments
        original_alignments = [s.transcript for s in entry.segments]
        av_similarity = np.mean([levenshtein_similarity(ka, oa)
                                 for (ka, oa) in zip(df_alignments['alignment'], original_alignments)])
        df_stats['similarity'] = av_similarity

        create_demo_files(target_dir_entry, audio_file, transcript, df_alignments, df_stats)
        stats.append(df_stats)

    df_keras = pd.concat(stats)
    csv_keras = join(target_dir, 'performance.csv')
    df_keras.to_csv(csv_keras)
    print(f'summary saved to {csv_keras}')

    visualize_pipeline_performance(csv_keras, csv_ds=None, silent=True)
    update_index(target_dir,
                 lang='de',
                 num_aligned=len(test_entries),
                 df_keras=df_keras, keras_path=keras_path,
                 lm_path=lm_path, vocab_path=vocab_path)
    K.clear_session()
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
import math

# skimage imports needed by io.imread, resize and color.rgb2lab below
from skimage import io, color
from skimage.transform import resize

from pipeline import pipeline

#%% load image
path = r"../JenAesthetics/small/Giovanni_Francesco_Romanelli_-_" \
       "The_Finding_of_Moses_-_Google_Art_Project.jpg"
img_high = io.imread(path)

#%% execute pipeline
pipe = pipeline(resize_=0.2)
hogs = pipe.run(img_high)

#%% low resolution
img = resize(img_high, (896, 1191))

#%% convert to lab
img_lab = color.rgb2lab(img)

#%% create colormaps
redgreen = LinearSegmentedColormap.from_list('a', ['green', 'white', 'red'])
yellowblue = LinearSegmentedColormap.from_list('b', ['blue', 'white', 'yellow'])

#%% plot full image
fig, ax = plt.subplots(figsize=(4, 5), dpi=300)
def runClassification():
    oneCheck = sum([var_t.get(), var_mRMR.get(), var_mInfo.get()])
    featSelectMethod = ''
    if oneCheck > 1:
        print("Only select one method for feature selection")
        sys.exit()
    if oneCheck == 1:
        if var_t.get() == 1:
            featSelectMethod = 'ttest'
        elif var_mRMR.get() == 1:
            featSelectMethod = 'mrmr'
        else:
            featSelectMethod = 'minfo'
    else:
        print("Please select a method for feature selection")
        sys.exit()

    remove_zeros = False
    if var_rem == 1:
        remove_zeros = True

    clf_gene, fea_gene, clf_miRNA, fea_miRNA, clf_meth, fea_meth, clf_CNV, fea_CNV, clf = \
        pp.pipeline(remove_zeros, 'custom', None, var_Rand, dir,
                    method=featSelectMethod, test_size=float(var_Train.get()))  # new func

    self.gene_feat = fea_gene
    self.miRNA_feat = fea_miRNA
    self.meth_feat = fea_meth
    self.CNV_feat = fea_CNV

    text_OutputClassifier.config(state="normal")
    text_OutputClassifier.insert("end", 'Gene Expression Classifier:\n %s \n' % clf_gene)
    text_OutputClassifier.insert("end", 'Micro RNA Expression Classifier:\n %s \n' % clf_miRNA)
    text_OutputClassifier.insert("end", 'DNA Methylation Classifier:\n %s \n' % clf_meth)
    text_OutputClassifier.insert("end", 'Copy Number Variation Classifier:\n %s \n' % clf_CNV)
    text_OutputClassifier.insert("end", 'Integrated Classifier: \n %s \n' % clf)
    text_OutputClassifier.config(state="disabled")

    text_OutputGene.config(state="normal")
    text_Outputmi.config(state="normal")
    text_OutputMeth.config(state="normal")
    text_OutputCNV.config(state="normal")
    for i in fea_gene:
        text_OutputGene.insert("end", '%s\n' % i)
    for i in fea_miRNA:
        text_Outputmi.insert("end", '%s\n' % i)
    for i in fea_meth:
        text_OutputMeth.insert("end", '%s\n' % i)
    for i in fea_CNV:
        text_OutputCNV.insert("end", '%s\n' % i)
    text_OutputGene.config(state="disabled")
    text_Outputmi.config(state="disabled")
    text_OutputMeth.config(state="disabled")
    text_OutputCNV.config(state="disabled")

    scrollbar_out.config(command=text_OutputClassifier.yview)
    scrollbar_outGene.config(command=text_OutputGene.yview)
    scrollbar_outmi.config(command=text_Outputmi.yview)
    scrollbar_outMeth.config(command=text_OutputMeth.yview)
    scrollbar_outCNV.config(command=text_OutputCNV.yview)
from pipeline import pipeline

### Read in all calibration images
chessboard_source_path = '../camera_cal/'
chessboard_filename = 'calibration*.jpg'  # images have similar names with *
chessboard_images = glob.glob(chessboard_source_path + chessboard_filename)  # glob API to read all images

mtx, dist = perform_image_calibration_and_undistort_chessboards(chessboard_images)

# image = mpimg.imread(chessboard_images[0])
# undistorted_image = cv2.undistort(image, mtx, dist, None, mtx)
# cv2.imwrite('../output_images/undistorted_chessboard.png', image)

### Read in test images
test_images_source_path = '../test_images/'
test_images_filename = 'test*.jpg'
test_images = glob.glob(test_images_source_path + test_images_filename)
# show_images(test_images)

is_video = False
pipeline(test_images, mtx, dist, is_video, None)

### Perform on video
is_video = True
video_file = '../project_video.mp4'
pipeline(None, mtx, dist, is_video, video_file)