def update(cur, conn, tb):
    tb = table_exists(cur, conn, tb)
    read(cur, conn, tb)
    ans = input("Do you want to rename the table (1), change columns (2) or values (3)? ")
    if ans != '1':
        cur.execute("SELECT * FROM " + tb)
        desc = cur.description
        col_names = [desc[i][0].lower() for i in range(len(desc))]
        if ans == '3':
            col = update_rows(cur, conn, tb, col_names)
            sql = "UPDATE " + tb + " SET " + col
        else:
            sql = update_columns(cur, conn, tb, col_names)
    else:
        sql = "ALTER TABLE {0} RENAME TO ".format(tb)
        tb = input('Enter the new table name: ')
        sql += tb
    try:
        cur.execute(sql)
        conn.commit()
        print("Changed successfully!")
    except Exception:
        print('Unable to edit!')
    ans = input("Another change? yes/no: ")
    if ans == 'yes':
        update(cur, conn, tb)
def preprocess(percentage, basicNN=False):
    printOn.blockPrint()
    if basicNN:
        test, unlabel, label, true, x, y, x_true, y_true, x_test, y_test = read.read(
            file='diabetes.csv', drop=None, retNum=1, chopNum=1,
            unlabel_percentage=percentage, ytrain=True)
    else:
        test, unlabel, label, true, x, y, x_true, y_true = read.read(
            file='diabetes.csv', drop=None, retNum=1, chopNum=1,
            unlabel_percentage=percentage)
    clfs = classifiers.ensemble(x, y)
    printOn.enablePrint()
    # move the label from the end of each test point to the front
    for point in test:
        point.insert(0, point.pop())
    if basicNN:
        return unlabel, clfs, true, x, y, test, y_test, x_test
    return unlabel, clfs, true, x, y, test
def main(dir_path, doc_number):
    index_list = IndexList()
    file_title_list = list()
    read(dir_path, index_list, doc_number, file_title_list)
    while True:
        log_inverted_list = read_log('log.txt')
        query = input("Please input your query (Enter 'q' to quit): ")
        if query == 'q':
            break
        query = query.lower().split(' ')
        query = [stem_for_str(keyword) for keyword in query]
        expansion_terms = query_expansion(index_list, query, doc_number, log_inverted_list)
        new_query = list(query)
        new_query.extend(expansion_terms)
        search(new_query, index_list, file_title_list, dir_path)
        clicked = input(
            "Which one satisfies your need? Please enter here (split with ',', '?' stands for none): "
        )
        print()
        with open('log.txt', 'a') as log_writer:
            log_writer.write(','.join(query))
            log_writer.write('\t' + clicked + '\n')
def base_strains(directory):
    result = []
    genes = []
    genes_names = []
    strains_db = r.read(directory + "/ST.txt", r.csv)
    for gene in strains_db[0]:
        gene_file = directory + "/" + gene + ".fasta"
        if os.path.isfile(gene_file):
            genes.append(r.read(gene_file, r.fasta_dict))
            genes_names.append(gene)
    for strain in strains_db[1:]:
        alleles = []
        id = strain[0]
        for gene, allele in enumerate(strain[1:]):
            try:
                name = genes_names[gene] + "_" + allele
                alleles.append(genes[gene][name].seq)
            except (KeyError, IndexError):
                # skip alleles whose gene file or sequence is missing
                pass
        result.append(Strain(id, alleles))
    return result
def assertStdout(inputString, result):
    inputStream = Stream(inputString)
    environment = createStandardEnvironment()
    outputStream = Stream()
    environment["*standard-output*"] = outputStream
    nextExpression = read(inputStream)
    while nextExpression:
        evaluate(environment, nextExpression)
        nextExpression = read(inputStream)
    assert environment["*standard-output*"].read() == result
def load(self, out_dir):
    self.params = Params().from_json(
        r.read(out_dir + "/params.json", r.json))
    self.sample = Sample().from_fasta(
        r.read(out_dir + "/sample.fasta", r.fasta_list))
    self.art_output = r.read(out_dir + "/art.aln", r.aln(self.params.take_ref))
    self.instance = Instance().from_json(
        r.read(out_dir + "/instance.json", r.json))
    return self
def runCode(code):
    inputStream = Stream(code)
    environment = createStandardEnvironment()
    outputStream = Stream()
    environment["*standard-output*"] = outputStream
    lastReturnValue = None  # stays None if the stream holds no expressions
    nextExpression = read(inputStream)
    while nextExpression:
        lastReturnValue = evaluate(environment, nextExpression)
        nextExpression = read(inputStream)
    stdout = environment["*standard-output*"].read()
    return lastReturnValue, stdout
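# Usage sketch for runCode (assumes the surrounding Lisp-style interpreter is
# in scope and that a `print` form writes to *standard-output*; the exact
# expression syntax below is an assumption, not confirmed by the source):
# value, stdout = runCode('(print "hello")')
# assertStdout('(print "hello")', stdout)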
def upload():
    if not os.path.exists(app.config['UPLOAD_FOLDER']):
        os.mkdir(app.config['UPLOAD_FOLDER'])
    if request.method == "POST":
        f = request.files['file']
        file_name = secure_filename(f.filename)
        f.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
        file = os.path.splitext(os.path.basename(file_name))[0]
        result_file = os.path.join(app.config['UPLOAD_FOLDER'], file + '.txt')
        read(os.path.join(app.config['UPLOAD_FOLDER'], file_name), result_file)
        return result_file
def sentence():
    if tk.token.code == CodeTable['IDENTIFIER']:
        assipro.assipro()
    elif tk.token.code == CodeTable['IF']:
        ifsent.ifsent()
    elif tk.token.code == CodeTable['WHILE']:
        whilsent.whilsent()
    elif tk.token.code == CodeTable['READ']:
        read.read()
    elif tk.token.code == CodeTable['WRITE']:
        write.write()
    elif tk.token.code == CodeTable['BEGIN']:
        compsent.compsent()
def menu(self):
    while self.dato:
        selection = input("\n Select one of the menu options \n 1.- Insert \n 2.- Read \n 3.- Update \n 4.- Delete \n\n")
        if selection == '1':
            insert.insert()
        elif selection == '2':
            read.read()
        elif selection == '3':
            update.update()
        elif selection == '4':
            delete.delete()
        else:
            print("Invalid instruction")
def menu(self):
    while self.dato:
        selection = input(
            '\nSelect 1 to insert, 2 to update, 3 to read, 4 to delete \n')
        if selection == '1':
            insert.insert()
        elif selection == '2':
            update.update()
        elif selection == '3':
            read.read()
        elif selection == '4':
            delete.delete()
        else:
            print('\n Invalid selection \n')
def main():
    regions = read()
    params = estimate_parameters_by_region(regions, ('Hubei', 'China'))
    pprint(params)
    print('Beginning model..', flush=True)
    fig = plt.figure(facecolor='w')
    ax = fig.add_subplot(111, axisbelow=True)
    model = SIRD_Model(1200000, 13, 0, 0)
    # model = SIRD_Model(58000000, 444, 28, 17)
    for i in range(49, 150):
        model.plot(ax, params['infection_rate'] * ((i + 1) / 100),
                   params['recovery_rate'], params['death_rate'])
    model.plot(ax, params['infection_rate'], params['recovery_rate'],
               params['death_rate'], alpha=1.0, lw=3)
    ax.set_xlabel('Time /days')
    ax.set_ylabel('Population')
    ax.yaxis.set_tick_params(length=0)
    ax.xaxis.set_tick_params(length=0)
    ax.grid(b=True, which='major', c='w', lw=2, ls='-')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    for spine in ('top', 'right', 'bottom', 'left'):
        ax.spines[spine].set_visible(False)
    plt.show()
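# A minimal, self-contained sketch of one forward-Euler SIRD step, for
# reference only; SIRD_Model's actual integration scheme is not shown above,
# so treat the mapping (beta=infection_rate, gamma=recovery_rate,
# mu=death_rate) as an assumption.
def sird_step(S, I, R, D, N, beta, gamma, mu, dt=1.0):
    dS = -beta * S * I / N
    dI = beta * S * I / N - gamma * I - mu * I
    dR = gamma * I
    dD = mu * I
    return S + dS * dt, I + dI * dt, R + dR * dt, D + dD * dt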
def genetic():
    ITERATIONS = 30
    POP_SIZE = 40
    CROSSOVER_RATE = 0.5
    MUTATION_RATE = 0.01
    TOURNAMENT_SIZE = 6

    task = read('generatedTaskFile.csv')
    print("Here it goes...")
    population = init_population(task.numberOfObjects, POP_SIZE, task)
    for _ in range(ITERATIONS):
        print("New iteration")
        new_population = Population(task)
        new_population.listOfIndividuals = []
        for _ in range(POP_SIZE):
            parent1 = tournament(population, TOURNAMENT_SIZE, task)
            parent2 = tournament(population, TOURNAMENT_SIZE, task)
            child = Individual()
            child.itemsTaken = crossover(parent1, parent2, CROSSOVER_RATE)
            mutate(child, MUTATION_RATE)
            new_population.addIndividualToPopulation(child)
        population = new_population
    return population.best()
def collect(device_path):
    rrds = listrrd(device_path)
    if len(rrds) == 0:
        return
    hostname = device_path.split('/')[-1]
    ts = get_timestamp()
    xml = '<ServerMeasurement xmlns="http://mms/cwo/types/1.2.0" origin="generator.py" site="Blunck WAN" state="UP" hostname="%s" timestamp="%s">' % (hostname, ts)
    hit = False
    for rrd in rrds:
        millis = INTERVAL * 1000
        name = os.path.basename(rrd)
        value = read.read(rrd)
        if value is None:
            continue
        numeric = '<NumericMeasurement intervalInMilliseconds="%d" measurementName="%s" measurementType="RATE" units="unknown" value="%f"/>' % (millis, name, value)
        xml += numeric
        hit = True
    mvnm = collect_interfaces(device_path)
    if mvnm is not None:
        xml += mvnm
    xml += "</ServerMeasurement>"
    if hit:
        return xml
    return None
def collect_interfaces(device_path):
    root = device_path + "/os/interfaces"
    interfaces = listdir(root)
    if len(interfaces) == 0:
        return None
    interface = os.path.basename(interfaces[0])
    rrds = listrrd(root + "/" + interface)
    xml = ''
    for rrd in rrds:
        millis = INTERVAL * 1000
        name = os.path.basename(rrd)
        mvnm = '<MultiValueNumericMeasurement intervalInMilliseconds="%d" measurementName="%s" measurementType="COUNTER" units="unknown">' % (millis, name)
        numerics = "<numericValues>"
        for interface in interfaces:
            interface = os.path.basename(interface)
            try:
                filename = root + "/" + interface + "/" + os.path.basename(rrd)
                value = read.read(filename)
                numeric = '<numericValue key="%s">%f</numericValue>' % (interface, value)
                numerics += numeric
            except Exception:
                # skip interfaces whose RRD cannot be read
                pass
        numerics += "</numericValues>"
        mvnm += numerics + "</MultiValueNumericMeasurement>"
        xml += mvnm
    return xml
def download(url, local):
    r = read(url)
    if r != '':
        with open(local, 'w') as f:
            f.write(r)
        return True
    return False
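# Usage sketch for download (hypothetical URL and filename; assumes read(url)
# returns the response body as a string, or '' on failure):
# if download('http://example.com/data.txt', 'data.txt'):
#     print('saved')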
def new(self, params):
    os.makedirs(params.out_dir, exist_ok=True)
    self.params = params
    w.write(params.out_dir + "/params.json", w.pretty_json(params.to_json()))
    self.sample = Sample().new(params)
    sample_file = params.out_dir + "/sample.fasta"
    w.write(sample_file, w.fasta(self.sample))
    art_prefix = params.out_dir + "/art"
    art = os.environ['ART_ILLUMINA']
    subprocess.run([
        art, "--in", sample_file, "--out", art_prefix,
        "--rndSeed", str(params.seed)
    ] + params.art_flags, stdout=subprocess.DEVNULL)
    self.art_output = r.read(art_prefix + ".aln", r.aln(params.take_ref))
    self.instance = Instance().new(params, self.art_output)
    w.write(params.out_dir + "/instance.json", w.json(self.instance.to_json()))
    w.write(params.out_dir + "/instance.txt", w.text(self.instance.to_text()))
    w.write(params.out_dir + "/instance.stats.json", w.json(self.instance.stats()))
    return self
def new(self, root_dir, strains, aligned, sample):
    out_dir = root_dir + "/" + sample[0]
    os.makedirs(out_dir, exist_ok=True)
    # sample_json = out_dir + "/sample.json"
    # w.write(sample_json, w.json(s.to_json(sample)))
    sample_fasta = out_dir + "/sample.fasta"
    w.write(sample_fasta, w.fasta(s.to_fasta(sample)))
    art_prefix = out_dir + "/art"
    art = os.environ['ART_ILLUMINA']
    subprocess.run([
        art, "--in", sample_fasta, "--out", art_prefix,
        "--seqSys", "HS20", "--len", "100", "--fcov", "100"
    ], stdout=subprocess.DEVNULL)
    take_ref = False
    art_output = r.read(art_prefix + ".aln", r.aln(take_ref))
    instance = Instance().new(strains, aligned, art_output)
    # w.write(out_dir + "/instance.json", w.json(instance.to_json()))
    w.write(out_dir + "/instance.txt", w.text(instance.to_text()))
    w.write(out_dir + "/instance.stats.json", w.json(instance.stats()))
def exchange_X_Y(filename):
    basename = os.path.splitext(filename)[0]
    ext = os.path.splitext(filename)[1]
    savefile = "%s_exchanged_X_Y%s" % (basename, ext)
    print("savefile is", savefile)
    (X, Y) = read.read(filename)
    # write the columns back out with X and Y swapped
    with open(savefile, 'w') as output_file:
        for x, y in zip(X, Y):
            output_file.write('%e %e\n' % (y, x))
def test_classification():
    from read import read
    import numpy, tfidf
    # assumed imports: the original did not show where LDA and MLPClassifier come from
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
    from sklearn.neural_network import MLPClassifier

    m, files = read("training.json")
    y_map = [str(file["topic"]) for file in files]
    topics = []
    for label in y_map:
        if label not in topics:
            topics.append(label)
    y = numpy.array([topics.index(label) for label in y_map])
    print("Building TF-IDF...")
    X, vectorizer = tfidf.vectorizeTFIDF(files)
    print(X.shape)
    print("Performing dimensionality reduction using LDA...")
    lda = LDA(n_components=9)
    X = lda.fit_transform(X.toarray(), y)
    mlp = MLPClassifier()
    mlp.fit(X, y)
    training_score = mlp.score(X, y)
    print("training accuracy: %f" % training_score)
def __init__(self, master=None, path="", pin=[0, 0, 0]):
    super().__init__(master)
    self.master = master
    self.pack()
    self.power, self.layer, self.time = read(path)
    self.create_widgets()
    self.pin = pin
def load(self, level):
    if isinstance(level, str):
        try:
            inp = read.read(level)
            level = Level(level, (9, 9))
            level.values = inp['values']
        except Exception:
            print('File does not exist!')
            return False
    elif isinstance(level, int):
        if level < 0 or level >= len(self.save.levels):
            return False
        level = self.save.levels[level]
    self.jlyf.getComponent('name').set('text', level.name)
    # self.jlyf.build()
    # self.cells = [self.jlyf.getComponent('"%i,%i"' % (x, y)) for x in range(9) for y in range(9)]
    self.width = level.width
    self.height = level.height
    for index, v in enumerate(level.values):
        self.cells[index].set('text', v)
    return True
def menu(self):
    while self.dato:
        selection = input(
            '\nSelect 1 to insert, 2 to update, 3 to read, 4 to delete.\n'
        )
        if selection == '1':
            insert.insert()
        elif selection == '2':
            update.update()
        elif selection == '3':
            read.read()
        elif selection == '4':
            delete.delete()
        else:
            print('\nInvalid selection.')
def main():
    start, goals, walls, goals_ls, boxes_ls = read()
    ##
    ## Comment out any of the functions you don't want to use. For levels 10
    ## and higher, only use the rank/unrank implementation of each search
    ## (the name is followed by a 2 if ranking/unranking is used).
    ##
    print("\nBestFS")
    # BestFS no rank/unrank: heuristic can be: boxes, manhattan
    best_fs(start, goals, walls, boxes, cornered=True, verbose=False)
    # BestFS with rank/unrank: heuristic can be: boxes, manhattan
    best_fs2(start, goals, walls, boxes, cornered=True, verbose=False)
    print("\nA*")
    # A Star no rank/unrank: heuristic can be: boxes, manhattan
    a_star(start, goals, walls, boxes, cornered=True, verbose=False)
    # A Star with rank/unrank: heuristic can be: boxes, manhattan
    a_star2(start, goals, walls, boxes, cornered=True, verbose=False)
    print("\nA* with is_stuck")
    # A Star using is_stuck: heuristic can be: boxes, manhattan
    Astar_stuck(start, goals, walls, boxes, boxes_ls, goals_ls, verbose=False)
    # A Star using is_stuck and rank/unrank: heuristic can be: boxes, manhattan
    Astar_stuck2(start, goals, walls, boxes, boxes_ls, goals_ls, verbose=False)
    print("\nBranch and Bound")
    # Branch and Bound no rank/unrank: heuristic can be: boxes, manhattan
    b_bound(start, goals, walls, boxes, cornered=True, verbose=False)
    # Branch and Bound with rank (no pruning): heuristic can be: boxes, manhattan
    b_bound2(start, goals, walls, boxes, cornered=True, verbose=False)
def genetic_algorithm(tournament_size=TOURNAMENT_SIZE, crossover_rate=CROSSOVER_RATE,
                      mutation_rate=MUTATION_RATE, population_size=POPULATION_SIZE):
    task = read(input_file=OUTPUT_FILE)
    population = init_population(NUMBER_OF_ITEMS, population_size)
    new_pop_val = []
    for _ in range(ITERATIONS):
        new_pop_arr = []
        for _ in range(population_size):
            parent1 = population.tournament(tournament_size, task)
            parent2 = population.tournament(tournament_size, task)
            child = crossover(parent1, parent2, crossover_rate)
            mutated_child = mutate(child, mutation_rate)
            new_pop_arr.append(mutated_child)
        population = Population(new_pop_arr)
        # track the best individual of each generation
        best_from_pop = population.tournament(population_size, task)
        new_pop_val.append(best_from_pop.best_individual(task))
    return new_pop_val
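# A minimal, self-contained sketch of the tournament selection both genetic
# algorithms above rely on; `evaluate` is a hypothetical stand-in for the
# task-specific fitness used by Population.tournament.
import random

def tournament_select(individuals, k, evaluate):
    # draw k contenders at random and keep the fittest one
    contenders = random.sample(individuals, k)
    return max(contenders, key=evaluate)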
def main():
    parser = argparse.ArgumentParser(
        description='The Kantan test framework',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('compiler', type=str, help='path to the compiler executable')
    parser.add_argument('tests', type=str, help='path to the test files directory')
    # store_true instead of type=bool: bool('False') would evaluate to True
    parser.add_argument('--valgrind', action='store_true',
                        help='use valgrind to check for memory leaks')
    parser.add_argument('--suppress', type=str, default=None,
                        help='path to valgrind suppress file')
    parser.add_argument('--threads', type=int, default=multiprocessing.cpu_count() * 2,
                        help='number of worker threads')
    args = parser.parse_args()

    test_cases = read(args.tests)
    memory_leaks = []
    with ThreadPoolExecutor(max_workers=args.threads) as executor:
        executor.map(lambda case: run_test_case(case, args, memory_leaks), test_cases)

    if len(memory_leaks) > 0:
        print('THERE WERE MEMORY PROBLEMS IN THE COMPILER')
    else:
        print('no memory leaks in the compiler detected')
    for memory_leak in memory_leaks:
        print(memory_leak.files)
def kmeans(task):
    start = time.time()
    output = []
    buildings, antennas, reward = read(path + task)
    X = np.array([[int(building.x), int(building.y)] for building in buildings])
    kmeans = KMeans(n_clusters=len(antennas), max_iter=1)
    kmeans.fit(X)
    centroids_x = [int(value[0]) for value in kmeans.cluster_centers_]
    centroids_y = [int(value[1]) for value in kmeans.cluster_centers_]
    # place the fastest antennas on the cluster centroids first
    antennas.sort(key=lambda antenna: antenna.speed, reverse=True)
    for i in range(len(centroids_x)):
        antennas[i].x = centroids_x[i]
        antennas[i].y = centroids_y[i]
        output.append(antennas[i])
    write('output/' + str(task.split('.')[0]) + '.txt', output)
    print(task, str(time.time() - start))
def main():
    start_time = time()
    test, goals, walls, _, _ = read()
    # best_fs(test, goals, walls, manhattan, cornered=False)  # no pruning
    best_fs2(test, goals, walls, boxes)  # with pruning
    print("--- %s seconds ---" % (time() - start_time))
def main():
    start_time = time()
    test, goals, walls, _, _ = read()
    # a_star(test, goals, walls, manhattan, cornered=True)  # no pruning
    a_star2(test, goals, walls, manhattan, cornered=True)  # with pruning
    print("--- %s seconds ---" % (time() - start_time))
def main():
    start_time = time()
    test, goals, walls, _, _ = read()
    b_bound(test, goals, walls, manhattan)  # without pruning
    # b_bound2(test, goals, walls, manhattan)  # with pruning
    print("--- %s seconds ---" % (time() - start_time))
def tests_teshu(self):
    '''Enter special characters for the username and password'''
    k = read.read()
    driver = self.driver()
    driver.get(self.url)
    time.sleep(2)
    username = '******'
def main():
    start_time = time()
    test, goals, walls, ls1, ls2 = read()
    # Astar_stuck(test, goals, walls, manhattan, ls1, ls2)  # without pruning
    Astar_stuck2(test, goals, walls, manhattan, ls1, ls2)  # with pruning
    print("--- %s seconds ---" % (time() - start_time))
def menu(self):
    while self.dato:
        # choosing option to do CRUD operations
        selection = input(
            '\nSelect 1 to insert, 2 to update, 3 to read, 4 to delete\n')
        if selection == '1':
            insert.insert()
        elif selection == '2':
            update.update()
        elif selection == '3':
            read.read()
        elif selection == '4':
            delete.delete()
        else:
            print('\n INVALID SELECTION \n')
def getCaptcha(self):
    resp = self.s.get("http://202.193.80.58:81/academic/getCaptcha.do")
    image_bytes = resp.content
    data_stream = io.BytesIO(image_bytes)
    captcha = Image.open(data_stream)
    strcat = read(captcha)  # recognize the captcha by vector-space comparison
    # print(strcat)
    return strcat
def transform():
    regions, us = read()
    meta = us['meta']
    long_form = defaultdict(list)
    for key, data in us.items():
        if key in meta:
            metadata = meta[key]
            fips = metadata['fips']
            population = int(metadata['population'])
            if fips.strip() == '':
                continue
            if population == 0:
                continue
            county, state, *rest = key
            confirmed = data['confirmed_US']
            dead = data['deaths_US']
            today = confirmed[-1]
            yesterday = confirmed[-2]
            two_days = confirmed[-3]
            delta = today - yesterday
            d_delta = yesterday - two_days
            if d_delta == 0:
                change_ratio = 0.0
            else:
                change_ratio = delta / d_delta
            percent = today / population
            percent_yesterday = yesterday / population
            percent_delta = percent - percent_yesterday
            custom = change_ratio / log(population, 10)
            # zero-pad 4-digit FIPS codes back to 5 digits
            fips = str(int(float(fips)))
            if len(fips) == 4:
                fips = '0' + fips
            long_form['county'].append(county)
            long_form['state'].append(state)
            long_form['fips'].append(fips)
            long_form['today'].append(today)
            long_form['yesterday'].append(yesterday)
            long_form['two_days'].append(two_days)
            long_form['dead'].append(dead[-1])
            long_form['delta'].append(delta)
            long_form['change_ratio'].append(change_ratio)
            long_form['percent'].append(percent)
            long_form['percent_yesterday'].append(percent_yesterday)
            long_form['percent_delta'].append(percent_delta)
            long_form['custom'].append(custom)
    return DataFrame(long_form)
def menu(self):
    while self.dato:
        print('\nSelect: ')
        print('\n 1 to Insert ')
        print('\n 2 to Update ')
        print('\n 3 to Read ')
        print('\n 4 to Delete ')
        selection = input('\nChoose your option: ')
        if selection == '1':
            insert.insert()
        elif selection == '2':
            update.update()
        elif selection == '3':
            read.read()
        elif selection == '4':
            delete.delete()
        else:
            print('\n INVALID SELECTION \n')
def get_reads(samtools_comm, rev_comp):
    """Run a samtools command and parse its output lines into read objects."""
    read_strs = check_output(samtools_comm, shell=True).split('\n')
    read_strs.remove("")
    reads = [read(y) for y in read_strs]
    for t_read in reads:
        t_read.set_rev_comp(rev_comp)
    return reads
def read_csf():
    """
    Read in CSF results from each file and
    concatenate them into a single data frame
    """
    data = []
    for csf_file in CSF_FILES:
        data.append(read(BASE_DIR + csf_file))
    return pd.concat(data, ignore_index=True)
def read_(id):
    data = read.read(id, addr).split("\n")
    id = data[2]
    time = data[3]
    title = data[4]
    from_ = data[5]
    message = ' '.join(data[7:])
    num = check()
    to_ = title.split()[1]
    title_ = from_.split()[1]
    return render_template("read.html", title_=title_, to_=to_, num=str(len(num)),
                           id=id, time=time, from_=from_, message=message,
                           title=title, addr=addr)
def do_read(self, line):
    line = line.split()
    id = line[0]
    data = read.read(id, self.username, self.password)
    data = json.loads(data)
    print("""
    ID: {1}
    From: {0}

    {2}
    """.format(data['from'], id, data['message']))
def shebang(path):
    """return file/string shebang"""
    if not path:
        return
    path = str(path)
    if not os.path.exists(path):
        return
    if isbinaryfile(path):
        return
    content = read(path)
    lines = content.splitlines()
    if lines:
        l = lines[0]  # first line
        if isshebang(l):
            l = l.replace("#!", "", 1)
            return l.strip()
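# Usage sketch for shebang (hypothetical path): a file whose first line is
# "#!/usr/bin/env bash" yields "/usr/bin/env bash".
# print(shebang('/usr/local/bin/myscript'))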
def nextEntry(self):
    # name = self.entry.get()
    # info = self.entry2.get()
    name, title, org, keyword = read.read('reviewer/' + fileList[self.cnt].strip())
    wordList = ' '.join(keyword)
    self.entry.delete(0, len(self.entry.get()))
    self.entry2.delete(0, len(self.entry2.get()))
    self.entry3.delete(0, len(self.entry3.get()))
    self.entry4.delete(0, len(self.entry4.get()))
    self.entry.insert(0, name)
    self.entry2.insert(0, org)
    self.entry3.insert(0, title)
    self.entry4.insert(0, wordList.strip())
    self.cnt = self.cnt + 1
def main(i):
    from read import read
    from preproc import preproc
    from blob_identification import blob_identification
    from recognition import recognition
    from os import popen, mkdir, environ

    path = environ.get("HOME")
    popen("rm -rf blobs")
    mkdir("blobs")
    name = path + "/alpr/latest/images/" + str(i) + ".jpg"
    print(name)
    image = read(name)
    binary_image = preproc(image)
    blob_identification(binary_image)
    number = recognition()
    return number
# coding=utf-8
import read
import geturl

with open('fileList.txt', 'r') as fr:
    fileList = fr.readlines()

fw = open('ans.txt', 'a')
cnt = 24
n = len(fileList)
while cnt <= n:
    name, title, org, keyword = read.read('reviewer/' + fileList[cnt - 1].strip())
    url = geturl.search(name, org, title, keyword)
    fw.write('%s %s %s %s\n' % (name, title, org, url))
    print(cnt)
    cnt += 1
fw.close()
from read import read
from simulate import simulate

init_dict = read('model.prefpy.ini')
simulate(init_dict)
import matplotlib.pyplot as plt
import numpy as np
from mfcc import mfcc
from read import read
from stft import stft

fname = "sineSweep.wav"
(srate, data) = read(fname, "mono")
N = 1024
X = stft(data, N)
X = np.abs(X)
X = X[:N // 2 + 1]
X = mfcc(X, 44100)
# magnitude to dB conversion
# X = 10 * np.log10(X)
plt.imshow(X[1:], interpolation='nearest', aspect='auto', origin='lower')
plt.show()
def do_read(self, id):
    addr = db.data.find("data", "all")[0]['addr']
    print(read.read(id, addr))
"""Read in data from clinical tests""" from read import read BASE_DIR = '/phobos/alzheimers/adni/' MMSE_FILE = BASE_DIR + 'MMSE.csv' CDR_FILE = BASE_DIR + 'CDR.csv' MMSE = read(MMSE_FILE) CDR = read(CDR_FILE) MMSE.loc[MMSE['VISCODE2'] == 'sc', 'VISCODE2'] = 'bl' CDR.loc[CDR['VISCODE2'] == 'sc', 'VISCODE2'] = 'bl'
# read in data in UTC
if bys[buoy]['inmysql']:  # mysql tables
    if table == 'sum':
        # need to have this choose most recent data available
        # choose to look for ven since sum mostly shows ven data
        dend = tools.query_setup_recent(engine, buoy, 'ven').tz_localize('utc')
    else:
        dend = tools.query_setup_recent(engine, buoy, table).tz_localize('utc')
else:
    dend = pd.Timestamp('now', tz='utc')

# start 5 days earlier from 00:00 on day of last data, and account for time zones
# so that enough data is read in for time zone conversion
tzoffset = (dend.tz_localize(None) - dend.tz_convert(tz).tz_localize(None)).seconds / 3600.
dstart = (dend - pd.Timedelta('5 days')).normalize() + pd.Timedelta(str(tzoffset) + ' hours')
dend += pd.Timedelta(str(tzoffset) + ' hours')
df = read.read(buoy, dstart, dend, table=table, usemodel=False, userecent=True, tz=tz)

if len(buoy) == 1:
    fname = path.join('..', 'daily', 'tabs_' + buoy + '_' + table)
else:
    fname = path.join('..', 'daily', buoy)

# write daily data file, for whatever most recent time period
# data was available
if df is not None:
    tools.write_file(df, fname)

# if there are too few rows to plot, set as None
if df is not None and len(df) < 2:
    df = None

# no model output for stations in bays or outside domain
now = pd.Timestamp('now', tz='utc').normalize()
past = now - pd.Timedelta('5 days')
future = now + pd.Timedelta('4 days')
import pandas as pd

from read import read
from patient_info import clean_visits

BASE_DIR = '/phobos/alzheimers/adni/'

# data from ADNIGO/ADNI2
DICTIONARY_51_FILE = BASE_DIR + 'UCSFFSX51_DICT_08_01_14.csv'
DATA_51_FILE = BASE_DIR + 'UCSFFSX51_08_01_14.csv'

# data from ADNI1
DICTIONARY_FILE = BASE_DIR + 'UCSFFSX_DICT_08_01_14.csv'
DATA_FILE = BASE_DIR + 'UCSFFSX_08_01_14.csv'

FSX_51 = read(DATA_51_FILE)
FSX = read(DATA_FILE)

if 'VISCODE2' in FSX.columns:
    FSX = clean_visits(FSX)
else:
    FSX['VISCODE2'] = FSX['VISCODE']

if 'VISCODE2' in FSX_51.columns:
    FSX_51 = clean_visits(FSX_51)
else:
    FSX_51['VISCODE2'] = FSX_51['VISCODE']


def find_unique(src, target):
    """
    Keyword Arguments:
if 'tabs_' in fname:  # only need table name for tabs
    table = fname.split('/')[-1].split('_')[2]
    buoy = fname.split('/')[-1].split('_')[1]
else:
    buoy = fname.split('/')[-1].split('_')[0]
    table = bys[buoy]['table1']

# force the use of metric units if making a plot since both units shown anyway
if datatype == 'pic':
    units = 'M'

## Read in data ##
# from daily file, only for showing table since images created in run_daily.py
if dstart is None:
    df = read.read(fname, dstart=None, dend=None, table=table, units=units,
                   tz=tz, datum=datum)
    dfmodelhindcast = None
    dfmodelrecent = None
    dfmodelforecast = None
# Call to database if needed
else:
    ## Read data ##
    if not modelonly:
        df = read.read(buoy, dstart, dend, table=table, units=units, tz=tz, datum=datum)
        if df is not None:  # won't work if data isn't available in this time period
            tools.write_file(df, fname)
    ## Read model ##
    # To use NOAA-provided model predictions
    if usemodel and bys[buoy]['table1'] == 'ports' and buoy != 'cc0101':
"""Read and clean the UCSF Free-surfer data""" import pandas as pd from read import read from patient_info import clean_visits import numpy as np import matplotlib.pyplot as plt from patient_info import get_dx, get_baseline_classes, get_dx_with_time from read_clinical import MMSE, CDR BASE_DIR = '/phobos/alzheimers/adni/' FDG_FILE = BASE_DIR + 'UCBERKELEYFDG_03_13_14.csv' AV_FILE = BASE_DIR + 'UCBERKELEYAV45_07_30_14.csv' FDG = read(FDG_FILE) AV = read(AV_FILE) FDG['ROI'] = FDG['ROINAME'] + '_' + FDG['ROILAT'] if 'VISCODE2' in FDG.columns: FDG = clean_visits(FDG) else: FDG['VISCODE2'] = FDG['VISCODE'] if 'VISCODE2' in AV.columns: AV = clean_visits(AV) else: AV['VISCODE2'] = AV['VISCODE'] def flatten_pet(): """
def do_read(self, id):
    addr = db.data.find("data", "all")[0]["addr"]
    print(read.read(id, addr).decode("utf-8"))
    self.lastcmd = ""
from read import read

BASE_DIR = '/phobos/alzheimers/adni/'

# diagnostic summary data
DXSUM_FILE = BASE_DIR + 'DXSUM_PDXCONV_ADNIALL.csv'
# data dictionary for all ADNI data
DATADIC_FILE = BASE_DIR + 'DATADIC.csv'
# data dictionary for the ARM assignments
ARM_FILE = BASE_DIR + 'ARM.csv'
# data file for the Registries
REG_FILE = BASE_DIR + 'REGISTRY.csv'

DXSUM = read(DXSUM_FILE)
DICT = read(DATADIC_FILE)
ARM = read(ARM_FILE)
REG = read(REG_FILE)

"""
1: Normal
2: Serious Memory Complaints (SMC)
3: Early MCI
4: Late MCI
5: Alzheimer's Disease
"""
NORMAL = 1
SMC = 2
EMCI = 3
LMCI = 4
AD = 5
import subprocess
import threading

import read


def custom_list():
    from linked_list import Link
    li = []
    with open("ips.txt", 'r') as f:
        for i in f:
            word = i.strip()
            index = word.find(':')
            l = Link(word[0:index], word[index + 1:])
            li.append(l)
    # print(li)
    return li


if __name__ == "__main__":
    subprocess.call("bash get_latest.sh", shell=True)
    ip4list = read.read("latest.json")
    # ip4list = custom_list()
    print("starting to create all nodes")
    unique_id = 10000  # start here to ensure len() = 5
    try:
        for link in ip4list:
            # a_thread(link=link)  # use only this line for single-threaded
            th = threading.Thread(target=a_thread, kwargs={'link': link})
            th.name = unique_id
            th.start()
            unique_id += 1
        print("waiting for all threads to complete")
        master_thread = threading.current_thread().ident
        for i in threading.enumerate():
            if master_thread != i.ident:
import sys

from read import read

if __name__ == "__main__":
    reads = []
    with open(sys.argv[1], "r") as read_f:
        for line in read_f:
            reads.append(read(line))
    pos = 8878
    bases = {}
    # tally the base seen at pos across all reads
    for r in reads:
        base = r.get_base_at_pos(pos)
        bases[base] = bases.get(base, 0) + 1
    # report the last read's base, base quality and mapping quality at pos
    print(r.get_base_at_pos(pos), " ", r.get_base_qual_at_pos(pos), " ", r.MAPQ)
    print(bases)
path = '.'
# files = os.listdir(path)
px_dict = pixel_dict()
# np.seterr(divide='ignore', invalid='ignore')

# for ifile in files:
for ifile in sys.argv[1:]:
    print('Processing {}...'.format(ifile))
    if ifile.endswith(".tif"):
        print('getting pixel sizes')
        [name, ext] = os.path.splitext(ifile)
        px = px_dict[name]
        print('converting to array')
        intensity_array = read(ifile)
        print('binarizing')
        intensity_array = (intensity_array < 2000)
        print('getting void labels')
        lbl, num = label(intensity_array, np.ones((3, 3, 3)))
        print("finding part com")
        part_com = np.mean(np.argwhere(lbl == 0), axis=0)
        print('find blobs')
        # blobs = [(i, np.argwhere(lbl==i)) for i in range(2, num+1)]
        try:
            start = time.time()
            indices = np.argwhere(lbl > 1)
def createExpressionFromString(string):
    inputStream = Stream(string)
    return read(inputStream)
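# Usage sketch (the s-expression syntax is an assumption about this reader):
# expression = createExpressionFromString('(+ 1 2)')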