def get_devs():
    data1 = fi.read_file("bdev")
    data2 = fi.read_file("vdev")
    data3 = fi.read_file("rdev")
    b = data1[:]
    v = data2[:]
    r = data3[:]
    N = len(b) - 1
    # RMS error for each deviation set: sqrt(sum(x^2 / N))
    bd = np.sqrt(np.sum(np.divide(np.power(b, 2), N)))
    # print("b error = " + str(bd))
    vd = np.sqrt(np.sum(np.divide(np.power(v, 2), N)))
    # print("v error = " + str(vd))
    rd = np.sqrt(np.sum(np.divide(np.power(r, 2), N)))
    # print("r error = " + str(rd))
    # NOTE: bsigma is assumed to be a module-level open file handle; only the
    # b error is written out here.
    bsigma.write(str(bd) + "\n")
def make_notice(args):
    logging.info("Making notice")
    components = []
    if args.maven:
        components.extend(convert_maven(read_file(args.maven)))
    if args.node:
        components.extend(convert_node(read_file(args.node)))
    logging.info("Update the cache")
    for component in components:
        if not is_component_cached(component):
            write_component_to_cache(component, download_definition(component))
    logging.info("Collect licenses")
    lib_description = []
    for component in components:
        data = read_component(component)
        if data:
            try:
                description = construct_lib_description(data)
            except KeyError:
                logging.error(
                    f'package {component.to_string()} has a bad representation')
                continue
            if description.license in ['OTHER', 'NOASSERTION']:
                logging.error(
                    f'package {component.to_string()} has no defined license')
            lib_description.append(description)
        else:
            logging.error(f'component {component.to_string()} is not defined')
    with open('NOTICE', 'w') as notice:
        notice.write('## Third-party Content\n')
        for lib in sorted(lib_description, key=lambda item: item.name):
            notice.write(
                f'\n{lib.name} ({lib.version})\n\n * License: {lib.license}\n')
def preprocess(houses=HOUSES, channels=CHANNELS, path=PATH, threshold=THRESHOLD,
               input_ftype=INPUT_FTYPE, output_ftype=OUTPUT_FTYPE):
    """ Maps the match_times_channel function to all of the given channels """
    ls = []
    for house in houses:
        # Build the mains signal from channels 1 and 2
        times_m, data_m_1 = read_file(
            path + f"/house_{house}/channel_1.{input_ftype}", input_ftype)
        _, data_m_2 = read_file(
            path + f"/house_{house}/channel_2.{input_ftype}", input_ftype)
        data_m = data_m_1 + data_m_2
        times_m, data_m = remove_negative_diffs(times_m, data_m,
                                                input_ftype=None, output_ftype=None)
        times_m, data_m = join_results(
            interpolate_channel(times_m, data_m, threshold=threshold,
                                input_ftype=None, output_ftype=None))
        for channel in channels[house]:
            times_c, data_c = read_file(
                path + f"/house_{house}/channel_{channel}.{input_ftype}", input_ftype)
            times_c, data_c = remove_negative_diffs(times_c, data_c,
                                                    input_ftype=None, output_ftype=None)
            times_c, data_c = join_results(
                interpolate_channel(times_c, data_c, threshold=threshold,
                                    input_ftype=None, output_ftype=None))
            max_power = get_max_power(house, channel, path)
            times_m_copy, data_m_copy = np.copy(times_m), np.copy(data_m)
            result = match_times_channel((times_c, times_m_copy, house),
                                         (data_c, data_m_copy, channel),
                                         path=path, threshold=threshold,
                                         input_ftype=None, output_ftype=output_ftype,
                                         scale=max_power, mean=True)
            ls.append(result)
    if not output_ftype:
        return ls
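# Minimal usage sketch (an assumption, not from the source): with a dataset
# laid out as <path>/house_<n>/channel_<m>.<input_ftype>, passing
# output_ftype=None returns the per-channel match results as a list:
# results = preprocess(houses=[1], channels={1: [3, 4]}, path="data",
#                      output_ftype=None)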
def get_least_p():
    p_data = fi.read_file("ks_output_5.csv", ",")
    p_val = p_data[:, 0]
    r_i = p_data[:, 1]
    r_f = p_data[:, 2]
    N = p_data[:, 3]
    least_p_list = []
    # Keep rows with p < 0.05, N >= 6, and a radial span between 3 and 8
    for m in range(len(p_val)):
        if p_val[m] < 0.05 and N[m] >= 6 and 3 <= r_f[m] - r_i[m] <= 8:
            least_p_list.append(p_data[m, :])
    least_p_array = np.array(least_p_list)
    print(least_p_array)
def read_output(ofile):
    lines = ff.read_file(ofile)
    # First line holds the energy; the rest hold the gradient components
    E = float(lines[0])
    gcc = [line.split() for line in lines[1:]]
    gcc = fncs.flatten_llist(gcc)
    gcc = [float(g_i) for g_i in gcc]
    return E, gcc
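# Expected layout of `ofile`, inferred from the parsing above (values are
# illustrative, not from the source): the first line holds the energy E, and
# the remaining lines hold whitespace-separated gradient components that are
# flattened into a single list of floats, e.g.
#   -76.026630
#    0.001 -0.002  0.000
#    0.000  0.003 -0.001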
def view_catalog():
    catalog = f.read_file()
    for i, item in enumerate(catalog, 1):
        title = get_product_title(item)
        print("{0}) {1}".format(i, title))
def main():
    for xml_file, py_file in get_files():
        print("Simulating %s ... " % colored(xml_file, attrs=["bold"]))
        sim_result = psim(read_file(xml_file), {}, {"quiet": True})
        test_result = run_tests(sim_result, py_file)
        if not test_result:
            sys.exit(1)
def select_quote():
    quotes = read_file(join(NOTES_DIR, "SeamansLog/LifeLessons"))
    if len(quotes) > 1:
        n = randint(0, len(quotes) - 1)
        return quotes[n]
    else:
        return 'No quote'
def read_context(self):
    try:
        atoms = pickle.loads(read_file(self.context_file))
        for name, value in atoms.items():
            self.set(name, value)
    except IOError:
        pass
def get_site_title(site):
    if site == '':
        site = '/'
    # file path: /home/seaman/webapps/mybook/Private/HarborWalk/EmailNeighbors
    # site: ../Private/HarborWalk/
    content = read_file(get_doc_path(site + 'Title'))
    if len(content) > 0:
        return [convert_line(content[0]), content[1]]
    return ['Shrinking World Guides', 'Tips for thriving in the modern world']
def main():
    args = docopt(usage, version='pml v0.1')
    xml = pml(app_file=args['<app.json>'],
              graphml=read_file(args['<file.graphml>']),
              props=read_json(args['--props']) if args['--props'] else {},
              params=parse_params(args['--param'] or ''),
              prettify=args['--pretty'])
    print(xml)
def instance(xml_file):
    """Show graph instance information of an XML file."""
    xml = read_file(xml_file)
    instance = parse_poets_xml(xml)[1]
    result = {
        "devices": len(instance["devices"]),
        "edges": len(instance["edges"])
    }
    pp(result)
def test_post_crud():
    # * CSV file post 'Rattlesnakes, I hate snakes'
    open_CSV()
    # * Print post list
    print_post()
    # * Add post 'Kittens, Kittens are Fuzzy'
    add_posts()
    write_post('Kittens, Kittens are Fuzzy')
    # * Add user_id of 4 to posts
    add_users()
    # * Print posts showing user names
    print_posts_users()
    # * Remove post
    remove_post()
    # * Remove user
    remove_users()
    # Round-trip a file through write_file/read_file
    text = "line1\nline2"
    path = 'test.txt'
    write_file(path, text)
    t = read_file(path)
    print('text:' + text + '$')
    print('t:' + t + '$')
    assert t == text
def fit_params(lraw, braw, rraw, vgsrraw):
    # NOTE: the parameters are immediately overwritten from the CSV below;
    # `randbarray` is assumed to be defined at module level.
    rawdata = fi.read_file("hyllus_pm2_7stars_092816.csv", ",")
    lraw = rawdata[:, 2]
    braw = randbarray[:]
    rraw = rawdata[:, 4]
    vgsrraw = rawdata[:, 5]
    os.system("touch orbit.input")
    inputfile = open('orbit.input', 'r+')
    for j in range(len(lraw)):
        print(lraw[j], braw[j], vgsrraw[j], rraw[j], file=inputfile)
    inputfile.close()
def add_jcl_file(imagename, dockerfile):
    jclfile = options.jcl_name(imagename)
    jcl_data = ""
    if os.path.isfile(jclfile):
        # use local jcl file
        print("Using jockler definition from %s ..." % jclfile)
        jcl_data = files.read_file([jclfile])
        # deserialize and re-serialize as syntax check
        jcl_data = json.dumps(json.loads(jcl_data), indent=2)
    else:
        print("Generating jockler data from %s ..." % dockerfile)
        # generate jcl file from dockerfile
        jcl_data = extract_jcl_data(imagename, dockerfile)
    store.write_data(jclfile, imagename, jcl_data)
def psim(xml_input, rmap={}, options={}):
    """Simulate POETS XML.

    Arguments:
      - xml_input (str) : an XML file or string.
      - rmap (dict)     : device region map [optional].
      - options (dict)  : simulation options [optional].

    See docstring of simulator.simulate for simulation options documentation.

    Return:
      - result (dict) : simulation result.
    """
    xml = read_file(xml_input) if is_file(xml_input) else xml_input
    schema = Schema(xml, rmap)
    result = simulate(schema, options)
    return result
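# Usage sketch: psim accepts either a path or the XML text itself, per the
# is_file() check above. "app.xml" is an illustrative filename, and the
# "quiet" option mirrors its use elsewhere in this module:
# result = psim("app.xml", options={"quiet": True})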
def devices(xml_file):
    """Show device type information of an XML file."""
    attr = lambda attribute, items: [item[attribute] for item in items]
    xml = read_file(xml_file)
    graph_type = parse_poets_xml(xml)[0]
    devices = {
        dev["id"]: {
            "state": {
                "scalars": attr("name", dev["state"].get("scalars", [])),
                "arrays": attr("name", dev["state"].get("arrays", []))
            },
            "pins": {
                "input": attr("name", dev["input_pins"]),
                "output": attr("name", dev["output_pins"])
            }
        }
        for dev in graph_type["device_types"]
    }
    pp(devices)
def init_data_for_test(file):
    if file is None:
        return False
    data = read_file(file)
    record_list = []
    user_ids = []
    for line in data:
        detail = str(line).split('-')
        if len(detail) == 4:
            record = action_log(user_id=str(detail[0]), action_id=str(detail[1]),
                                item_id=str(detail[2]), time_line=str(detail[3]),
                                uuid=None)
            record_list.append(record)
            user_ids.append(str(detail[0]))
    user_list = list(set(user_ids))
    for user_id in user_list:
        redis_util.delete_action_log(user_id, None, 'user_itemall')
        redis_util.delete_action_log(user_id, None, None)
    cassandra_util.insert_action_record(record_list)
    return True
def remove_negative_diffs(house_or_times, channel_or_data, path=PATH,
                          input_ftype=INPUT_FTYPE, output_ftype=OUTPUT_FTYPE):
    """ Removes negative time differences within the data """
    if input_ftype:
        house, channel = house_or_times, channel_or_data
        times, data = read_file(path + f"/house_{house}/channel_{channel}.{input_ftype}",
                                input_ftype)
    else:
        house = channel = "undefined"
        times, data = house_or_times, channel_or_data
    sortinds = times.argsort()
    new_times = times[sortinds]
    new_data = data[sortinds]
    if output_ftype:
        write_file(path + f"/house_{house}/channel_{channel}.{output_ftype}",
                   new_times, new_data, output_ftype)
    else:
        return new_times, new_data
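# Array-mode sketch of remove_negative_diffs (illustrative values; assumes
# numpy is imported as np). Out-of-order timestamps are simply sorted:
# times, data = remove_negative_diffs(np.array([3, 1, 2]), np.array([.3, .1, .2]),
#                                     input_ftype=None, output_ftype=None)
# -> times == [1, 2, 3], data == [0.1, 0.2, 0.3]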
def incoming_email(request):
    logging.info(request.POST)
    filename = request.POST['filename']
    logging.info("filename = %s" % filename)
    data = files.read_file(filename)
    logging.info('email fetch ok')
    email = EmailMessage(data)
    a_to = parseaddr(email.to)[1]
    a_from = parseaddr(email.sender)[1]
    logging.info('email.to=%s' % a_to)
    logging.info('email.sender=%s' % a_from)
    r = re.match(r'^import-email-(\d+)@', a_to)
    if r:
        logging.info('import email, id %s' % r.group(1))
        process_incoming_email_template(r.group(1), data)
        return HttpResponse("ok - import email")
    return HttpResponse('ok - ign')
def test_article_crud():
    # * CSV file Article 'Rattlesnakes, I hate snakes'
    # * Print Article list
    # * Add Article 'Kittens, Kittens are Fuzzy'
    # * Add author_id of 4 to Articles
    # * Print Articles showing Author names
    # * Select articles for Author 4
    # * Lookup '4, Kittens'
    # * Change 'Kittens' body to 'Kittens are cute!'
    # * Remove Article
    # * Remove Author
    text = "line1\nline2"
    path = 'test.txt'
    write_file(path, text)
    t = read_file(path)
    print('text:' + text + '$')
    print('t:' + t + '$')
    assert t == text
def read_ispe(filename):
    '''
    read input file for ispe
    '''
    # read lines
    lines = ff.read_file(filename)
    lines = fncs.clean_lines(lines, strip=True)
    # initialize data
    ispe_xy = []
    VR, VP = None, None
    tension = 0.0
    # find data in lines
    for line in lines:
        if line == "\n":
            continue
        label, val = line.split()
        val = float(val)
        if label.lower() == "tension":
            tension = val
        elif label.lower() == "reac":
            VR = val
        elif label.lower() == "prod":
            VP = val
        else:
            ispe_xy.append((label, val))
    return ispe_xy, tension, VR, VP
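# Inferred ispe input format (one "<label> <value>" pair per line; the labels
# "tension", "reac" and "prod" are special, and any other label is collected
# as an (x, y) point. Values below are illustrative, not from the source):
#   tension   0.0
#   reac    -10.20
#   prod    -12.80
#   s1        0.75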
def set_EXE():
    global EXE
    txt = os.path.dirname(os.path.realpath(__file__)) + "/paths.txt"
    # Already defined in this module
    if 'EXE' in globals():
        return
    # Try to import it from the environment
    elif "OrcaExe" in os.environ:
        # in .bashrc: export OrcaExe="$MYHOME/Software/orca_4_0_1_2/orca"
        EXE = os.environ["OrcaExe"]
        return
    # Read it from the paths file
    elif os.path.exists(txt):
        lines = read_file(txt)
        lines = clean_lines(lines, "#", True)
        for line in lines:
            if line == "\n":
                continue
            name, path = line.split()
            if name == "orca":
                EXE = path
                return
    # Not found
    else:
        raise Exc.ExeNotDef(Exception)
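# A paths.txt consistent with the parser above (one "<name> <path>" pair per
# line, "#" starts a comment; the location shown is illustrative):
#   # program   location
#   orca   /home/user/Software/orca_4_0_1_2/orca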
                 'r+')  # name of output file
    for j in range(len(newl1)):
        print(newl1[j], newb1[j], newrgal1[j], newrsol1[j], newvgsr1[j],
              newx1[j], newy1[j], newz1[j], file=test1)
    test1.close()
    py.scatter(newl1, newb1, c='r')
    py.plot(l3, b3)
    py.plot(l2, b2)
    py.show()


if __name__ == "__main__":
    data1 = fi.read_file("sidd.5.05e6.globular.2.02B.params", " ")  # input nbody file
    data2 = fi.read_file("hermustest31.back.0.25B.params.small", ",")  # input orbit files
    data3 = fi.read_file("hermustest31.forward.0.25B.params.small", ",")
    # for Paul's nbody data and orbit params
    l1 = data1[:, 10]
    b1 = data1[:, 11]
    x1 = data1[:, 0]
    y1 = data1[:, 1]
    z1 = data1[:, 2]
    rgal1 = data1[:, 6]
    rsol1 = data1[:, 8]
    vgsr1 = data1[:, 9]
    leny = int(np.amax(y) - np.amin(y))
    vdispmatrix = np.zeros((leny, lenx))
    # Unpack the flat z array into a (leny, lenx) matrix, column by column
    for i in range(lenx):
        for j in range(leny):
            counter = (180 * i) + j  # NOTE: the stride of 180 assumes leny == 180
            vdispmatrix[j][i] = z[counter]
    data = vdispmatrix
    dx = np.arange(lmin, lmax, lrange / 10)
    dy = np.arange(bmin + 90, bmax + 90, brange / 10)
    pl.xticks(dx)
    pl.yticks(dy)
    pl.xlim(lmin, lmax)
    pl.ylim(bmin + 90, bmax + 90)
    pl.xlabel('l')
    pl.ylabel('b + 90')
    pl.pcolor(data, vmin=0, vmax=100)
    pl.colorbar()
    pl.show()


if __name__ == "__main__":
    data1 = fi.read_file(str(filename))  # NOTE: `filename` is assumed to be defined at module level
    x = np.asarray(data1[:, 1])
    y = np.asarray(data1[:, 2])
    z = np.asarray(data1[:, 0])
    vdispplot(x, y, z)
import os

import numpy as np  # needed for the array math below (missing in the original)
import files as fi


def get_devs(b, v, r):
    N = len(b)
    # RMS error for each deviation set: sqrt(sum(x^2 / N))
    bd = np.sqrt(np.sum(np.divide(np.power(b, 2), N)))
    print("b error = " + str(bd))
    vd = np.sqrt(np.sum(np.divide(np.power(v, 2), N)))
    print("v error = " + str(vd))
    rd = np.sqrt(np.sum(np.divide(np.power(r, 2), N)))
    print("r error = " + str(rd))


if __name__ == "__main__":
    data1 = fi.read_file("bdev")
    data2 = fi.read_file("vdev")
    data3 = fi.read_file("rdev")
    b = data1[:]
    v = data2[:]
    r = data3[:]
    get_devs(b, v, r)
relatorio = []
d = os.getcwd()
d1 = os.path.join(d, "compostos")
nome_do_arquivo_1 = input(
    "Enter the file name for graph 1 (including the .txt): ")
fname1 = os.path.join(d1, nome_do_arquivo_1)
relatorio.append("REPORT")
relatorio.append("Compound 1 file: ")
relatorio.append(fname1)
graph1 = Graph()
edge_lists = file_to_list(read_file(fname1).split('\n'))  # renamed from `file` to avoid shadowing the builtin
graph1.add_edges(edge_lists[0])
print("\nGraph legend:")
print("Vertex: [list of edges]\n")
print("GRAPH 1")
relatorio.append(' ')
relatorio.append("Graph legend:")
relatorio.append("Vertex: [list of edges]")
relatorio.append("GRAPH 1")
for key, value in graph1.nodes_edges.items():
    print("{}: {}".format(key, value))
    string1 = "{}: {}".format(key, value)
    relatorio.append(string1)
import scipy as sp
import numpy as np
import math as ma
import matplotlib.pyplot as py
import files as fi

#filename = raw_input("Enter filename: ")
#lcol = int(raw_input("Enter l column: "))
#bcol = int(raw_input("Enter b column: "))
#lmin = float(raw_input("Min l value: "))
#lmax = float(raw_input("Max l value: "))
#bmin = float(raw_input("Min b value: "))
#bmax = float(raw_input("Max b value: "))


def orbit_polynomial(l, b):
    l1 = l[0, :]
    b1 = b[0, :]
    # Fit a cubic to the orbit trace in (l, b)
    orbfit = np.polyfit(l1, b1, 3)
    print(orbfit)


if __name__ == "__main__":
    data1 = fi.read_file("hermus_ra_dec_l_b", ",")
    l = np.array([data1[:, 2]])
    b = np.array([data1[:, 3]])
    orbit_polynomial(l, b)
import matplotlib.pyplot as plt
import pylab as py
import files as fi

data = fi.read_file("50_orbits_vary_data.csv", ",")
data1 = fi.read_file("hermustest64.forward.100M.csv.params", ",")
data2 = fi.read_file("hermustest64.back.100M.csv.params", ",")
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
r = data[:, 8]
vgsr = data[:, 9]
l = data[:, 10]
# Wrap galactic longitude into the range (-180, 180]
for i in range(len(l)):
    if l[i] > 180:
        l[i] = l[i] - 360.0
b = data[:, 11]
xorbfor = data1[:, 0]
yorbfor = data1[:, 1]
zorbfor = data1[:, 2]
rorbfor = data1[:, 8]
vgsrorbfor = data1[:, 9]
lorbfor = data1[:, 10]
for j in range(len(lorbfor)):
    if lorbfor[j] > 180:
        lorbfor[j] = lorbfor[j] - 360.0
borbfor = data1[:, 11]
                 'r+')  # name of output file
    for j in range(len(newl1)):
        print(newl1[j], newb1[j], newrgal1[j], newrsol1[j], newvgsr1[j],
              newx1[j], newy1[j], newz1[j], file=test1)
    test1.close()
    py.scatter(newl1, newb1, c='r')
    py.plot(l3, b3)
    py.plot(l2, b2)
    py.show()


if __name__ == "__main__":
    data1 = fi.read_file("hermus.1e5.10pc.orb61.2.05B.nbody.params", ",")  # input nbody file
    data2 = fi.read_file("hermustest61.forward.250M.params.small", ",")  # input orbit files
    data3 = fi.read_file("hermustest61.back.250M.params.small", ",")
    # for Paul's nbody data and orbit params
    l1 = data1[:, 10]
    b1 = data1[:, 11]
    x1 = data1[:, 0]
    y1 = data1[:, 1]
    z1 = data1[:, 2]
    rgal1 = data1[:, 6]
    rsol1 = data1[:, 8]
    vgsr1 = data1[:, 9]
def get_headline(page):
    text = read_file(page)
    if len(text) > 0:
        return convert_line(text[0]).replace('*', '')
    return 'Shrinking World Guides'
def interpolate_channel(house_or_times, channel_or_data, threshold=THRESHOLD, path=PATH,
                        input_ftype=INPUT_FTYPE, output_ftype=OUTPUT_FTYPE):
    """ Goes over all the data in a given channel, interpolates and splits it """
    if input_ftype:
        house, channel = house_or_times, channel_or_data
        times, data = read_file(path + f"/house_{house}/channel_{channel}.{input_ftype}",
                                input_ftype)
    else:
        house = channel = "undefined"
        times, data = house_or_times, channel_or_data
    times, data = remove_negative_diffs(times, data, input_ftype=None, output_ftype=None)
    # Calculate the differences between the consecutive timestamps
    diffs = np.diff(times)
    # Discard those with difference 1, and add the index in the original array
    l1 = np.array([diffs, np.arange(len(diffs))]).transpose()[(diffs - 1).astype(bool)]
    arrays = []
    current_times = np.empty(times[-1] - times[0], dtype=np.int32)
    current_data = np.empty(times[-1] - times[0], dtype=np.float32)
    array_index = 0
    last_i = -1
    for diff, i in l1:
        if diff == 0:
            current_times[array_index : array_index + i - last_i] = times[last_i : i]
            current_data[array_index : array_index + i - last_i] = data[last_i : i]
            array_index += i - last_i
            last_i = i
            continue
        # Add the elements up to the current index to the current arrays
        current_times[array_index : array_index + i - last_i] = times[last_i + 1 : i + 1]
        current_data[array_index : array_index + i - last_i] = data[last_i + 1 : i + 1]
        array_index += i - last_i
        # If the time jump is less than the set threshold, interpolate between them
        if diff <= threshold:
            new_times, new_data = interpolate(data, times, diff, i)
            current_times[array_index : array_index + diff - 1] = new_times
            current_data[array_index : array_index + diff - 1] = new_data
            array_index += diff - 1
        # Else, start new arrays and save the current ones
        else:
            current_times.resize((array_index,))
            current_data.resize((array_index,))
            arrays.append((np.copy(current_times), np.copy(current_data)))
            current_times = np.empty(times[-1] - times[i], dtype=np.int32)
            current_data = np.empty(times[-1] - times[i], dtype=np.float32)
            array_index = 0
        last_i = i
    # Add the final elements to the current array
    to_go = len(times) - i - 1
    current_times[array_index : array_index + to_go] = times[i + 1:]
    current_data[array_index : array_index + to_go] = data[i + 1:]
    array_index += to_go
    # Deallocate the unnecessarily allocated space
    current_times.resize((array_index,))
    current_data.resize((array_index,))
    # Save it as well
    arrays.append((current_times, current_data))
    if output_ftype:
        # Write all of the data to new files
        for file_num, (times_array, data_array) in enumerate(arrays):
            # Make the channel_?_preprocessed directory if it doesn't exist
            if not os.path.exists(PATH + f"/house_{house}/channel_{channel}_preprocessed"):
                os.mkdir(PATH + f"/house_{house}/channel_{channel}_preprocessed")
            # NOTE: file_num is unused in the name below, so every iteration
            # overwrites the same file; including it is likely the intent.
            write_file(path + f"/house_{house}/channel_{channel}_preprocessed/file_{output_ftype}.npy",
                       times_array, data_array, output_ftype)
    else:
        return arrays
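# Array-mode sketch of interpolate_channel (illustrative values; assumes
# numpy as np). A gap wider than `threshold` splits the channel, so this
# call returns two (times, data) pieces:
# times = np.array([0, 1, 2, 10, 11], dtype=np.int32)
# data  = np.array([1., 1., 1., 2., 2.], dtype=np.float32)
# pieces = interpolate_channel(times, data, threshold=5,
#                              input_ftype=None, output_ftype=None)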
def get_contents(filename):
    if not isfile(filename):
        return ''
    return text_to_html(read_file(filename)[1:])
def messages(xml_file):
    """Show message type information of an XML file."""
    xml = read_file(xml_file)
    gtype = parse_poets_xml(xml)[0]
    messages = {msg["id"]: msg for msg in gtype["message_types"]}
    pp(messages)
def domain_map():
    domains = {}
    for b in read_file(get_mybook_path('Domains')):
        (domain, directory) = b.split(' ')
        domains[domain] = directory
    return domains
def read(file):
    """Read file."""
    return read_file(file)
    redis_cl.srem("running", pid)


@user_function
def run(xml_input, rmap={}, rcon={}, verbose=False, async=False, level=1):
    """Start process."""
    # NOTE: `async` became a reserved word in Python 3.7; this parameter would
    # need renaming (e.g. async_) to run on modern interpreters.
    # Prepare Redis keys.
    pid = redis_cl.incr("process_counter")
    completed = "completed-%d" % pid
    process_key = get_process_key(pid)
    result_queue = "result-%d" % pid
    # Prepare Schema.
    xml = read_file(xml_input) if is_file(xml_input) else xml_input
    schema = Schema(xml, rmap)
    regions = schema.get_regions()
    # Prepare process and job information.
    process = {
        "xml": xml,
        "pid": pid,
        "user": whoami(),
        "level": level,
        "nedges": len(schema.graph_inst["edges"]),
        "verbose": verbose,
        "ndevices": len(schema.graph_inst["devices"]),
        "nregions": len(regions),
        "completed": completed,
        "start_time": time.time(),