def __init__(self, in_str):
    """ Parses the output of dplace -qqq """
    # NOTE(review): self.processes is read before being assigned anywhere in
    # this method -- presumably a class attribute or set by a subclass; confirm.
    buf = StringIO.StringIO(in_str)
    lines = buf.readlines()
    lines = [l.rstrip() for l in lines] # Remove newlines, as we already linefied this
    lines = [' '.join(l.split()) for l in lines] # Remove whitespace
    i = 0  # unused counter, kept as-is
    lastJob = None
    for l in lines:
        # Parse all possible line-types
        rjob = parse("0x{key} {nTasks:d} {owner} {pid:d} {cpu:d} {name}", l)
        rfork = parse("{pid} {cpu:d} {name}", l)
        if rjob != None:
            # Init with an empty list, if this is the first entry
            if not rjob["name"] in self.processes:
                self.processes[rjob["name"]] = []
            self.processes[rjob["name"]].append({"nTasks":rjob["nTasks"], "owner":rjob["owner"], "pid":rjob["pid"], "cpu":rjob["cpu"], "name":rjob["name"]})
            lastJob = rjob
        elif rfork != None and lastJob != None:
            # Forked process use an other format, but are listed below the parent;
            # inherit nTasks/owner from the parent job entry.
            self.processes[lastJob["name"]].append({"nTasks":lastJob["nTasks"], "owner":lastJob["owner"], "pid":rfork["pid"], "cpu":rfork["cpu"], "name":rfork["name"]})
def parseSshConfig(inFd):
    """Parse an OpenSSH config stream into a dict keyed by Host alias.

    Returns a SafeDict mapping host alias -> SafeDict of ansible_* settings.
    Entries seen before any "Host" line land under the key "unknown".
    """
    confDict = SafeDict()
    ansible_node = "unknown"  # fallback key until the first Host line
    for l in inFd:
        line = l.lstrip()
        res = parse("Host {hostId}", line)
        if res is not None:
            ansible_node = res['hostId']
            confDict[ansible_node] = SafeDict()
            continue
        res = parse("HostName {ssh_host}", line)
        if res is not None:
            confDict[ansible_node]['ansible_ssh_host'] = res['ssh_host']
            # NOTE(review): no `continue` here, unlike the other branches.
            # Harmless (a HostName line cannot match the patterns below),
            # but presumably an oversight -- confirm before changing.
        res = parse("Port {ssh_port}", line)
        if res is not None:
            confDict[ansible_node]['ansible_ssh_port'] = res['ssh_port']
            continue
        res = parse("IdentityFile {pvt_key}", line)
        if res is not None:
            confDict[ansible_node]['ansible_ssh_private_key_file'] = res['pvt_key']
            continue
    return confDict
def benchmark(n, pthreads, reps):
    """Run ../fib `reps` times with the given n/pthreads and log results to bm.csv.

    Each CSV row holds [n, pthreads] plus whatever counters/timings could be
    parsed from the program's stdout.  `titles` is a module-level header list.
    """
    with open('bm.csv', 'wb') as myfile:  # 'wb' => Python 2 csv convention
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(titles)
        for i in range(0, reps):
            result = [n, pthreads]
            output = Popen(["../fib", str(n), str(pthreads)], stdout=PIPE).communicate()[0]
            buf = StringIO.StringIO(output)
            for line in buf:
                # Patterns include the trailing newline so they match raw lines.
                r = parse("called {called} times\n", line)
                if r is not None:
                    result.append(r['called'])
                    continue
                r = parse("forked {forked} times\n", line)
                if r is not None:
                    result.append(r['forked'])
                    continue
                r = parse("time elapsed: {elapsed1}\n", line)
                if r is not None:
                    result.append(r['elapsed1'])
                    continue
                r = parse("time elapsed after init: {elapsed2}\n", line)
                if r is not None:
                    result.append(r['elapsed2'])
                    continue
            print result
            wr.writerow(result)
def parse(self):
    """Translate the NFS exports config into tuple lines written to TARGET_FILE.

    Each non-empty config line "folder ip/netmask(flags)" (or the short form
    "folder ip(flags)", which defaults netmask to 255.255.255.255) becomes one
    "('folder', 'ip', 'netmask', 'flags')" line in the output.  Raises on
    lines matching neither format.
    """
    items = []  # NOTE(review): never used -- possibly leftover
    with open(NFS_CONFIG) as f, open(TARGET_FILE, "w") as g:
        lines = tuple(f)
        i = 0  # 1-based line counter used in the error message
        for line in lines:
            i += 1
            params = line.split()
            if not params:
                continue  # skip blank lines
            folder = params[0]
            try:
                # Preferred form: ip/netmask(flags)
                pattern = "{}/{}({})"
                r = parse(pattern, params[1])
                (ip, netmask, flags) = r.fixed
            except Exception as exc:
                try:
                    # Fallback form without an explicit netmask: ip(flags)
                    pattern = "{}({})"
                    r = parse(pattern, params[1])
                    (ip, flags) = r.fixed
                    netmask = '255.255.255.255'
                except Exception as exc2:
                    print "Unable to parse %d line of nfs config" % i
                    raise Exception(exc2.message)
            #print ip, netmask, flags
            g.write("('%s', '%s', '%s', '%s')\n" % (folder, ip, netmask, flags))
def loadExpr():
    """Read an expression from the keyboard and parse it.

    A line of the form "=name expr" stores the parsed expression in
    symb_dict under `name` and returns None; any other input is parsed
    and returned directly.
    """
    expr = raw_input('Expressao: ')
    if not (len(expr) > 0 and expr[0] == '='):
        return parse(expr)
    parts = expr[1:].split(' ', 1)
    symb_dict[parts[0]] = parse(parts[1])
    return None
def _guess_property_from_scans(self, _property):
    """Best-effort recovery of a virus property (e.g. CLASS/OS/NAME/VARIANT) from AV idents."""
    self.logger.print_debug("Generating value for property '{:s}'.".format(_property))
    vx_file = self.get_file()
    #**********************************************************************
    # Verifies if the given property is available in the idents,
    # i.e. the property is CLASS, OS, NAME, or VARIANT.
    #**********************************************************************
    if (_property in Virus.VirusIdentItems):
        #******************************************************************
        # Checks if the default ident, i.e. an ident which usually contains
        # all the information we need, is present in the list of idents.
        # If so, use it to generate the value to the given property.
        #******************************************************************
        if (vx_file.is_detected_by(Virus.DEFAULT_IDENT_FORMAT)):
            ident = vx_file.get_detection_by(Virus.DEFAULT_IDENT_FORMAT)
            self.logger.print_debug(INFO_DETECTED_BY.format(Virus.DEFAULT_IDENT_FORMAT, ident))
            name_fmt = Virus.AvNameFormats[Virus.DEFAULT_IDENT_FORMAT]
            id_items = parse(name_fmt, ident)
            return id_items[_property]
        else:
            #
            # Otherwise, iterate thru all the available idents and parse them.
            # Retrieve the required property if available. Each time we
            # successfully retrieve the property, stored it in a dictionary
            # along the number of times it was seen.
            # At the end, consider the value most often observed in the idents,
            # if there is a draw, select the first one.
            #
            try:
                scoreboard = {}
                for (av, name_fmt) in Virus.AvNameFormats.iteritems():
                    if (vx_file.is_detected_by(av)):
                        ident = vx_file.get_detection_by(av)
                        id_items = parse(name_fmt, ident)
                        if (id_items and _property in id_items.named and _property != None):
                            property_value = id_items[_property]
                            if (property_value in scoreboard):
                                scoreboard[property_value] += 1
                            else:
                                scoreboard[property_value] = 1
                if (len(scoreboard) > 0):
                    # Majority vote; max() scan + list comp keeps the first
                    # best value on a draw (dict order dependent).
                    max_value = max(scoreboard.values())
                    best_value = [prop for prop,val in scoreboard.items() if val == max_value]
                    self.logger.print_debug(INFO_BEST_GUESS.format(_property, max_value, best_value[0]))
                    return best_value[0]
                else:
                    raise Exception(ERR_NO_DETECTION)
            except Exception as e:
                print(e.message)
                return Virus.UNKNOWN
    else:
        raise Exception(ERR_UNKNOWN_PROPERTY)
def __init__(self, eid, line, pyshell):
    """Build an If-block entry from a source line of the form "if <cond>:<body>"."""
    temp = parse("{}:{}", line)
    Entry.__init__(self, eid, temp[0] + ":", pyshell)
    self.style = "If"
    self.open = True
    self.loopCounter = 1
    x = parse("if{}:{}", line)
    self.makeBlock(temp[1])
    # NOTE(review): bool(x[0]) is the truthiness of the condition *text*,
    # which is True for any non-empty condition string -- confirm intended.
    x = string.strip(x[0]) + "= " + str(bool(x[0]))
    self.conditional = Var_Entry(eid, x, pyshell)
    self.report = temp[0] + ":\n"
    self.lastSub = ""
def __init__(self, eid, line, pyshell):
    """Build a For-block entry from a line of the form "for <var> in <seq>:<body>"."""
    temp = parse("{}:{}", line)
    Entry.__init__(self, eid, temp[0] + ":", pyshell)
    self.style = "For"
    self.open = True
    self.loopCounter = 1
    x = parse("for {} in {}:{}", line)
    self.makeBlock(temp[1])
    # The loop variable starts out with the placeholder value NONE.
    x = string.strip(x[0]) + "= NONE"
    self.iterator = Var_Entry(eid, x, pyshell)
    self.report = temp[0] + ":\n"
    self.lastSub = ""
def scraping(url, log_display = True):
    """Scrape a Japanese law page and return (law_name, Law dict with provisions)."""
    Law = {}
    result = {}
    log("start scraping")
    r = requests.get(url)
    log("fetched data from "+url, log_display)
    r.encoding = r.apparent_encoding
    log("changed encoding", log_display)
    res = BeautifulSoup(r.text,"html.parser")
    law_name = res.b.text.replace('\n','')
    if "(" in law_name:
        # Split "name(number)" into the bare name and the law number.
        titles = parse("{}({})",law_name)
        if titles:
            law_name = titles[0]
            Law["number"] = titles[1]
    from datetime import datetime
    Law["date"] = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
    Law["url"] = url
    Law["name"] = law_name
    log("Law Name:"+law_name, log_display)
    parent = ""
    for i in res.find_all("div",class_="item"):
        name = i.b
        tmp = i.text.strip().split("\n\n\n\u3000")
        if len(tmp) >= 2:
            content = "".join(tmp[1:])
        else:
            content = tmp[0]
        if name.a and parent != "":
            # Sub-item of the current article.
            result[parent].append_sub(name.a.string, Provision(content))
        else:
            if name.string and name.string.isdigit():
                name.string = "" + name.string
                result[name.string] = Provision(content, "附則")
            elif name.string:
                print(name.string)
                tmps = parse("第{}条",name.string)
                if tmps:
                    print(tmps)
                    # NOTE(review): this uses tmp[0] (the item text) rather than
                    # tmps[0] (the parsed article number) -- looks like a typo; confirm.
                    result[kansuji2arabic(tmp[0])] = Provision(content, name.string, "条文")
                    parent = kansuji2arabic(tmp[0])
                else:
                    result[name.string] = Provision(content, name.string, "条文")
                    parent = name.string
    log("converted json")
    import json
    result = dict([(k, json.loads(v.json())) for k,v in result.items()])
    Law["provision"] = result
    return law_name, Law
def info(doc_name):
    """Extract the id and date-range cells from the sheet and persist an Info row."""
    # Fixed (row, col) cell coordinates within the document.
    id_cell = (3, 10)
    from_cell = (2, 6)
    to_cell = (3, 6)
    sheet = doc_open(doc_name)
    esewa_id = str(parse(sheet, id_cell[0], id_cell[1]))
    from_date = str(parse(sheet, from_cell[0], from_cell[1]))
    to_date = str(parse(sheet, to_cell[0], to_cell[1]))
    db.session.add(Info(esewa_id, from_date, to_date))
    db.session.commit()
def getsettings(self):
    """Read the full lock-in amplifier state and sync every GUI control to it."""
    self.settings.sens=lockin.get_sens()
    self.settings.tau=lockin.get_tau()
    self.settings.slope=lockin.get_slope()
    self.settings.sync=lockin.get_sync()
    self.settings.input=lockin.get_input()
    self.settings.couple=lockin.get_couple()
    self.settings.ground=lockin.get_ground()
    self.settings.filter=lockin.get_filter()
    self.settings.reserve=lockin.get_reserve()
    # Channel 1: display/ratio and expand/offset arrive as "a,b" strings.
    tempdisprat=lockin.get_disp_rat(1)
    self.settings.ch1disp=tempdisprat.split(',')[0]
    self.settings.ch1ratio=tempdisprat.split(',')[1]
    tempexpoff=lockin.get_exp_off(1)
    self.settings.ch1expand=tempexpoff.split(',')[0]
    self.settings.ch1offset=tempexpoff.split(',')[1]
    # Channel 2, same encoding.
    tempdisprat=lockin.get_disp_rat(2)
    self.settings.ch2disp=tempdisprat.split(',')[0]
    self.settings.ch2ratio=tempdisprat.split(',')[1]
    tempexpoff=lockin.get_exp_off(2)
    self.settings.ch2expand=tempexpoff.split(',')[0]
    self.settings.ch2offset=tempexpoff.split(',')[1]
    self.settings.trigsource=lockin.get_trigsource()
    self.settings.trigshape=lockin.get_trigshape()
    #set GUI to match settings
    # Sensitivity/tau are looked up as "<value><unit>" strings, e.g. "100mV".
    senstext=self.settings.sensset.get(int(self.settings.sens))
    sensval,sensunit=parse('{:d}{}',senstext)
    self.sensunit.setCurrentIndex(self.sensunit.findText(sensunit))
    self.sensval.setCurrentIndex(self.sensval.findText(str(sensval)))
    tautext=self.settings.tauset.get(int(self.settings.tau))
    tauval,tauunit=parse('{:d}{}',tautext)
    self.tauunit.setCurrentIndex(self.tauunit.findText(tauunit))
    self.tauval.setCurrentIndex(self.tauval.findText(str(tauval)))
    self.slopeval.setCurrentIndex(int(self.settings.slope))
    # Qt check state: 0 unchecked, 2 checked -- hence the *2.
    self.synccheck.setCheckState(int(self.settings.sync)*2)
    self.input.setCurrentIndex(int(self.settings.input))
    self.coupling.setCurrentIndex(int(self.settings.couple))
    self.ground.setCurrentIndex(int(self.settings.ground))
    self.filterset.setCurrentIndex(int(self.settings.filter))
    self.reserveset.setCurrentIndex(int(self.settings.reserve))
    self.ch1_disp.setCurrentIndex(int(self.settings.ch1disp))
    self.ch1_ratio.setCurrentIndex(int(self.settings.ch1ratio))
    self.ch1_offset.setCurrentIndex(int(self.settings.ch1offset))
    self.ch1_expand.setCurrentIndex(int(self.settings.ch1expand))
    self.ch2_disp.setCurrentIndex(int(self.settings.ch2disp))
    self.ch2_ratio.setCurrentIndex(int(self.settings.ch2ratio))
    self.ch2_offset.setCurrentIndex(int(self.settings.ch2offset))
    self.ch2_expand.setCurrentIndex(int(self.settings.ch2expand))
    self.trigshape.setCurrentIndex(int(self.settings.trigshape))
    self.trigsource.setCurrentIndex(int(self.settings.trigsource))
def main(*args, **kwargs):
    """Parse, semantically check, and dump three-address code for a test source file."""
    default_file = "single_function.x"
    default_file = "simple_0.x"  # NOTE(review): overrides the previous value
    filepath = os.path.dirname(os.path.realpath(__file__))
    argument_parser = argparse.ArgumentParser()
    argument_parser.add_argument("--input", "-i", default = pjoin(filepath, "../tests/lang/" + default_file))
    args = argument_parser.parse_args()
    data = open(args.input).read()
    try:
        program = parse(data)
    except ParseError as e:
        print(e)
        # NOTE(review): execution continues with `program` unbound here,
        # raising NameError below -- presumably should exit or re-raise.
    program.make_tables()
    try:
        program.sema()
    except ast.SemaError as e:
        print(e)
        print(e.ast.end_token.highlight(5, 5))
        raise
    except KeyError as e:
        print(e)
        #print(e.ast.start_token.highlight(5, 5))
        print(e.ast.end_token.highlight(5, 5))
        raise
    t = program.make_tac(tac.TacState())
    for x in t:
        # Labels and function boundaries at column 0, instructions indented.
        if isinstance(x, (tac.Label, tac.StartFunc, tac.EndFunc)):
            print(x)
        else:
            print("\t{}".format(x))
        if isinstance(x, tac.EndFunc):
            print("")
    graph = program.output_graph("out.png")
def get_line_details(station_id, line_id):
    """Find out which stations a line serves (original docstring in German).

    Returns a dict with station-id lists for both directions; stations seen
    after the first 'btstation' cell are filed under the reverse direction.
    """
    url = "http://www.kvb-koeln.de/german/hst/showline/%d/%d/" % (station_id, line_id)
    r = requests.get(url, headers=HEADERS)
    soup = BeautifulSoup(r.text)
    details = {
        "station_id": station_id,
        "line_id": line_id,
        "stations_forward": [],
        "stations_reverse": []
    }
    station_key = "stations_forward"
    for td in soup.find_all("td", class_=re.compile(".*station")):
        tdclass = td.get("class")[0]
        a = td.find("a")
        if a is None:
            continue
        href = a.get("href")
        if href is None:
            continue
        result = parse(URL_TEMPLATES["station_details"], href)
        if result is None:
            continue
        details[station_key].append(int(result["station_id"]))
        if tdclass == u'btstation':
            # The terminus cell marks the switch to the reverse direction.
            station_key = "stations_reverse"
    return details
def defExprMacro(inp,exprMacroDict):
    """ Define a macro expression and put it in the macro provided expression dict """
    if inp.count('=') != 1:
        # NOTE(review): this also fires for *zero* '=' signs, where the
        # message text is misleading -- confirm callers only test for failure.
        return 'Too many \'=\' in expression macro definition'
    #Split to get the definition and the expression separately
    macroDef,macroExp = inp.split('=')
    #USe the parser functions (parse.py) to find the name and the arguments
    macroName = funcname(macroDef)
    macroArgs = funcargs(macroDef)
    for arg in macroArgs:
        if len(arg) == 0:
            return "Error: Argument name cannot be empty"
        if not arg.isalnum():
            return "Error: Only alphanumericals may be used in argument names"
        if arg.isdigit():
            return "Error: Numbers may not be used as argument names"
    #If the name is valid, parse the macro expression
    parsedMacro = parse(macroExp)
    if 'Error:' in parsedMacro:
        return parsedMacro
    #If is parses ok, make a macro (expr.py) and add it to the provided macro dict
    em = ExprMacro(parsedMacro,macroArgs)
    exprMacroDict[macroName] = em
    return inp
def push(self, message):
    """Handle one calibration protocol message from the client.

    "enter"        -> put all cameras into calibration mode
    "exit"         -> finish calibration and compute each camera's position
    "calib:x-y-z"  -> record one 3D world point and each camera's 2D view of it
    """
    if message == "enter":
        print "Start calibration"
        # Empty Nx3 array of world points collected during calibration.
        self.world3DPoints = np.array([], dtype=np.float32).reshape(0,3)
        for key in self.cameras:
            if isinstance(self.cameras[key], Camera):
                self.cameras[key].enterCalibrationMode()
                self.server.send_message(self.client, "Camerea detected : " + self.cameras[key].macadress)
        self.server.send_message(self.client, "Calibration Started")
    elif message == "exit":
        print "End calibration, calculates..."
        self.server.send_message(self.client, "Calibration Finished, calculating matrix...")
        for key in self.cameras:
            if isinstance(self.cameras[key], Camera):
                camPosition = self.cameras[key].exitCalibrationMode(self.world3DPoints)
                self.server.send_message(self.client, str(camPosition))
    else:
        # NOTE(review): parse() returns None for a non-matching message,
        # which makes the .fixed access below raise -- confirm acceptable.
        extracted_data = parse("calib:{}-{}-{}", message)
        if len(extracted_data.fixed) == 3:
            self.server.send_message(self.client, "New calibration point received")
            xyz = [int(extracted_data.fixed[0]), int(extracted_data.fixed[1]), int(extracted_data.fixed[2])]
            self.world3DPoints = np.append(self.world3DPoints, [xyz], axis=0)
            for key in self.cameras:
                if isinstance(self.cameras[key], Camera):
                    xyposition = self.cameras[key].saveCalibrationPoint2D()
                    self.server.send_message(self.client, "Camera " + str(key) + " 2D position : " + str(xyposition))
def create_predictions(tree, predict): ''' Given a tree and a url to a data_set. Create a csv with a prediction for each result using the classify method in node class. ''' # first calculate the predict list predict_set, _ = parse(predict, True) predict_set = changemissing(predict_set) predictedClasslist = [] for x in predict_set: predictedClass = tree.classify(x) predictedClasslist.append(predictedClass) # next output the file to csv with open(predict,'r') as csvinput: with open('./output/PS2.csv', 'wb') as csvoutput: writer = csv.writer(csvoutput) reader = csv.reader(csvinput) i = 0 size = len(predictedClasslist) for row in reader: if(i!=0 and i<=size): row[len(row)-1] = predictedClasslist[i-1] writer.writerow(row) i+=1 csvoutput.close() csvinput.close()
def load_lta(lta):
    """Load an .lta transform file and return its 4x4 matrix.

    Returns None when the header describes anything other than a single
    transform of type 1 (an unsupported layout).
    """
    with file(lta,'r') as f:
        raw = f.readlines()
    # Strip comments and drop blank lines before template matching.
    cleaned = []
    for l in raw:
        stripped = l.partition('#')[0].rstrip()+'\n'
        if stripped != '\n':
            cleaned.append(stripped)
    header = parse(_lta_template_header, ''.join(cleaned[:4]))
    if header['type'] != 1 or header['nxforms'] != 1:
        return None  # don't know how to handle this case
    xform = parse(_lta_template_xform, ''.join(cleaned[4:9]))
    return array(xform.fixed).reshape((4,4))
def get_markers(locations):
    """
    Get the food pantry options in proper format
    :param locations: information about the food pantries
    :return: formatted marker locations
    """
    markers = []
    for result in locations["results"]:
        latitude = result["geometry"]["location"]["lat"]
        longitude = result["geometry"]["location"]["lng"]
        address = result["formatted_address"]
        address_data = {}
        # NOTE(review): assumes the address always matches
        # "street, city, state zip, country"; parse() returns None otherwise
        # and this unpack raises -- confirm upstream guarantees the format.
        address_data["street"], address_data["city"], address_data["state"], address_data["zip_code"], \
            address_data["country"] = parse("{}, {}, {} {}, {}", address)
        marker_addresses[result["name"]] = address_data
        markers.append(
            {'lat': latitude, 'lng': longitude,
             'infobox': "<h2>" + result["name"] + "</h2><p>" + address + "</p>"}
        )
    # Extra blue marker for the user's current position.
    markers.append(
        {'icon': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png',
         'lat': current_user.latitude, 'lng': current_user.longitude,
         'infobox': "Current Location"}
    )
    return markers
def getFrame(self): frame_state = self.connection.recv(1000) # print frame_state frame_state = frame_state.strip() frame_dict = {} if frame_state != "": b = parse("{:d} {}", frame_state) if b is not None: frame_state = b[1] for i in range(b[0]): oneFiducial = parse("{id:d} {x:d} {y:d} {angle:f}{rest}", frame_state) res = transform(oneFiducial) frame_dict[oneFiducial["id"]] = {"x": res["x"], "y": res["y"], "angle": res["angle"]} frame_state = oneFiducial["rest"] print frame_dict return frame_dict
def get_stations():
    """
    Fetch the list of all stations and return a dict mapping
    station id -> station name (original docstring was in German).
    """
    url = "https://www.kvb.koeln/haltestellen/overview/"
    r = requests.get(url, headers=HEADERS)
    soup = BeautifulSoup(r.text)
    #print(soup.prettify())
    mystations = []
    for a in soup.find_all("a"):
        #print(a, a.get("href"), a.text)
        href = a.get("href")
        if href is None:
            continue
        # Only anchors matching the station-details URL template are stations.
        result = parse(URL_TEMPLATES["station_details"], href)
        if result is None:
            continue
        mystations.append({
            "id": int(result["station_id"]),
            "name": a.text
        })
    # sort by id
    mystations = sorted(mystations, key=lambda k: k['id'])
    station_dict = {}
    for s in mystations:
        station_dict[s["id"]] = s["name"]
    return station_dict
def getDataNK(yamlFile,lamb):
    """Load tabulated n/k material data from a YAML file and interpolate at `lamb`.

    Returns a list of complex refractive indices n + i*k, one per wavelength
    in the iterable `lamb`.
    """
    yamlStream=open(yamlFile,'r')
    allData=yaml.load(yamlStream);
    materialData=allData["DATA"][0]
    assert materialData["type"]=="tabulated nk"
    matLambda=[]
    matN=[]
    matK=[]
    #in this type of material read data line by line
    for line in materialData["data"].split('\n'):
        parsed=parse("{l:g} {n:g} {k:g}",line)
        try:
            # Non-matching lines yield parsed == None; the subscripts then
            # raise TypeError, which is caught and logged below.
            n=parsed["n"]+1j*parsed["k"]
            matLambda.append(parsed["l"]);
            matN.append(parsed["n"])
            matK.append(parsed["k"])
        except TypeError as e:
            sys.stderr.write("TypeError occured:"+str(e)+"\n")
    matLambda=np.array(matLambda)
    matN=np.array(matN)
    matK=np.array(matK)
    interN=interp1d(matLambda,matN)
    interK=interp1d(matLambda,matK)
    return [ x for x in (interN(lamb)+1j*interK(lamb)) ]
def get_station_details(station_id):
    """
    Read details for one station: its name and the sorted ids of the
    lines serving it (original docstring was in German).
    """
    url = "https://www.kvb.koeln/haltestellen/overview/%d/" % station_id
    r = requests.get(url, headers=HEADERS)
    soup = BeautifulSoup(r.text)
    details = {
        "station_id": station_id,
        "name": stations[station_id],
        "line_ids": set()
    }
    div = soup.find("ul", class_="info-list")
    for a in div.find_all("a"):
        href = a.get("href")
        if href is None:
            continue
        # Only anchors matching the line-details URL template carry line ids.
        result = parse(URL_TEMPLATES["line_details"], href)
        if result is None:
            continue
        details["line_ids"].add(result["line_id"])
    # Expose a deterministic, JSON-friendly list instead of the set.
    details["line_ids"] = sorted(list(details["line_ids"]))
    return details
def main():
    """Simulate cars moving on the street graph and print the final output."""
    # init graph and problem parameters (comments translated from French)
    (NB_INTERSECTIONS, NB_STREETS, NB_SECS, NB_CARS, INIT_INTER, lNode, lEdge) = parse()
    G = CreateGraph(lNode, lEdge)
    # init the cars, all starting at INIT_INTER
    cars = []
    for i in range(NB_CARS):
        car = Car(G, INIT_INTER)
        # find_a_way(car, G)
        cars.append(car)
    # algo
    # C = Car();
    # nx.draw(G)
    # plt.show()
    # print(visitStreet(G, 1903, 9877))
    for car in cars:
        # Advance each car until it has no next move left.
        while (1):
            nextMove = car.getNextMove(G)
            if (nextMove == None):
                break;
    printOutput(NB_CARS, cars)
def __init__(self, in_str=""):
    """ Parses the output of dlook <PID> """
    if in_str == "":
        return  # allow constructing an empty parser
    buf = StringIO.StringIO(in_str)
    lines = buf.readlines()
    lines = [l.rstrip() for l in lines] # Remove newlines, as we already linefied this
    lines = [' '.join(l.split()) for l in lines] # Remove whitespace
    nodeSet = {}
    i = 0  # unused counter, kept as-is
    for l in lines:
        # Parse all possible line-types
        rpages = parse("[{memStart}-{memEnd}] {pages:d} {huge}pages on node {node:d} {flags}", l)
        if rpages != None:
            # Read memory range as Base-16
            memStart = int(rpages["memStart"], 16)
            memEnd = int(rpages["memEnd"], 16)
            # NOTE(review): this is the byte length of the address range, not
            # a page count, despite the name -- confirm intended units.
            memPages = memEnd - memStart
            if not rpages["node"] in nodeSet:
                nodeSet[rpages["node"]] = 0
            # Accumulate (self.totalMemoryUsed presumably pre-initialized on
            # the class -- confirm).
            self.totalMemoryUsed += memPages
            nodeSet[rpages["node"]] += memPages
    self.nodesAllocatedOn = nodeSet.items()
def test_end_to_end():
    """Round-trip check: parse the data file, load into SQLite, read one row back."""
    with open(DAT_FILE, 'r') as src:
        parsed = parse(src)
    conn = sqlize(parsed, ':memory:')
    cursor = conn.cursor()
    rows = cursor.execute('select * from enzymes where id=?', ('1.1.1.2',))
    for row in rows:
        assert desqlize_row(row) == test_item_1_1_1_2
def replace(txt):
    """Strip a leading "number<space>" prefix and layout characters from a line."""
    cleaned = txt.replace("\n", "")
    # Drop a numbering prefix separated by either an ideographic or ASCII space.
    for pattern in ("{number}\u3000{text}", "{number} {text}"):
        match = parse(pattern, cleaned)
        if match and 'text' in match:
            cleaned = match['text']
    # Remove remaining layout characters.
    for junk in ("\u3000", " ", "○"):
        cleaned = cleaned.replace(junk, "")
    return cleaned
def parse(self, data):
    """Parse a point message of the form "{}x{}y{}h{}w{}a{}" into a dict.

    :param data: raw message string
    :return: dict with 'x', 'y', 'height', 'width' fields, or None when the
             message does not match the expected format.
    """
    extracted_data = parse("{}x{}y{}h{}w{}a{}", data)
    # BUG FIX: parse() returns None on a non-matching message, and the
    # original called len() directly on the result, raising TypeError
    # instead of taking the documented None branch.  A parse Result also
    # has no __len__, so count the fixed fields explicitly.
    if extracted_data is not None and len(extracted_data.fixed) == 6:
        new_point = {'x': extracted_data[1],
                     'y': extracted_data[2],
                     'height': extracted_data[3],
                     'width': extracted_data[4]}
        return new_point
    else:
        return None
def verbal():
    """Render the verbal-practice page with its topic list."""
    topics = parse("verbal")
    return render_template("verbal.html", topics=topics)
def encode(content):
    """Parse `content` and encode the resulting tree via encodeItem.

    Resets the module-level tag counter before encoding.
    """
    global _tagCnt
    global _bin
    global _bin_ext
    _tagCnt = 0
    # NOTE(review): _globalIndex is not in the `global` declarations above,
    # so this assignment creates an unused local -- possibly a missing
    # `global _globalIndex`; confirm.
    _globalIndex = 0
    r = parse(content)
    encodeItem(r)
def quant():
    """Render the quant page with its topics and the stored practice history."""
    topics = parse("quant")
    g.db = connect_db()
    cursor = g.db.execute("select * from quantpractice")
    history = [
        dict(topic=row[0], date=row[1], correct=row[2], total=row[3])
        for row in cursor.fetchall()
    ]
    g.db.close()
    return render_template("quant.html", topics=topics, practice=history)
def _getValByRefDes(lines, refDes):
    """Return the value field of component `refDes` from a SPICE netlist.

    Scans from the end so lines above the main-design marker (included
    sub-models) are never reached.  Returns None when not found.
    """
    pattern = "%s {} {} {}" % (refDes)
    for line in reversed(lines):
        if line == "*============== Begin SPICE netlist of main design ============":
            break
        fields = parse(pattern, line)
        if fields is not None:
            return fields[2]
def validate(fa, fb):
    """Compare two result files field by field.

    Parsed fields: [0] min-dist, [1] hit-time-step, [2] gravity-device-id,
    [3] missile-cost.  Returns (ok, message); on mismatch the message lists
    the offending field names, comma-separated.
    """
    a = parse(fa)
    if a is None:
        return False, f'Failed to parse {fa!r}'
    b = parse(fb)
    if b is None:
        return False, f'Failed to parse {fb!r}'
    bad = []
    if not math.isclose(a[0], b[0], rel_tol=1e-8):
        bad.append('min-dist')
    if not abs(a[1] - b[1]) <= 1:
        bad.append('hit-time-step')
    if a[2] != b[2]:
        bad.append('gravity-device-id')
    # BUG FIX: missile-cost is field 3; the original re-compared field 2
    # (gravity-device-id) here, so missile-cost was never actually checked.
    if not math.isclose(a[3], b[3], rel_tol=1e-8):
        bad.append('missile-cost')
    if bad:
        return False, ','.join(bad)
    return True, 'ok'
def functionParse(line):
    """Convert a "name(args):type" declaration into a C-style function header token."""
    parsed = parse("{:w}({}):{}", line)
    func_name = parsed.fixed[0]
    raw_args = parsed.fixed[1]
    return_type = parsed.fixed[2]
    header = return_type + " " + func_name + "(" + arglist.arglistParse(raw_args).string + "){"
    return tp.Token(header, "funcwa")
def getPHPversion():
    """Return the installed PHP version string, or "Not installed".

    Parses the banner line printed by `php -v`, e.g.
    "PHP 5.6.27-0+deb8u1 (cli) (built: ...)" -> "5.6.27-0+deb8u1".
    """
    phpPath = '/usr/bin/php'
    if os.path.isfile(phpPath):  # PHP is installed
        # BUG FIX: "2>/dev/null" was passed as a literal argument to php;
        # shell redirections do not work without a shell.  Silence stderr
        # through the subprocess API instead.
        with open(os.devnull, 'w') as devnull:
            s = subprocess.check_output([phpPath, "-v"], stderr=devnull)
        # returns something like: PHP 5.6.27-0+deb8u1 (cli) (built: Oct 24 2016 18:22:27)
        s = parse("PHP {:S}{}", s)[0]
    else:
        s = "Not installed"
    return s
def test(sentence):
    """Try each known command format against `sentence`; download the matched song."""
    # BUG FIX: `vals` was only assigned on a successful match, so a sentence
    # matching no format (or an empty format list) raised NameError at the
    # `vals is not None` check below.
    vals = None
    for fmt in command_format():
        # trying to pull arguments from string passed in
        ret = parse(fmt, sentence)
        if ret is not None:
            vals = ret
            break
    if vals is not None:
        print(vals[0])
        download_song(vals[0])
def get_gen_query_time(logfile, training_size):
    '''return time_gen_train_query, time_gen_train_label'''
    # Index [0] collects start timestamps, [1] end timestamps.
    t_tr_query = [[], []]
    t_tr_label = [[], []]
    # time_update_model = [[],[]]
    with open(logfile, 'r') as log_f:
        lines = log_f.readlines()
        for line in lines:
            line = line.strip()
            # parse time for training query update
            s_tr_query = parse("[{time} INFO] lecarb.workload.gen_workload: Start generate workload with {train_num:d} queries for train...", line)
            e_tr_query = parse("[{time} INFO] lecarb.workload.gen_workload: Start generate workload with {test_num:d} queries for valid...", line)
            # Only count the run whose query count matches training_size;
            # the valid-workload line doubles as the end marker.
            if s_tr_query and s_tr_query['train_num'] == training_size:
                t_tr_query[0].append(dt.strptime(s_tr_query['time'], TIME_FMT))
            if e_tr_query:
                t_tr_query[1].append(dt.strptime(e_tr_query['time'], TIME_FMT))
            # parse time for training label update
            s_tr_label = parse("[{time} INFO] lecarb.workload.gen_label: Updating ground truth labels for the workload, with sample size {}...", line)
            e_tr_label = parse("[{time} INFO] lecarb.workload.gen_label: Dump labels to disk...", line)
            if s_tr_label:
                t_tr_label[0].append(dt.strptime(s_tr_label['time'], TIME_FMT))
            if e_tr_label:
                t_tr_label[1].append(dt.strptime(e_tr_label['time'], TIME_FMT))
    # print(t_tr_query, t_tr_label)
    time_gen_tr_query = 0
    time_gen_tr_label = 0
    # Use only the first start/end pair of each kind.
    if len(t_tr_query[0]) >= 1:
        time_gen_tr_query = (t_tr_query[1][0] - t_tr_query[0][0]).total_seconds()
    if len(t_tr_label[0]) >= 1:
        time_gen_tr_label = (t_tr_label[1][0] - t_tr_label[0][0]).total_seconds()
    return time_gen_tr_query, time_gen_tr_label
def inlineIfStatementParse(string):
    """Translate "if <cond>-> <body>" into a C-style inline-if token."""
    parts = parse("if {}-> {}", string)
    condition = parts.fixed[0]
    body_token = tp.parseToken(parts.fixed[1])
    return tp.Token('if(' + condition + ') ' + body_token, 'inlineifstatement')
def root():
    """Render the homepage, optionally filtered by season and/or year from the form."""
    parse()
    print(request.form)
    season = None
    year = None
    seasonlist = None
    yearlist = None
    if request.form:
        if request.form['season'] != '':
            # The form value carries a prefix; the number starts at index 6.
            season = int(request.form['season'][6:])
            seasonlist = getSeason(season)
        if request.form['year'] != '':
            year = int(request.form['year'])
            yearlist = getYear(year)
    return render_template("homepage.html", season=season, year=year,
                           seasonlist=seasonlist, yearlist=yearlist)
def functionParsenoArgs(line):
    """Convert a "name(args):type" declaration into a C-style function header string."""
    parsed = parse("{:w}({}):{}", line)
    func_name = parsed.fixed[0]
    return_type = parsed.fixed[2]
    args = arglist.arglistParse(parsed.fixed[1])
    return return_type + " " + func_name + "(" + args + "){"
def handle_line(self, line):
    """Match `line` against each registered format and dispatch the first hit.

    Returns the action's int result, or True when the action reports success,
    or False when no format produced either.
    """
    for key, fmt in self.formats.items():
        fields = parse(fmt, line)
        if fields is None:
            continue
        retval = self.action(key, fields)
        # Check int before the True comparison: True == 1 in Python.
        if type(retval) is int:
            return retval
        if retval == True:
            return retval
    return False
def command_handler(sentence, info):
    """Match `sentence` against the known command formats.

    Returns (message, timer-function) on a match.
    """
    msg = sentence + " is not a known command"
    function = None
    comms, classify = commands()  # classify is unused here
    for i in comms:
        #pulling parsing formats
        for j in i:
            res = parse(j, sentence) #try and parse using parse formats
            if res:
                msg, function = setTimer(res[0])
                return msg, function
    # NOTE(review): without a trailing `return msg, function`, an unmatched
    # sentence yields None and the defaults above are dead -- confirm intent.
def get_graph(lines):
    """Build a directed bag-containment graph from the puzzle input lines.

    An edge upper -> lower carries the contained count as `weight`; bags
    that contain nothing are added as bare nodes.
    """
    G = nx.DiGraph()
    for line in lines:
        left, right = line.split("contain")
        outer = parse("{color} bags", left.strip())
        if right.strip() == "no other bags.":
            G.add_node(outer.named["color"])
            continue
        for part in right.split(","):
            singular = re.sub('s?\.?$', '', part)  # dirty af regex hax for plurals
            inner = parse("{num:d} {color} bag", singular.strip())
            G.add_edge(outer.named["color"], inner.named["color"],
                       weight=inner.named["num"])
    return G
def newPerson():
    """Create a Person from the POSTed JSON body; respond with success or error JSON."""
    if request.method == 'POST':
        i = request.get_json()
        person = Person()
        person.name = i["name"]
        person.surname = i["surname"]
        person.mobile = i["mobile"]
        person.email = i["email"]
        person.city = i["city"]
        person.country = i["country"]
        person.username = i["username"]
        person.created = datetime.datetime.now()
        try:
            db.session.add(person)
            db.session.commit()
        except IntegrityError as e:
            # The MySQL 1062 message names the unique key that clashed.
            # NOTE(review): dupe_field is None when the message format differs,
            # making the subscripts below raise -- confirm acceptable.
            dupe_field = parse('(1062, "Duplicate entry \'{}\' for key \'{}\'")', str(e.orig))
            if (dupe_field[1] == 'email'):
                return jsonify({"error": "Email already registered in system"})
            elif (dupe_field[1] == 'username'):
                return jsonify({"error": "Username already taken"})
        except Exception as f:
            # NOTE(review): only this first `except Exception` clause can ever
            # run; the two identical clauses below are unreachable dead code.
            cantConnet = parse('({}, "Can\'t connect to MySQL server on \'localhost\' (10061)")', str(f.orig))
            if (cantConnet):
                if (cantConnet[0] == '2003'):
                    return jsonify({"error": "Cannot connect to system database"})
        except Exception as g:
            goneAway = parse('({}, "MySQL server has gone away")', str(g.orig))
            if (goneAway):
                if (goneAway[0] == '2006'):
                    return jsonify({"error": "System database off "})
        except Exception as h:
            return jsonify({"error": str(h)})
        return jsonify({"success": "successfully added person"})
def main():
    """Small demo of parse() results and dict mutation (Python 2 prints)."""
    x="x = x+5"
    c=parse("{}={}", x)
    print c
    #c[1]=2
    y={'1':2, 'a':2}
    print y
    y['1']=3
    y["v"]=1
    print y
    print c[0],c[1]
def read_file(filename): flags = parse_flags(filename) # train_str = get_format(filename, FMT_TRAIN) # eval_str = get_format(filename, FMT_EVAL) dtrain, deval = [], [] with open(filename) as f: for line in f: line = line.strip() if is_train(line): dtrain.append(parse(FMT_TRAIN, line[line.find(START_TRAIN):].strip(), dict(Num=parse_float_number))) elif is_eval(line): deval.append(parse(FMT_EVAL, line[line.find(START_EVAL):].strip(), dict(Num=parse_float_number))) else: pass return dtrain, deval, flags
def getMac():
    """Return the MAC address of the gateway (assumed to be x.y.z.1 on our subnet)."""
    # Open a throwaway UDP socket to learn our outbound IP address.
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.connect(("8.8.8.8", 80))
    ip_addr = probe.getsockname()[0]
    probe.close()
    octets = parse("{}.{}.{}.{}", ip_addr)
    gateway_ip = "{}.{}.{}.1".format(octets[0], octets[1], octets[2])
    return get_mac_address(ip=gateway_ip)
def process_raw_schedule(raw_schedule): """ raw_scheduleSchedule begins Aug 17, 2013 Start Finish Saturday 11:00AM 8:00PM Sunday 11:00AM 8:00PM Monday 1:00PM 10:00PM Tuesday 1:00PM 10:00PM Wednesday 00:00AM 00:00AM Thursday 00:00AM 00:00AM Friday 11:00AM 8:00PM """ #let's start builting the ical object cal = vobject.iCalendar() macro = "Schedule begins {} Start Finish Saturday {} Sunday {} Monday {} Tuesday {} Wednesday {} Thursday {} Friday {}" raw_schedule = raw_schedule.replace('RTO', '') parsed = parse(macro, " ".join(raw_schedule.split())) startdate = datetime.strptime(parsed.fixed[0], "%b %d, %Y") #not localizing for now #startdate = timezone('US/Pacific').localize(startdate) days = parsed.fixed[1:] schedule_list = [] stuff = [] for d in range(len(days)): daydelta = timedelta(days=d) mydate = startdate + daydelta stuff.append(days[d]) stime_string, etime_string = days[d].split(" ") if stime_string == "00:00AM": print "YOU ARE OFF" else: stime = datetime.strptime(stime_string, "%I:%M%p").time() etime = datetime.strptime(etime_string, "%I:%M%p").time() start_datetime = mydate.replace(hour=stime.hour, minute=stime.minute) end_datetime = mydate.replace(hour=etime.hour, minute=etime.minute) day_schedule = [start_datetime, end_datetime] schedule_list.append(day_schedule) v = cal.add("vevent") v.add('summary').value = days[d] start = v.add('dtstart') start.value = start_datetime end = v.add('dtend') v.add('location').value = "Apple Store Bay Street" end.value = end_datetime return cal.serialize()
def _dcs(self):
    """ Go through the metadata and find the dataset short names """
    ref_dc = self._ref_dc()
    found = dict()
    for key in self.meta.keys():
        parsed = parse(globals._ds_short_name_attr, key)
        if parsed is None:
            continue
        fields = list(parsed)
        if len(fields) != 1:
            continue
        short_name = fields[0]
        # The reference dataset is reported separately, not in the dict.
        if short_name != ref_dc:
            found[short_name] = key
    return found, ref_dc
def parse_PRIVMSG(self):
    """Try to interpret self.raw as a Twitch PRIVMSG line and dispatch it.

    Returns True on a match; a non-matching line makes parse() return None,
    the subscripts raise TypeError, and we report False.
    """
    pattern = ":{NICK}!{NICK}@{NICK}.tmi.twitch.tv PRIVMSG #{CHAN} :{CONTENT}"
    try:
        parsed = parse(pattern, self.raw)
        PRIVMSG(bot=self.bot,
                NICK=parsed["NICK"],
                CHAN=parsed["CHAN"],
                CONTENT=parsed["CONTENT"])
        return True
    except TypeError:
        return False
def parse_timer_window(pattern):
    """
    Finds a timer window (timer:interval, timer:within, ...) in the pattern
    :param pattern: the pattern to search in
    :return: dict of timer type/value when found, otherwise the pattern unchanged
    """
    start = pattern.find("timer")
    if start <= -1:
        return pattern
    tokens = parse("timer:{timer[type]}({timer[value]}) ", pattern[start:])
    return tokens.named
def getMACaddresses( ):
    """Return [(interface, MAC address), ...] for each interface ifconfig reports."""
    found = []
    raw = subprocess.check_output(["/sbin/ifconfig"])
    for line in split(raw, "\n"):
        # Only interface header lines carry "Link encap: ... HWaddr ...".
        fields = parse("{:S}{:s}Link encap:{}HWaddr{}{:S}{}", line)
        if fields is not None:
            found.append((fields[0], fields[4]))
    return found
def split_expr(exprs):
    """Recursively split a space-separated expression string into its terms.

    A parenthesized group counts as a single term (outer parens stripped).
    """
    # TODO fix with own parsing approach
    grouped = parse("({e1}) {e2}", exprs)
    if grouped is not None:
        return [grouped['e1']] + split_expr(grouped['e2'])
    solo = parse("({e})", exprs)
    if solo is not None:
        return [solo['e']]
    head_tail = exprs.split(' ', 1)
    if len(head_tail) == 1:
        return [head_tail[0]]
    return [head_tail[0]] + split_expr(head_tail[1])
def getIPaddresses( ):
    """Return [(interface, IPv4 address), ...] parsed from ifconfig output."""
    found = []
    raw = subprocess.check_output(["/sbin/ifconfig"])
    # ifconfig separates interfaces with a blank line: one block each.
    for block in split(raw, "\n\n"):
        fields = parse("{:S}{}inet addr:{:S}{}", block)
        if fields is not None:
            found.append((fields[0], fields[2]))
    return found
def find_selection(event_type):
    """
    Finds a selection clause (from the std namespace) and updates the event type.
    :param event_type: the event type to inspect
    :return: dict with "name" (and "selection" when the clause is present)
    """
    try:
        return parse("{name}.std:{selection}", event_type).named
    except AttributeError:
        # parse() returned None: no std: selection in this event type.
        return {"name": event_type}
def get_lw_nn_training_time(logfile):
    """Return the training minutes reported in a lw_nn log, or 0 when absent."""
    fmt = "[{} INFO] lecarb.estimator.lw.lw_nn: Training finished! Time spent since start: {train_time:f} mins"
    with open(logfile, 'r') as log_f:
        for raw in log_f.readlines():
            hit = parse(fmt, raw.strip())
            if hit:
                return hit['train_time']
    return 0
def read_samples(path, base_name, extension=".png"):
    """Load a k x k grid of images named "<base>_ay=R_ax=C<ext>" into one array.

    Returns an array of shape (k, k) + image_shape; k is inferred as
    sqrt(number of matching files), assuming the grid is complete.
    """
    pattern = re.compile(base_name + "_ay=[0-9]+_ax=[0-9]+\\" + extension)
    samples_paths = [entry for entry in listdir(path) if pattern.match(entry)]
    k = int(len(samples_paths) ** 0.5) # assume the correct number of samples in the folder
    # Probe one image for the common sample shape.
    sample_shape = imread(os.path.join(path, samples_paths[0]), flatten=True).shape
    samples = np.empty((k, k) + sample_shape)
    for sample_name in samples_paths:
        # Recover the (row, col) grid position from the filename.
        r = parse(base_name + "_ay={:d}_ax={:d}" + extension, sample_name)
        (ay, ax) = r.fixed
        samples[ay, ax] = imread(os.path.join(path, sample_name), flatten=True)
    return samples
def getHttpdVersion():
    """Return the installed Apache httpd version string, or "Not installed".

    Parses the "Server version: Apache/X.Y.Z (...)" banner from `httpd -v`.
    """
    path = "/usr/bin/httpd"
    if os.path.isfile(path):  # apache is installed
        # BUG FIX: "2>/dev/null" was passed as a literal argument to httpd;
        # shell redirections do not work without a shell.  Silence stderr
        # through the subprocess API instead.
        with open(os.devnull, 'w') as devnull:
            s = subprocess.check_output([path, "-v"], stderr=devnull)
        # outputs something like:
        # Server version: Apache/2.4.18 (Unix)
        # Server built:   Feb 20 2016 20:03:19
        s = parse("{}version: {:S}{}", s)[1]
    else:
        s = "Not installed"
    return s
def do_dir(in_dir, output_path, executable):
    """Recursively process every APC_*.bin file under `in_dir`."""
    print("{}>change dir to {}".format(os.path.basename(__file__), in_dir))
    for entry in os.listdir(in_dir):
        full_path = os.path.abspath(in_dir + "/" + entry)
        if os.path.isdir(full_path):
            do_dir(full_path, output_path, executable)
            continue
        # Only files matching the APC_*.bin naming scheme are processed.
        if parse("APC_{}.bin", os.path.basename(full_path)):
            do(full_path, output_path, executable)
def getLatexMain(directory):
    """Return the document stem of the first *.latexmain marker in `directory`.

    Returns None when no marker file is present.
    """
    # BUG FIX: the original listed '.' (the current working directory),
    # silently ignoring the `directory` argument.
    files = os.listdir(directory)
    for f in files:
        if f.endswith('.latexmain'):
            # e.g. "thesis.tex.latexmain" -> "thesis"
            result = parse('{}.{}.latexmain', f)
            main_file = result[0]
            return main_file
    return None
def repeat(second=1.0):
    """Poll the bus API, diff against the previous response, and reschedule itself."""
    global current, is_seoul_bus, route_name, routeID
    response = parse(is_seoul_bus, get_response(is_seoul_bus, routeID))
    # The original branched on `current == {}` but both branches did the
    # same thing; keep the single shared path.
    compare(route_name, response, current)
    current = response
    #print('Repeating...')
    threading.Timer(second, repeat, [second]).start()