def run(self):
    # Proxy thread (Python 2): pumps traffic from the remote game server to
    # the local game client.  Each received chunk is fed to parser.parse();
    # if the parser queued a crafted packet in parser.client_stash, that
    # packet is popped and injected into the client connection, and the
    # original data is then forwarded unchanged.
    # NOTE(review): parse errors are logged but never stop forwarding —
    # presumably intentional so the proxy stays transparent.
    while True:
        data = self.server.recv(4096)
        if data:
            #print "[{}] <- {}".format(self.port, data[:100].encode('hex'))
            try:
                parser.parse(data, self.port, 'server')
                if len(parser.client_stash) > 0:
                    OAO = parser.client_stash.pop()
                    self.game.sendall(OAO)
            except Exception as e:
                print 'server[{}]'.format(self.port), e
            # forward to client
            self.game.sendall(data)
def run(self):
    # Proxy thread (Python 2): mirror of the server-side pump — moves
    # traffic from the game client to the remote server.  Parsed packets
    # queued in parser.server_stash are popped and injected toward the
    # server before the original data is forwarded unchanged.
    while True:
        data = self.game.recv(4096)
        if data:
            #print "[{}] -> {}".format(self.port, data[:100].encode('hex'))
            try:
                # reload( parser )
                parser.parse(data, self.port, 'client')
                if len(parser.server_stash) > 0:
                    OAO = parser.server_stash.pop()
                    self.server.sendall(OAO)
            except Exception as e:
                print 'client[{}]'.format(self.port), e
            # forward to server
            self.server.sendall(data)
async def memes(ctx, *, request):
    """Discord command handler: parse *request* into a list of candidate
    results and send five random picks (with replacement) to the channel.

    On a parser failure the error is reported to the channel and the
    handler returns immediately.
    """
    try:
        results = myparser.parse(request)
    except Exception as e:
        await ctx.send(f"Oh shit, i'm sorry, something is wrong: {e}")
        # bug fix: without this return, execution fell through to the loop
        # below with `results` unbound, raising NameError after the apology
        return
    for _ in range(5):
        await ctx.send(choice(results))
def load_file(file):
    """Read a tab-separated corpus file and return (document, label) pairs.

    The first line is treated as a header and skipped.  Each remaining line
    is split on tabs; column 2 is run through parse() and column 3 is read
    as the integer label.

    :param file: path of the TSV file to read
                 (name kept for backward compatibility although it shadows
                 the ``file`` builtin)
    :return: list of (parsed_document, int_label) tuples
    """
    documents = []
    # with-statement fixes the original resource leak: the handle was
    # opened but never closed
    with open(file, 'r') as f:
        next(f)  # skip header
        for line in f:
            fields = line.split('\t')
            documents.append((parse(fields[2]), int(fields[3])))
    return documents
def get_snippets(person, aff=True):
    """Search the web for e-mail snippets about *person*.

    Searches with the person's English name (plus affiliation when *aff*
    is True); when a Chinese name is present, a second search is run
    against a separate cache directory and both snippet lists are
    concatenated.
    """
    def build_query(name):
        # affiliation is appended only when aff is exactly True,
        # mirroring the original `aff == True` comparison
        if aff == True:
            return '{} {} email'.format(name, person['simple_affiliation'])
        return '{} email'.format(name)

    def fetch(name, cache_path):
        # one cached, proxied search followed by snippet extraction
        page = crawler.search(build_query(name), usecache=True, useproxy=True,
                              cache=cache_path, checkpage=True)
        return myparser.parse(page)

    snippets = fetch(person['name'],
                     '../cache/{}.html'.format(person['id']))
    if len(person['name_zh']) > 0:
        snippets = snippets + fetch(person['name_zh'],
                                    '../cache/Chinese/{}.html'.format(person['id']))
    return snippets
def download_page(url, useproxy=True, verbose=True, maxtry=999, timeout=5, checkpage=True):
    # Download *url*, optionally through a rotating proxy, retrying up to
    # *maxtry* times.  Returns the page text on success, or None once the
    # retry budget is exhausted.  Each attempt's proxy is rewarded (+1) or
    # penalized (-1) via Proxy.modify_proxy_info.
    # NOTE(review): `timeout` is accepted but never passed to requests.get —
    # confirm whether that is intentional.
    def retry():
        # Recursive retry with a decremented budget.
        if verbose:
            print('[FAIL-{}] {} -> {}'.format(maxtry, proxy_ip, url))
        return download_page(url, useproxy, verbose, maxtry - 1, timeout, checkpage)
    if maxtry <= 0:
        return None
    try:
        proxy = None
        proxy_ip = 'localhost'
        if useproxy:
            # pick a proxy and route both schemes through it
            proxy_ip = Proxy.choose_proxy()
            proxy = {
                'http': proxy_ip,
                'https': proxy_ip,
            }
        header = {
            'user-agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)'
        }
        r = requests.get(url, proxies=proxy, headers=header)
        # time.sleep(10)
        content = r.text
        statuscode = r.status_code
        print('content:')
        print(content)
        if checkpage:
            # Sanity-check the payload: it must parse, and an empty parse
            # result combined with a non-200 status is treated as a bad
            # page (e.g. proxy error / captcha) and retried.  The inner
            # bare except deliberately converts parse failures into a
            # retry as well.
            try:
                snippets = myparser.parse(content)
                if not snippets and statuscode != 200:
                    print('1')
                    raise Exception
            except:
                print('2')
                raise Exception
        if verbose:
            print('[OK] {} -> {}'.format(proxy_ip, url))
        if useproxy:
            Proxy.modify_proxy_info(proxy_ip, 1)  # reward the working proxy
        return content
    except Exception as e:
        print('Exception:')
        print(e)
        if useproxy:
            Proxy.modify_proxy_info(proxy_ip, -1)  # penalize the failing proxy
        return retry()
def comments(name):
    """Flask view for a project's comment page.

    Renders the comment list and the comment form; on a valid submission
    the (censored) comment is stored and the browser is redirected back
    to the same page.
    """
    project = parse().getProject(name)
    comment_form = CommentForm()
    existing = db.session.query(Comment).filter(Comment.project_name == name)

    if comment_form.validate_on_submit():
        new_comment = Comment(user_name=comment_form.username.data,
                              content=censor(comment_form.comment.data),
                              project_name=name,
                              timestamp=datetime.now())
        db.session.add(new_comment)
        db.session.commit()
        flash("Comment successfully posted")
        return redirect('/comments/' + name)

    return render_template("comments.html", project=project,
                           form=comment_form, commentData=existing)
def search_query(self, raw_query, weighted=False):
    # Run a term query against the index and print the top-scoring
    # documents (Python 2).
    #
    # raw_query: plain text when weighted=False (parsed, all terms get
    #   equal weight), or alternating "<weight> <term>" pairs when
    #   weighted=True.
    #query parsing and expanding
    query, qweight = [], {}
    if not weighted:
        query = parse(raw_query, self.config)
        if not query:
            self.print_not_found(raw_query)
            return
        #assign equal weight to all terms
        qweight = dict([
            key_wt for key_wt in zip(query, [1.0 / len(query)] * len(query))
        ])
    else:
        raws = raw_query.split()
        weight = []
        # tokens alternate: weight, term, weight, term, ...
        for i in range(0, len(raws), 2):
            weight.append(float(raws[i].strip()))
            query.append(raws[i + 1].strip())
        if not query:
            self.print_not_found(raw_query)
            return
        qweight = dict([key_wt for key_wt in zip(query, weight)])
    #initialize the search bucket
    # buckets accumulates, per document id, the weighted sum of rtf-idf
    # scores over all query terms that hit it; at most
    # config['bucket_size'] terms with postings are consulted.
    buckets = {}
    count = 0
    for key in query:
        posts = self.qp.search(key)
        if not posts:
            continue
        if count >= self.config['bucket_size']:
            break
        for doc_id, rtf_idf in posts.iteritems():
            buckets[doc_id] = buckets.get(
                doc_id, 0.0) + qweight[key] * float(rtf_idf)
        count += 1
    if not buckets:
        self.print_not_found(raw_query)
        return
    print "displaying top 10 results for '{0}':".format(' '.join(query))
    count = 0
    # sort by (score, doc_id) descending; Python 2 tuple-parameter lambda
    for doc_id, weight in sorted(buckets.iteritems(),
                                 key=lambda (k, v): (v, k), reverse=True):
        if count < self.config['output_size']:
            print "in '{0:s}' (id:{1:0>3d}) with weight {2:f}".format(
                self.qp.get_file_name(doc_id), doc_id, weight)
            count += 1
        else:
            break
def select(stream, db=DATABASE):
    """Return words from *stream* sorted by the value of the utility function.

    (Docstring translated from Polish.)

    :param stream: character stream to analyse
    :param db: database with information about the user's vocabulary
    :return: list of words sorted (descending) by utility value
    """
    words = myparser.parse(stream)
    temp_fl = freqlist.FreqList()
    temp_fl.load(stream=stream)
    # blend the global frequency list 50/50 with one built from this stream
    fl = freqlist.DynMixedFreqList([(freqlist.FREQLIST, 0.5), (temp_fl, 0.5)])
    utility_fun = get_utility_func(db=db, fl=fl)
    pr = get_pr_func(db=db)
    scored = []
    for word in words:
        # bug fix: consult the `db` argument rather than the module-level
        # DATABASE, so a caller-supplied database is actually honoured
        # (the helpers above already use `db`).  The `== False` comparison
        # is kept as-is in case known_now can return None.
        if DICT.correct(word) is not None and db.known_now(word) == False:
            scored.append((utility_fun(word), word, pr(word)))
    scored.sort(reverse=True)
    return [word for (_, word, _) in scored]
def main():
    """Read ``<argv[1]>.txt``, parse it, and print the resulting tree
    one node per line."""
    source_path = '{}.txt'.format(sys.argv[1])
    with open(source_path, 'r') as src:
        prog = myparser.parse(src.read())
    # (a no-op triple-quoted string holding a sample Java-like program,
    # plus a commented-out debug print, were removed as dead text)
    print(*prog.tree, sep=os.linesep)
# Interactive SQL-like REPL: reads a query, handles 'exit'/'clear', requires a
# trailing ';', lexes (lexer.check), parses (myparser.parse), semantics-checks
# (myparser2.checkSemantics) and evaluates (evalExp.main) against
# MainHashTable/metaTB.
# NOTE(review): this chunk is truncated mid-statement (it ends with a dangling
# "if(tokens):"); the rest of the loop body is outside this view, so the code
# is left byte-identical rather than reconstructed.
while True: query = input(" group1sql>> ") #get query if query == 'exit': #exits the db break elif query == 'clear': #clears the screen os.system('clear') elif query[-1] != ';': print(" ERROR: Invalid syntax") else: #parse.check returns the first invalid token (if any) error = lexer.check(query) if(error): #check if there are no invalid tokens print(" ERROR: Invalid token near", error) else: #print(' All tokens valid') tokens = myparser.parse(query) if(tokens): if(tokens[0].lower()=='select' or tokens[0].lower()=='update' or tokens[0].lower()=='delete'): if len(tokens) !=0: #the query is valid #print(" Syntax is valid") if(not('select' in tokens) and not('SELECT' in tokens)): error,message = myparser2.checkSemantics(tokens,MainHashTable,metaTB) if(error == False): evalExp.main(tokens,MainHashTable,metaTB) else: print(" ERROR: Invalid ",message," near", error) else: tokens = myparser2.checkSemantics(tokens,MainHashTable,metaTB) #print('PARSER tokens',tokens) #print(tokens) if(tokens):
# Itinerary-expansion script: reads raw segment data from stdin, parses it,
# then recursively grows all valid itineraries wave by wave
# (expandItineraries) and exposes filters by allowed bag count.
# NOTE(review): this chunk is truncated mid-definition (JSONByBagsAllowed ends
# with "return list("); the remainder is outside this view, so the code is
# left byte-identical rather than reconstructed.
import sys import myparser import itinerary import json rawData = sys.stdin.readlines() data = list(myparser.parse(rawData)) itineraries = [] def expandItineraries(validItineraries): nextItineraryWave = [] for itinerary in validItineraries: nextItineraryWave.extend(itinerary.validNextItineraries(data)) itineraries.extend(nextItineraryWave) if len(nextItineraryWave) > 0: expandItineraries(nextItineraryWave) expandItineraries(map(lambda d: itinerary.Itinerary([d]), data)) def segmentsByBagsAllowed(bags_allowed): return list(itinerary for itinerary in itineraries if itinerary.bags_allowed >= bags_allowed) def JSONByBagsAllowed(bags_allowed): return list(
# Benchmark-log parsing script: reads emulator log files from `filelist`,
# extracts the keyword-tagged metrics via mpr.parse(), and plots them.
# `x86`, `x86TH`, `filelist` and `error` are defined earlier in the file,
# outside this view.
# NOTE(review): this chunk is truncated mid-call (it ends with
# "plt = mpl.plot("); the code is left byte-identical rather than
# reconstructed.
f = open(filelist[0], "r") emu = f.readlines() f.close() f = open(filelist[2], "r") emuTH = f.readlines() f.close() Keywords = [ "threads", "sparsity", "compression time taken", "solution time taken" ] Case_sensitivity = [0, 0, 0, 0] Method = [1, 0, 0, 0] Splitting = [0, 2, 3, 3] t_x86 = mpr.parse(x86, Keywords, Case_sensitivity, Method, Splitting) t_x86TH = mpr.parse(x86TH, Keywords, Case_sensitivity, Method, Splitting) t_emu = mpr.parse(emu, Keywords, Case_sensitivity, Method, Splitting) t_emuTH = mpr.parse(emuTH, Keywords, Case_sensitivity, Method, Splitting) if (type(t_emu) == int): print(error[t_emu - 1]) #plt = mp.plot(np.array((x86[:,0]),dtype=int), #X data # np.array([emuTH[:,2],x86TH[:,2]*np.ones((x86.shape[0],))]), #Y data # [0,0], #Plot Type # ["blue","orange"], #Colors # ["Threads","Clock Cycles","EMU vs x86 1024","EMU","x86"], #Names # [1,1]) #Labels plt = mpl.plot(
# TSV-to-VW conversion script: reads a labeled TSV plus a companion CSV and
# writes Vowpal-Wabbit-style lines (label, '|phrase' text, '|count'
# word-count feature, '|i' ...).  `csv_sam` and `test` are defined earlier in
# the file, outside this view.
# NOTE(review): this chunk is truncated mid-loop (it ends with
# "for k in range(0, 24):"); the code is left byte-identical rather than
# reconstructed.
# Read command line if len(sys.argv) >= 3: tsv_file = sys.argv[1] txt_file = sys.argv[2] if len(sys.argv) == 4: test = sys.argv[3] == 'test' with open(tsv_file, 'r') as tsvin, open(csv_sam, 'r') as csvsam, open(txt_file, 'w') as txtout: tsvin.readline() # Skip header tsvin = csv.reader(tsvin, delimiter='\t') csvsam = csv.reader(csvsam, delimiter=',') for row, row2 in zip(tsvin, csvsam): to_write = '' phrase = parse(row[2]) # clean and store the phrase # Write label and tag to_write += str(int(row[3]) + 1) if test == False else '1' to_write += " '" + row[0] # Parse to_write += ' |phrase ' to_write += phrase # Add features # I wrote one inline but we can of course make call to other scripts here to_write += ' |count word_count:' to_write += str(1 + phrase.count(' ')) to_write += ' |i ' for k in range(0, 24):
def project(name):
    """Render the detail page for the project called *name*."""
    selected = parse().getProject(name)
    return render_template("project.html", project=selected)
def index():
    """Render the home page, which always shows the HW0 project."""
    hw0 = parse().getProject("HW0")
    return render_template("index.html", title='Home', project=hw0)