def nagiosMaxConns(blob, warn, crit):
    """Nagios check: report upstream peers whose active-connection usage
    exceeds the given percentage thresholds.

    blob: parsed nginx-plus status JSON: {"upstreams": {name: {"peers": [...]}}}.
    warn, crit: percentage thresholds compared against active*100/max_conns.
    Returns (results, code): one "hostname upstream active" line per
    offending peer, and Nagios exit code 0 (ok) / 1 (warning) / 2 (critical).

    Fix: the original bare `except: pass` swallowed every error, including
    NameErrors from lookup()/env — now only the expected per-peer data
    problems (missing key, non-numeric value, max_conns == 0) are skipped.
    NOTE(review): relies on a module-level `env` — confirm it is defined.
    """
    results = []
    code = 0
    for upstream in blob["upstreams"]:
        for instance in blob["upstreams"][upstream]["peers"]:
            try:
                pct = (int(instance["active"]) * 100) / int(instance["max_conns"])
            except (KeyError, ValueError, TypeError, ZeroDivisionError):
                # Peer has no usable active/max_conns data: skip it.
                continue
            if pct > crit:
                code = 2
            elif pct > warn and code != 2:
                code = 1
            else:
                continue
            results.append(" ".join([
                lookup(instance["server"], env),  # gets the hostname
                upstream,
                instance["active"],
            ]))
    return results, code
def handle_lookup(nodes_path, anew_path, sn_path, pred_path, rels_path, edges_path):
    """Load every lexicon resource from disk and hand them to lookup().

    The anew/sn/pred tables are parsed with atof (numeric values); nodes
    and rels are loaded as-is; edges use the dedicated edge loader.
    """
    lookup(
        _load(nodes_path),
        _load(anew_path, atof),
        _load(sn_path, atof),
        _load(pred_path, atof),
        _load(rels_path),
        _load_edges(edges_path),
    )
def main(argv):
    """CLI entry point: look up a spelling when given, else go interactive.

    argv: argument vector including the program name; one extra argument
    is treated as the word to look up.
    """
    if len(argv) < 1:
        usage()
    if len(argv) == 2:
        lookup.lookup(argv[1])
    elif len(argv) == 1:
        lookup.interact()
def invoke(self, args, from_tty):
    # print node in linked list
    """GDB command: pretty-print the linked-list node named by the single
    symbol in *args*.

    Bails out with a user-facing message unless exactly one argument is
    given, the pretty-struct system and the link-list service are both
    enabled, and a LinkedList printer is registered for the value's type.
    Stores the printer in the module-global `linked_list_printer`.
    """
    global linked_list_printer
    linked_list_printer = None
    argv = args.split()
    if len(argv)!=1:
        self.usage()
        return
    # Global on/off switch for the whole pretty-struct system.
    if lookup.lookup_state == False:
        print ("Pretty-struct system is currently deactivated.\n"
               "To activate invoke \"ps on\"")
        return
    # Feature switch specific to linked-list printing.
    if settings.link_list == False:
        print ("Service \"link-list pretty-printing\" is currently "
               "deactivated.\n"
               "To activate invoke \"ps link-list on\"")
        return
    val = gdb.parse_and_eval(argv[0])
    printer = lookup.lookup(val,False)
    if printer is None:
        print ("No linked-list pretty-printer class is registered for "
               "type name of variable under given symbol!")
        return
    if not isinstance(printer,LinkedList):
        print ("Variable under given symbol doesn't represent node of "
               "linked-list!")
        return
    # Remember the printer so other commands can reuse it for this node.
    linked_list_printer = printer
    lookup.register_onetime_printer(val,printer)
    print str(val) # invoke printer
def do_POST(self):
    """Handle POST /api/v1/lookup: the JSON body is a list of lat/lon
    values; responds 200 with the optimal stations' lat/longs as JSON.

    NOTE(review): `send_error` is called as a bare function rather than
    `self.send_error` — confirm a module-level helper exists, otherwise
    the error paths raise NameError. (Python 2: `dict.keys()[0]`.)
    """
    if None != re.search('/api/v1/lookup', self.path):
        ctype, pdict = cgi.parse_header(
            self.headers.getheader('content-type'))
        if ctype == 'application/json':
            length = int(self.headers.getheader('content-length'))
            # Body arrives form-encoded; the JSON payload is the single key.
            dataJSON = cgi.parse_qs(self.rfile.read(length),
                                    keep_blank_values=1)
            latlonvalues = json.loads(dataJSON.keys()[0])
            stationcodes = lookup.findstations(latlonvalues)
            optimalstations = lookup.lookup(stationcodes)
            if not optimalstations:
                send_error(400, 'Bad Request: no input stations')
                return
            optimallatlongs = lookup.reverselookup(optimalstations)
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            self.wfile.write(json.dumps(optimallatlongs))
        else:
            send_error(400, 'Bad Request: record does not exist')
    else:
        send_error(403, "")
def main_loop(self):
    """Interactive prompt loop: serve 'websoc' queries until 'quit'."""
    while True:
        tokens = input(">>> ").lower().split()
        if not tokens:
            print("Invalid argument(s), type websoc for usage.")
            continue
        if tokens[0] == 'quit':
            break
        if tokens[0] != 'websoc':
            print("Invalid argument(s), type websoc for usage.")
            continue
        if len(tokens) > 3:
            print("Invalid argument(s), type websoc for usage.")
        elif len(tokens) == 1:
            print("Usage: websoc [department] [course number]")
        else:
            # Normalize the department token through the lookup table.
            tokens[1] = lookup(tokens[1])
            if len(tokens) == 2:
                self.send_request(tokens[1], '')
            elif len(tokens) == 3:
                self.send_request(tokens[1], tokens[2])
            self.resolve_text(tokens[1])
            self.display_courses(tokens)
def __init__(self, source_list=None):
    """Open the IP-geolocation backends named in *source_list*.

    source_list: any subset of {"bgp", "czdb", "mmdb", "ip2location"};
    defaults to ["mmdb"].

    Fix: the default was a shared mutable list (one object reused across
    every call); a None sentinel avoids the aliasing.
    """
    if source_list is None:
        source_list = ["mmdb"]
    self.source_list = source_list
    self.lookup = None
    self.qqwry = None
    self.reader = None
    self.database = None
    #bgp.
    if "bgp" in source_list:
        self.lookup = lookup.lookup()
    #czdb: download qqwry.dat on first use.
    if "czdb" in source_list:
        self.czdb_path = "qqwry.dat"
        if not os.path.exists(self.czdb_path):
            qqwry.update_db(self.czdb_path)
        self.qqwry = qqwry.QQWry(self.czdb_path)
    #mmdb.
    if "mmdb" in source_list:
        self.reader = geoip2.database.Reader('GeoLite2-City.mmdb')
    #ip2location:
    if "ip2location" in source_list:
        self.database = IP2Location.IP2Location()
        self.database.open("IP2LOCATION-LITE-DB11.BIN")
def get_stats(_name, stat_type='PER_GAME', playoffs=False, career=False, ask_matches = True):
    """Fetch a player's season-stats table from basketball-reference.

    Resolves *_name* via lookup(), downloads the widget table for
    *stat_type* (optionally the playoff variant), normalizes column
    names, and returns either the per-season rows (career=False) or the
    career-summary rows (career=True) as a DataFrame. Returns None
    implicitly on a non-200 response.
    """
    name = lookup(_name, ask_matches)
    suffix = get_player_suffix(name).replace('/', '%2F')
    selector = stat_type.lower()
    if playoffs:
        selector = 'playoffs_'+selector
    r = get(f'https://widgets.sports-reference.com/wg.fcgi?css=1&site=bbr&url={suffix}&div=div_{selector}')
    if r.status_code==200:
        soup = BeautifulSoup(r.content, 'html.parser')
        table = soup.find('table')
        df = pd.read_html(str(table))[0]
        df.rename(columns={'Season': 'SEASON', 'Age': 'AGE', 'Tm': 'TEAM', 'Lg': 'LEAGUE', 'Pos': 'POS'}, inplace=True)
        # read_html renames duplicate '%' columns with a '.1' suffix.
        if 'FG.1' in df.columns:
            df.rename(columns={'FG.1': 'FG%'}, inplace=True)
        if 'eFG' in df.columns:
            df.rename(columns={'eFG': 'eFG%'}, inplace=True)
        if 'FT.1' in df.columns:
            df.rename(columns={'FT.1': 'FT%'}, inplace=True)
        # Rows before 'Career' are individual seasons; rows from
        # career_index+2 onward are the career/summary block.
        career_index = df[df['SEASON']=='Career'].index[0]
        if career:
            df = df.iloc[career_index+2:, :]
        else:
            df = df.iloc[:career_index, :]
        df = df.reset_index().drop('index', axis=1)
        return df
def cactiGet(blob, query, index, regex, env):
    """Return one peer field for Cacti's data query.

    Searches upstreams whose name matches *regex* for the peer whose
    "server" equals str(index); returns its *query* field, resolving
    "hostname" through lookup(). Returns None when nothing matches.
    """
    wanted = str(index)
    for name, data in blob["upstreams"].items():
        if not match(regex, name):
            continue
        for peer in data["peers"]:
            if peer["server"] != wanted:
                continue
            if query == "hostname":
                return lookup(peer["server"], env)
            return peer[query]
    return None
def __init__(self):
    """Open all three geolocation backends: the BGP lookup helper, the
    qqwry CZ database (downloaded when absent), and the MaxMind city DB.
    """
    # BGP-based lookup helper.
    self.lookup = lookup.lookup()
    # CZ (qqwry) database: fetch the data file on first run.
    db_file = "qqwry.dat"
    self.czdb_path = db_file
    if not os.path.exists(db_file):
        qqwry.update_db(db_file)
    self.qqwry = qqwry.QQWry(db_file)
    # MaxMind GeoLite2 city reader.
    self.reader = geoip2.database.Reader('GeoLite2-City.mmdb')
def index(word):
    """Look up *word* and return its first-use year plus citing books.

    Aborts with 400 on a bad call and 404 when the word is unknown.
    """
    try:
        year, books = lookup.lookup(word)
    except lookup.BadCall:
        abort(400, 'plze provide a word to look up')
    except lookup.NotFound:
        abort(404, 'pff {word}\'s not a word'.format(word=word))
    payload = {'word': word, 'year': year, 'books': books}
    return payload
def combined(filepath, nrange, syllables='all'):
    """Held-one-out average n-gram probability per order n.

    For each song in the '/'-separated data file, trains entropy models
    on every *other* song and looks up the held-out song's n-gram
    probabilities for each n in range(nrange[0], nrange[1]), keeping
    only n-grams whose second-to-last unit is in *syllables*
    ('all' expands to every character seen in the data). Averages per n,
    writes ./output/p.csv, and returns the {n: average} dict.
    """
    ldict = {}
    for n in range(nrange[0], nrange[1]):
        ldict[n] = []
    string = index.get_data_string(filepath)
    data_list = string.split('/')
    if syllables == 'all':
        # Expand 'all' to every distinct character present in the data.
        syllables = []
        for item in data_list:
            for char in item:
                if char not in syllables:
                    syllables.append(char)
    # print(data_list)
    # print(len(data_list))
    metadict = {}  # NOTE(review): never used — dead variable
    for i in range(0, len(data_list)):
        # print(i)
        result = []  # NOTE(review): never used — dead variable
        # Copy the corpus and drop song i (the held-out item).
        held_out_list = []
        for item in data_list:
            held_out_list.append(item)
        held_out_list.remove(held_out_list[i])
        print('calculating entropy for song ' + str(i + 1) + ' of ' + str(len(data_list)))
        held_out_string = '/'.join(held_out_list)
        for n in range(nrange[0], nrange[1]):
            dct = entropy.p_to_ent(held_out_string, [nrange[0], n + 1])
            look = lookup.lookup(dct, data_list[i])
            for ktem in look:
                # print('ktem='+str(ktem))
                # ktem[0] is the n-gram, ktem[1][0] its probability.
                if ktem[0][-2] in syllables:
                    ldict[len(ktem[0])].append(ktem[1][0])
        i += 1  # no effect: the for loop rebinds i each iteration
    # Average the collected (float) probabilities per n-gram length.
    avdict = {}
    for key, value in ldict.items():
        probs = []
        # print(len(value))
        for item in value:
            if isinstance(item, float):
                probs.append(item)
        if len(probs) > 0:
            avdict[key] = sum(probs) / len(probs)
        else:
            avdict[key] = 0
    with open("./output/p.csv", 'w') as output_file:
        writer = csv.writer(output_file)
        for key, value in avdict.items():
            row = []
            row.append(key)
            row.append(value)
            writer.writerow(row)
    return avdict
def OnEnter(self, event):
    """Text-entry handler (Python 2 / wxPython): dispatch the typed text
    to every handler module (chat, operations, math, lookup, translate),
    print help.txt when 'help' appears, and fall back to microphone
    speech recognition when the field is empty.
    """
    input = self.txt.GetValue()  # NOTE: shadows the builtin `input`
    input = input.lower()
    from chat import chat
    chat(input)
    from operations import oper
    oper(input)
    from mathOper import math
    math(input)
    from lookup import lookup
    lookup(input)
    from translate import translate
    translate(input)
    if 'help' in input:
        with open('help.txt', 'r') as fin:
            print fin.read()
    #-----------Speech Recognition----------#
    if input == '':
        import speech_recognition as sr
        r = sr.Recognizer()
        with sr.Microphone(
        ) as source:  # use the default microphone as the audio source
            audio = r.listen(
                source
            )  # listen for the first phrase and extract it into audio data
            try:
                self.txt.SetValue(r.recognize_google(audio))
            except LookupError:  # speech is unintelligible
                print("Could not understand audio")
def nagiosMaxConns(blob, warn, crit):
    """Nagios check: list upstream peers over their max_conns usage thresholds.

    blob: nginx-plus status JSON; warn/crit: percentages compared to
    active*100/max_conns. Returns (results, code) where code is the
    Nagios exit status (0 ok, 1 warning, 2 critical).

    Fix: replaces the bare `except: pass` (which hid NameErrors from
    lookup()/env as well as data errors) with the specific per-peer
    exceptions, and hoists the duplicated reporting code.
    NOTE(review): relies on a module-level `env` — confirm it is defined.
    """
    results = []
    code = 0
    for upstream in blob["upstreams"]:
        for instance in blob["upstreams"][upstream]["peers"]:
            try:
                pct = (int(instance["active"]) * 100) / int(instance["max_conns"])
            except (KeyError, ValueError, TypeError, ZeroDivisionError):
                continue  # peer lacks usable active/max_conns data
            if pct > crit:
                code = 2
            elif pct > warn and code != 2:
                code = 1
            else:
                continue
            results.append(" ".join([
                lookup(instance["server"], env),  # gets the hostname
                upstream,
                instance["active"],
            ]))
    return results, code
def get_game_logs(_name, start_date, end_date, playoffs=False):
    """Fetch a player's game logs between two dates from basketball-reference.

    _name: player name, resolved through lookup().
    start_date, end_date: inclusive bounds, any string pandas can parse.
    playoffs: use the playoff game-log table instead of the regular one.
    Returns a DataFrame of games started (GS == 1 or '1') across all
    seasons in the range, or None when no page could be fetched.

    Fix: DataFrame.append() was removed in pandas 2.0 — frames are now
    collected in a list and combined with pd.concat(). Also drops the
    unused *_str locals.
    """
    name = lookup(_name)
    suffix = get_player_suffix(name).replace('/', '%2F').replace('.html', '')
    start_date = pd.to_datetime(start_date)
    end_date = pd.to_datetime(end_date)
    # +2: a season overlapping the end year is served under year+1.
    years = list(range(start_date.year, end_date.year + 2))
    selector = 'div_pgl_basic_playoffs' if playoffs else 'div_pgl_basic'
    frames = []
    for year in years:
        r = get(
            f'https://widgets.sports-reference.com/wg.fcgi?css=1&site=bbr&url={suffix}%2Fgamelog%2F{year}%2F&div={selector}'
        )
        if r.status_code != 200:
            continue
        soup = BeautifulSoup(r.content, 'html.parser')
        table = soup.find('table')
        if not table:
            continue
        df = pd.read_html(str(table))[0]
        df.rename(columns={
            'Date': 'DATE',
            'Age': 'AGE',
            'Tm': 'TEAM',
            'Unnamed: 5': 'HOME/AWAY',
            'Opp': 'OPPONENT',
            'Unnamed: 7': 'RESULT',
            'GmSc': 'GAME_SCORE'
        }, inplace=True)
        # The unnamed column holds '@' for away games.
        df['HOME/AWAY'] = df['HOME/AWAY'].apply(
            lambda x: 'AWAY' if x == '@' else 'HOME')
        # Drop repeated header rows embedded in the table body.
        df = df[df['Rk'] != 'Rk']
        df = df.drop(['Rk', 'G'], axis=1)
        df['DATE'] = pd.to_datetime(df['DATE'])
        df = df.loc[(df['DATE'] >= start_date) & (df['DATE'] <= end_date)]
        # Keep only games the player started.
        frames.append(df[df['GS'].isin([1, '1'])])
    if not frames:
        return None
    return pd.concat(frames)
def respond(word, tweet_id):
    """Post a tweet replying to *tweet_id* with *word*'s definition year.

    Logs and returns silently when the word is unknown (NotFound) or the
    lookup call was malformed (BadCall).
    """
    try:
        year, books = lookup.lookup(word)
    except lookup.NotFound:
        print('google disagrees, nyt:', word)
        return
    except lookup.BadCall:
        print('wat', word)
        return
    payload = {
        'status': '@{} {}'.format(first_said, humanize(word, year, books)),
        'in_reply_to_status_id': tweet_id,
    }
    resp = requests.post('https://api.twitter.com/1.1/statuses/update.json',
                         auth=auth, data=payload)
    print('posted', payload['status'], resp.ok)
def make(rerun_r, add_comments, output_dir):
    """Build the data readme.

    Optionally reruns the R extraction script, then joins each lookup()
    result with its table / date-range row from /tmp/data_readme.csv and
    writes the markdown output. Returns the (augmented) results list.
    """
    if rerun_r:
        r_data_readme = os.path.join(opp_root_dir(), 'lib', 'data_readme.R')
        sub.run(['Rscript', r_data_readme])
    tables = pd.read_csv('/tmp/data_readme.csv')
    results = lookup('all', 'all', 'all', n_lines_after=0, update_repo=False)
    for r in results:
        idx = (tables.city == r['city']) & (tables.state == r['state'])
        # NOTE: this happens when there is code but the data is bad
        if not any(idx):
            print('Skipping %s, %s' % (r['city'], r['state']))
            continue
        r['table'] = tables.loc[idx, 'predicated_null_rates'].tolist()[0]
        r['date_range'] = tables.loc[idx, 'date_range'].tolist()[0]
    write_md(results, add_comments, output_dir)
    return results
def cactiQuery(blob, query, regex, env):
    """List "server!value" pairs for peers of upstreams matching *regex*.

    query == 'upstream' -> value is the upstream name;
    query == 'hostname' -> value is the hostname from lookup();
    anything else       -> value is str(peer[query]).
    """
    def value_for(peer, upstream_name):
        # Resolve the requested field for one peer.
        if query == 'upstream':
            return upstream_name
        if query == 'hostname':
            return lookup(peer["server"], env)
        return str(peer[query])

    results = []
    for upstream_name, data in blob["upstreams"].items():
        if not match(regex, upstream_name):
            continue
        for peer in data["peers"]:
            results.append("!".join([peer["server"], value_for(peer, upstream_name)]))
    return results
def search(keywordString):
    """Search the crawled index for *keywordString* and return URLs
    ordered best-first, where each hit's keyword weight is boosted by
    its computed rank score. (Python 2: returns a `map` list.)
    """
    global index
    global ranks
    #Initialise urlRelations
    urlRelations = crawler.readPhoebyFile('urlRelations')
    #Initialise index
    index = crawler.readPhoebyFile('index')
    #Compute ranks
    ranks = rank.compute_ranks(urlRelations)
    urlWeights = lookup.lookup(index, keywordString)
    # Combine each page's keyword weight with its rank score.
    for url in urlWeights.keys():
        urlWeights[url] += ranks[url]
    sortedUrlWeights = sorted(urlWeights.items(), key=lambda x: x[1], reverse=True)
    return map(lambda x: x[0], sortedUrlWeights)
def attr_to_string(self, field_val, other_req_fields):
    """Resolve which member of a union field should be pretty-printed.

    Reads the sibling "union state" field, coerces it to a Python value
    keyed into self.mapping, registers a one-time printer restricted to
    that member, and returns ("", field_val). Sets self.failed when the
    state type is unsupported or prerequisites are missing.
    (Python 2: uses dict.has_key().)
    """
    if (settings.union == True
            and other_req_fields.has_key(self.field_union_state)
            and type(self.mapping) == type({})):
        union_state = other_req_fields[self.field_union_state]
        union_state_type = get_pure_type(union_state)
        # Coerce the gdb value to the Python type matching its type code
        # so it can be used as a key into self.mapping.
        if (union_state_type.code == gdb.TYPE_CODE_INT
                or union_state_type.code == gdb.TYPE_CODE_ENUM):
            union_state = int(union_state)
        elif (union_state_type.code == gdb.TYPE_CODE_FLT
                or union_state_type.code == gdb.TYPE_CODE_DECFLOAT):
            union_state = float(union_state)
        elif (union_state_type.code == gdb.TYPE_CODE_STRING
                or union_state_type.code == gdb.TYPE_CODE_ARRAY
                or union_state_type.code == gdb.TYPE_CODE_CHAR):
            union_state = str(union_state)
        elif (union_state_type.code == gdb.TYPE_CODE_BOOL):
            union_state = bool(union_state)
        else:
            # Unsupported state type: give up on this field.
            self.failed = True
            return "", field_val
        # to_print = None means "print all attributes".
        if self.mapping.has_key(union_state):
            to_print = [self.mapping[union_state]]
        else:
            to_print = None
        # Pick a printer: explicit class > registered lookup > generic.
        if self.union_struct_class is not None:
            union_printer = self.union_struct_class(field_val)
        else:
            union_printer = lookup.lookup(field_val,False)
            if union_printer is None:
                union_printer = struct.UnionStruct(field_val)
                union_printer.type_name = str(field_val.type.tag)
        union_printer.attrs_to_print = to_print
        lookup.register_onetime_printer(field_val,union_printer)
        return "", field_val
    else:
        self.failed = True
        return "", field_val
def nagiosCheckLB(blob, env=None):
    """Nagios check over load-balancer peer states.

    Every peer whose state is not "up" is reported as
    "hostname upstream state". Exit code: 2 if any peer is
    down/unavail/unhealthy, else 1 if any is warning, else 3 (unknown)
    for unrecognized non-up states, else 0.
    """
    ok = ("up",)
    critical = ("down", "unavail", "unhealthy")
    warning = ("warning",)
    messages = []
    code = 0
    for name, data in blob["upstreams"].items():
        for peer in data["peers"]:
            state = peer["state"]
            if state in ok:
                continue
            messages.append(" ".join([
                lookup(peer["server"], env),  # gets the hostname
                name,
                state,
            ]))
            if state in critical:
                code = 2
            elif state in warning and code != 2:
                code = 1
            elif code == 0:
                code = 3  # non-up but unrecognized -> UNKNOWN
    if not messages:
        messages = ["All hosts in up state"]
    return messages, code
def __init__(self, parent=None):
    """Build the "Open Youdao" window: a QWebView inside an HBox layout,
    plus three background threads — text capture, dictionary lookup
    (its `signal` is wired to self.slot), and input configuration.
    """
    super(Window, self).__init__(parent)
    self.setGeometry(600, 600, 250, 250)
    self.setWindowTitle("Open Youdao")
    bl = QtGui.QHBoxLayout()
    self.webView = QtWebKit.QWebView()
    bl.addWidget(self.webView)
    self.setLayout(bl)
    # Thread that captures the text to be looked up.
    self.gettextThread = gettext.gettext(self)
    self.gettextThread.start()
    # Lookup thread reports results back through `signal` -> slot().
    self.lookupThread = lookup.lookup(self)
    self.lookupThread.signal.connect(self.slot)
    self.lookupThread.start()
    self.inputconfigThread = inputconfig.inputConfig(self)
    self.inputconfigThread.start()
def generate_sound(message: Message) -> AudioSegment:
    """Render *message* as audio.

    Per-sender settings come from lookup(): ignored senders yield None;
    'animalese' senders use the animalese renderer; everyone else goes
    through Google TTS (language/accent from settings), with playback
    speed nudged by the sender's pitch. Writes and removes temp.mp3.
    """
    data = lookup(message)
    if (data.get('ignored')):
        l.debug('message from {} ignored'.format(message.sender))
        return None
    l.debug('parsing sentence {}'.format(message.text))
    message_text = parse_sentence(message.text)
    l.info("[chat] {}: {}".format(message.sender, message.text))
    # `animalese` (module-level) is the fallback when the sender has no
    # explicit setting.
    if (data.get('animalese', animalese)):
        l.debug('sending sentence to animalese, and returning')
        return render_animalese(message_text, data.get('pitch'))
    l.debug('sending data to google TTS')
    # generate the message
    tts = gTTS(message_text, lang=data.get('language'), tld=data.get('accent'))
    # save
    tts.save('temp.mp3')
    # reload
    audio = AudioSegment.from_mp3("temp.mp3")
    os.remove('temp.mp3')
    # change speed and return
    return (speed_change(audio, 0.9 + data.get('pitch') * 0.3))
def get_game_logs(_name, year, playoffs=False, ask_matches=True):
    """Fetch a player's game-log table for *year* from basketball-reference.

    Returns a DataFrame of games started (GS == '1'), or None implicitly
    when the request fails or the page has no table.
    """
    name = lookup(_name, ask_matches)
    suffix = get_player_suffix(name).replace('/', '%2F').replace('.html', '')
    if playoffs:
        selector = 'div_pgl_basic_playoffs'
    else:
        selector = 'div_pgl_basic'
    r = get(f'https://widgets.sports-reference.com/wg.fcgi?css=1&site=bbr&url={suffix}%2Fgamelog%2F{year}%2F&div={selector}')
    if r.status_code==200:
        soup = BeautifulSoup(r.content, 'html.parser')
        table = soup.find('table')
        if table:
            df = pd.read_html(str(table))[0]
            df.rename(columns = {'Date': 'DATE', 'Age': 'AGE', 'Tm': 'TEAM', 'Unnamed: 5': 'HOME/AWAY', 'Opp': 'OPPONENT', 'Unnamed: 7': 'RESULT', 'GmSc': 'GAME_SCORE'}, inplace=True)
            # The unnamed column holds '@' for away games.
            df['HOME/AWAY'] = df['HOME/AWAY'].apply(lambda x: 'AWAY' if x=='@' else 'HOME')
            # Drop repeated header rows embedded in the table body.
            df = df[df['Rk']!='Rk']
            df = df.drop(['Rk', 'G'], axis=1)
            df['DATE'] = pd.to_datetime(df['DATE'])
            df = df[df['GS'] == '1'].reset_index(drop=True)
            return df
def read():
    """Display augmented Chinese characters for reading.

    GET redirects to the index; POST looks up the pasted text, records
    the search (success or failure) in the database, and renders the
    annotated phrases together with simple counts and the lookup time.
    """
    if request.method == "GET":
        return redirect(url_for("index"))
    if request.method == "POST":
        # begin timing how long it takes to load the CEDICT dictionary
        timer = TicToc()
        text = request.form["pastedtext"]
        # add input text to search database, store search id for later updates,
        search_id = insertSearch(
            text[0:1000]
        )  # Limit search added to database to 1000 characters to prevent size from growing too large
        # update database that search was not successful
        if len(text) > 1000:
            searchFailed(search_id, "Input exceeded length limit")
            # notify user
            return render_template("checkinput.html")
        (phrases, mode) = lookup(text)
        elapsed_time = timer.toc("Returning phrases")
        # Count phrases with definitions, and their total characters.
        character_count = 0
        phrase_count = 0
        for phrase in phrases:
            if phrase["definitions"] != None:
                phrase_count += 1
                characters = phrase['lookup']
                character_count += len(characters)
        # update database that search was successful
        searchSucceeded(search_id, mode)
        # return page to user
        return render_template("read.html", phrases=phrases, elapsed_time=elapsed_time, character_count=character_count, phrase_count=phrase_count)
def nagiosCheckLB(blob, env=None):
    """Report every load-balancer peer that is not in the "up" state.

    Returns (results, code): "hostname upstream state" per bad peer, and
    Nagios code 2 (critical states), 1 (warning), 3 (unknown non-up
    state) or 0 (everything up).
    """
    severity_by_state = {"down": 2, "unavail": 2, "unhealthy": 2, "warning": 1}
    results = []
    code = 0
    for upstream, udata in blob["upstreams"].items():
        for peer in udata["peers"]:
            state = peer["state"]
            if state == "up":
                continue
            host = lookup(peer["server"], env)  # gets the hostname
            results.append(" ".join([host, upstream, state]))
            severity = severity_by_state.get(state)
            if severity == 2:
                code = 2
            elif severity == 1:
                if code != 2:
                    code = 1
            elif code == 0:
                code = 3  # unrecognized non-up state -> UNKNOWN
    if not results:
        results = ["All hosts in up state"]
    return results, code
# Reddit bot main loop (Python 2): poll a subreddit for "DefineMe! <word>"
# comments, reply with a Merriam-Webster Collegiate definition, and persist
# the IDs of comments already answered.
# `replies` and `r` (the praw Reddit session) are set up earlier in the file.
replies = replies.split("\n")
replies = filter(None, replies)  # drop empty lines from the saved-ID file
#get Merriam Webster API Key
collkey = os.getenv("MERRIAM_WEBSTER_COLLEGIATE_KEY")
#keyWord = "DefineMe!"
while True:
    subreddit = r.get_subreddit('UMW_CPSC470Z')
    subreddit_comments = subreddit.get_comments()
    for comment in subreddit_comments:
        #has_keyWord = any(string in comment.body for keyWord)
        if comment.id not in replies:
            if re.search("DefineMe! ", comment.body):
                #get word from comment
                word = re.search("DefineMe! (.*)", comment.body, re.IGNORECASE).groups()
                query = word[0]
                lookupResult = lookup(CollegiateDictionary, collkey, query)
                REPLY = lookupResult
                print REPLY
                comment.reply(REPLY)
                print("Should have replied")
                replies.append(comment.id)
    # Persist the full reply list each cycle so restarts don't re-answer.
    print("Saving new ids to file")
    with open("readComments.txt", "w") as f:
        for i in replies:
            f.write(i + "\n")
    print("sleeping!")
    time.sleep(600)
def __init__(self):
    """Set up the defusal tool: the module lookup table and greeting text."""
    self.table = lookup()
    self.output = "Welcome to Jorts's Bomb Defusal Tool"
def get_player_headshot(_name):
    """Return the CDN URL of the player's headshot image."""
    player_name = lookup(_name)
    page_suffix = get_player_suffix(player_name)
    # The page filename maps directly onto the image filename.
    image_file = page_suffix.split('/')[-1].replace('html', 'jpg')
    return 'https://d2cwpp38twqe55.cloudfront.net/req/202006192/images/players/' + image_file
if mode == 'arrive by': # Calculate the second drive first, as that determines the times for the first drive # lookup timezone coordinates2 = get_coordinates(destination2) tz = tzLookup( coordinates2, dt ) # -06:00 # this is that weird service that needs a time string arrive_by2String = timeVariableStamp #+ tz # 2018-7-30T10:0:00-06:00 try: arrive_by2Int = convertStringsToEpoch(arrive_by2String) except: print('failed to convert Time string to Epoch time') # print('fly to', origin2, 'then drive to', destination2, 'arriving by', arrive_by2Int) drive2googleTime = int( lookup(mode, origin2, destination2, arrive_by2Int, trafficModel) / 60) # convert the googletime to safeTime safeTime2 = int(convert(drive2googleTime)) if mode == 'depart at': # calculate the first drive first # lookup timezone coordinates = get_coordinates(origin) tz = tzLookup(coordinates, dt) depart_at1String = timeVariableStamp #+ tz try: depart_at1Int = convertStringsToEpoch(depart_at1String) # print('epoch time =', depart_at1Int) except:
def analyse(self, problem):
    """All the work happens here: normalize the patient's input, handle
    the special "words:" / "lemmas:" / "all:" debug prefixes, then
    collect (quality, case) couples describing the utterance and
    delegate the reply to self.repo().
    """
    self.problemBase = problem.strip()
    # lower-case the first letter of the sentence
    if len(self.problemBase) > 1:
        self.problemBase = self.problemBase[0].lower() + self.problemBase[1:]
    self.special = self.problemBase.split(":")[0]
    #print "***",self.problemBase.split(":"),self.special
    if self.special in ["words", "lemmas", "all"]:
        self.problemBase = " ".join(self.problemBase.split(":")[1:])
    else:
        self.special = None
    #self.problem = self.problemBase
    # Expand contractions before any further analysis.
    self.problem = self.linguist.decontracte(self.problemBase)
    if self.special == "words":
        return "words: " + self.problem
    self.cas = []  # contains (quality,case) couples
    inputPropre = self.linguist.nettoyerTexte(self.problem)
    inputSplit = inputPropre.split()
    #print inputSplit
    if self.special == "lemmas":
        return "lemmas: " + " / ".join(lookup(inputPropre))
    lemmas = lookup(
        inputPropre)  # list of lemmas (that are different from the form)
    # Track the speaker's (guessed) sex; flag a change as its own case.
    sex = self.linguist.guesssex(self.problem)
    if sex != "unknown":
        self.sess.data['sex'] = sex
        if self.sex != "unknown" and sex != self.sex:
            c = "votre identité sexuelle"
            self.cas += [(self.rogers.quality[c], c)]
            #return self.repo()
        self.sex = sex
    #for lemma in lemmas:
    #print lemma,"-"
    allKeys = list(set(inputSplit + lemmas))
    allKeys.sort()
    if self.special == "all":
        return "all: " + " / ".join(allKeys)
    # special cases:
    if len(self.problem) > 0 and self.cleTrouvee(
            self.rogers.keywords['vos SMS'], self.problem,
            self.problem.split()):
        c = "vos SMS"
        self.cas += [(self.rogers.quality.get(c, 0), c)]
    # check whether the patient actually said anything
    if len(inputSplit) == 0:
        #if len(self.patientInput) > 0:
        c = "Vide"
        self.cas += [(self.rogers.quality.get(c, 0), c)]
        return self.repo()
    # did the patient perhaps ask a question:
    # i.e. it starts with an interrogative word or ends with a question mark
    if inputSplit[0] in self.listeInterro or self.problem[-1] == "?":
        c = "Question"
        self.cas += [(self.rogers.quality.get(c, 0), c)]
    elif self.problemBase in self.memoProblems:
        c = "Répétition"
        self.cas += [(self.rogers.quality.get(c, 0), c)]
    # regular cases:
    for nom in self.rogers.keywords.keys():  # for each keyword key
        if self.cleTrouvee(self.rogers.keywords[nom], inputPropre, allKeys):
            self.cas += [(self.rogers.quality[nom], nom)]
    # check whether the patient is being very brief
    if len(inputSplit) == 1:
        c = "Bref"
        self.cas += [(self.rogers.quality.get(c, 0), c)]
    c = "Retour"
    self.cas += [(self.rogers.quality.get(c, 0), c)]
    return self.repo()
#!/usr/bin/env python # -*- code=utf-8 -*- #By [email protected] #Using GPL v2 from lookup import lookup import sys if __name__ == "__main__": dict_prefix = "/usr/share/stardict/dic/stardict-langdao-ec-gb-2.4.2/langdao-ec-gb" #dict_prefix = "./dic/stardict-langdao-ec-gb-2.4.2/langdao-ec-gb" if len(sys.argv) != 2: print "give me a word" sys.exit(1) ifo_file = dict_prefix + ".ifo" f = file(ifo_file) s = f.readlines() wc = int(s[2].strip().split("=")[1]) file_size = int(s[3].strip().split("=")[1]) s = lookup(dict_prefix,file_size,wc,sys.argv[1]) print s
#!/usr/bin/python # coding:utf-8 import metaxml import conf import os import sys import upload import conf import lookup from archive import Item import archive import getpass import assembly if __name__ == '__main__': item = Item(conf.iTMSTransporter, conf.distribute_account, conf.distribute_pwd, conf.bundle_short_version_string, conf.bundle_version, conf.project_path, conf.scheme, conf.configuration, conf.provisioning_profile_name, conf.vendor_id) # 开始打包 archive.archive(item) # 获取itmsp lookup.lookup(item) # 准备上传 assembly.assembly(item) # 开始上传 upload.upload(item)
def __init__(self, root, is_lookup=True):
    """Build an empty network topology rooted at *root*.

    is_lookup: when True, attach the ip-lookup helper (lookup.lookup());
    a qqwry CZ database is always opened (downloaded when missing).
    """
    #nodes.
    self.node = []
    #dict for quick node lookup.
    self.dict = {}
    #stats for traces.
    self.num_traces = 0
    self.path_len_dist = [0 for i in range(1, 1000)]
    #path tree.
    self.ptr = []
    self.path_tree = []
    if (is_lookup):
        #ip lookup.
        self.lkp = lookup.lookup()
    else:
        self.lkp = None
    #czdb: fetch qqwry.dat on first use.
    self.czdb_path = "qqwry.dat"
    if not os.path.exists(self.czdb_path):
        qqwry.update_db(self.czdb_path)
    self.qqwry = qqwry.QQWry(self.czdb_path)
    #add root.
    self.num_edges = 0
    self.num_nodes = 1
    self.num_border = 0
    self.prev_index = -1
    r = node(root)
    self.node.append(r)
    self.dict[root] = 0
    #rtt.
    self.max_rtt = -1
    self.min_rtt = 10000
    self.rtt_list = []
    self.rtt_dist_x = []
    self.rtt_dist_y = []
    #deg.
    self.degree_list = []
    self.deg_dist_x = []
    self.deg_dist_y = []
    #networkx topo graph.
    self.graph = nx.Graph()
    #largest connected component.
    self.graph0 = nx.Graph()
    self.bet = {}
    self.bet_dist = [0 for i in range(100)]
    self.map_nodes = []
    self.map_nodes_dict = {}
    self.map_paths = []
    self.map_paths_dict = {}
    #data structure for simplified topo.
    self.data = {
        "nodes": {},
        "edges": []
    }
    self.visited = {}
    #knn.
    self.knn = {}
#!/usr/bin/env python3
import lookup
"""
Reverse-DNS lookup tool for all North Korean IP addresses.
North Korean IP ranges are 175.25.176.0 - 175.45.179.255 and
210.52.109.0 - 210.52.109.255
"""
print("North Korean DNS Lookup Tool\nType 'help' for commands")
commandInput = input()
if commandInput == "help":
    pass
elif commandInput == "urllookup":
    url = input("Enter URL:")
    # Fix: lookup.lookup(url) was evaluated once per branch (up to twice);
    # cache the single result so the DNS query only runs once.
    result = lookup.lookup(url)
    if result:
        print(url + " is hosted in North Korea.")
    elif result is None:
        # lookup failed / no answer: stay silent, matching original behavior.
        pass
    else:
        print("\033[1;42m" + url + " is not hosted in North Korea.\033[1;m")
# App Store submission driver: archive the Xcode project, fetch its itmsp
# package metadata, assemble the package, and upload it via iTMSTransporter.
import metaxml
import conf
import os
import sys
import upload
import conf
import lookup
from archive import Item
import archive
import getpass
import assembly

if __name__ == '__main__':
    # Describe this submission entirely from the conf module.
    item = Item(conf.iTMSTransporter, conf.distribute_account,
                conf.distribute_pwd, conf.bundle_short_version_string,
                conf.bundle_version, conf.project_path, conf.scheme,
                conf.configuration, conf.provisioning_profile_name,
                conf.vendor_id)
    # start archiving (build the app package)
    archive.archive(item)
    # fetch the itmsp package
    lookup.lookup(item)
    # prepare the upload
    assembly.assembly(item)
    # start the upload
    upload.upload(item)
def __init__(self, root, is_lookup=True):
    """Initialize an empty topology whose first registered node is *root*.

    is_lookup selects whether the ip-lookup helper is attached; the
    qqwry CZ database is always opened (downloaded when missing).
    """
    # node storage plus an index for O(1) lookup by address
    self.node = []
    self.dict = {}
    # trace statistics
    self.num_traces = 0
    self.path_len_dist = [0] * 999
    # path-tree bookkeeping
    self.ptr = []
    self.path_tree = []
    # optional ip-lookup helper
    self.lkp = lookup.lookup() if is_lookup else None
    # CZ (qqwry) database, fetched on demand
    self.czdb_path = "qqwry.dat"
    if not os.path.exists(self.czdb_path):
        qqwry.update_db(self.czdb_path)
    self.qqwry = qqwry.QQWry(self.czdb_path)
    # counters, then register the root node at index 0
    self.num_edges = 0
    self.num_nodes = 1
    self.num_border = 0
    self.prev_index = -1
    self.node.append(node(root))
    self.dict[root] = 0
    # round-trip-time statistics
    self.max_rtt = -1
    self.min_rtt = 10000
    self.rtt_list = []
    self.rtt_dist_x = []
    self.rtt_dist_y = []
    # degree statistics
    self.degree_list = []
    self.deg_dist_x = []
    self.deg_dist_y = []
    # networkx graphs: full topology and its largest connected component
    self.graph = nx.Graph()
    self.graph0 = nx.Graph()
    self.bet = {}
    self.bet_dist = [0] * 100
    self.map_nodes = []
    self.map_nodes_dict = {}
    self.map_paths = []
    self.map_paths_dict = {}
    # simplified topology used for rendering
    self.data = {"nodes": {}, "edges": []}
    self.visited = {}
    # k-nearest-neighbour cache
    self.knn = {}
#!/usr/bin/env python # -*- code=utf-8 -*- #By [email protected] #Using GPL v2 from lookup import lookup import sys if __name__ == "__main__": dict_prefix = "/usr/share/stardict/dic/stardict-langdao-ec-gb-2.4.2/langdao-ec-gb" #dict_prefix = "./dic/stardict-langdao-ec-gb-2.4.2/langdao-ec-gb" if len(sys.argv) != 2: print "give me a word" sys.exit(1) ifo_file = dict_prefix + ".ifo" f = file(ifo_file) s = f.readlines() wc = int(s[2].strip().split("=")[1]) file_size = int(s[3].strip().split("=")[1]) s = lookup(dict_prefix, file_size, wc, sys.argv[1]) print s
def send_request(self, department, course_number):
    """Query WebSoc for the given department and course number, storing
    the raw response on self.response.
    """
    dept_code = lookup(department)
    self.response = WebLib(dept_code, course_number).response
def modify_blocks(obj):
    """Drive the robot arm: pick each block of CURRENT_ARRAY from the
    arena and place it on the display area (mirroring progress in the
    GUI label of *obj*), then — unless FLAG aborted the forward pass —
    move everything back to the arena in reverse order.

    NOTE(review): if CURRENT_ARRAY is empty or contains only spaces, `p`
    is never assigned and the reverse loop raises NameError — confirm
    callers guarantee at least one non-space block.
    """
    global CURRENT_ARRAY_LENGTH
    global CURRENT_ARRAY
    global DISPLAY_AREA_POSITIONS
    global FLAG
    CURRENT_ARRAY_LENGTH = len(CURRENT_ARRAY)
    global_string = ''
    string = CURRENT_ARRAY
    FLAG = False
    display_area_calc()
    print(CURRENT_ARRAY_LENGTH)
    print("-----------------")
    for i in range (CURRENT_ARRAY_LENGTH):
        # spaces are rendered in the label but never picked
        if(CURRENT_ARRAY[i] == " "):
            global_string+=" "
            continue
        print(i)
        # print("LOOKUP_OUTPUT = ",lookup.LOOKUP_OUTPUT)
        # print("DYNA_1_POS = ",lookup.DYNA_1_POS)
        # print("DYNA_2_POS = ",lookup.DYNA_2_POS)
        #--------------- PICK FORWARD --------------------------
        print("Picking ",CURRENT_ARRAY[i]," from arena")
        global_string+=string[i]
        obj.update_label(global_string)
        lookup.lookup(CURRENT_ARRAY[i],0)
        # # eg:- "A",pick
        # print("LOOKUP_OUTPUT = ",lookup.LOOKUP_OUTPUT)
        # lookup.LOOKUP_OUTPUT holds the target servo positions.
        dynamixel.GO_TO_DYNA_1_POS = lookup.LOOKUP_OUTPUT[0]
        dynamixel.GO_TO_DYNA_2_POS = lookup.LOOKUP_OUTPUT[1]
        dynamixel.dyna_move()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #arduino.pick(LOOKUP_OUTPUT[2])
        # print("DYNA_1_POS = ",lookup.DYNA_1_POS)
        # print("DYNA_2_POS = ",lookup.DYNA_2_POS)
        # print("----")
        #-------------------------------------------------------
        time.sleep(3)
        print("----")
        #---------------- PLACE FORWARD ------------------------
        print("Placing ",CURRENT_ARRAY[i]," on display area")
        global_string+='....'
        obj.update_label(global_string)
        dynamixel.GO_TO_DYNA_1_POS = DISPLAY_AREA_POSITIONS[i][0]
        dynamixel.GO_TO_DYNA_2_POS = DISPLAY_AREA_POSITIONS[i][1]
        dynamixel.dyna_move()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #arduino.place(DISPLAY_AREA_something)
        # p remembers how far the forward pass got (for the reverse pass).
        p = i+1
        if(FLAG):
            break
        #-------------------------------------------------------
        print("-----------------")
        time.sleep(3)
    print("wait thoda...\nwait thoda...\nwait thoda...")
    print("-----------------")
    for k in range (p):
        i = p-k-1
        if(CURRENT_ARRAY[i] == " "):
            global_string = global_string[:-1]
            continue
        #----------------- PICK REVERSE ------------------------
        print("Picking ",CURRENT_ARRAY[i]," from display area")
        global_string = global_string[:-4]
        obj.update_label(global_string)
        dynamixel.GO_TO_DYNA_1_POS = DISPLAY_AREA_POSITIONS[i][0]
        dynamixel.GO_TO_DYNA_2_POS = DISPLAY_AREA_POSITIONS[i][1]
        dynamixel.dyna_move()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #arduino.pick(DISPLAY_AREA_something)
        #-------------------------------------------------------
        time.sleep(3)
        print("----")
        #--------------- PLACE REVERSE --------------------------
        print("Placing ",CURRENT_ARRAY[i]," in arena")
        global_string = global_string[:-1]
        obj.update_label(global_string)
        lookup.lookup(CURRENT_ARRAY[i],1)
        dynamixel.GO_TO_DYNA_1_POS = lookup.LOOKUP_OUTPUT[0]
        dynamixel.GO_TO_DYNA_2_POS = lookup.LOOKUP_OUTPUT[1]
        dynamixel.dyna_move()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #arduino.place(LOOKUP_OUTPUT[2])
        #-------------------------------------------------------
        print("-----------------")
        time.sleep(3)
    print("bring it on")
def about():
    """Render the About page with a sample annotated phrase (GET only)."""
    if request.method != "GET":
        return None
    sample = lookup("读中文")
    return render_template("about.html", phrases=sample)
def search_word(self, word):
    """Look up *word* (whitespace-stripped) in the StarDict dictionary
    and return the raw entry text.
    """
    return lookup(self.dict_prefix, self.file_size, self.word_count, word.strip())
def modify_blocks():
    """Drive the robot arm: pick each block of CURRENT_ARRAY from the
    trash and place it on the display area, then return every block to
    the trash in reverse order. Servo targets come from lookup.lookup()
    (pick=0 / place=1) via lookup.LOOKUP_OUTPUT.
    """
    global CURRENT_ARRAY_LENGTH
    global CURRENT_ARRAY
    global DISPLAY_AREA_POSITIONS
    CURRENT_ARRAY_LENGTH = len(CURRENT_ARRAY)
    display_area_calc()
    print(CURRENT_ARRAY_LENGTH)
    print("-----------------")
    for i in range (CURRENT_ARRAY_LENGTH):
        print(i)
        # print("LOOKUP_OUTPUT = ",lookup.LOOKUP_OUTPUT)
        # print("DYNA_1_POS = ",lookup.DYNA_1_POS)
        # print("DYNA_2_POS = ",lookup.DYNA_2_POS)
        #--------------- PICK FORWARD --------------------------
        print("Picking ",CURRENT_ARRAY[i]," from trash")
        lookup.lookup(CURRENT_ARRAY[i],0)
        # # eg:- "A",pick
        # print("LOOKUP_OUTPUT = ",lookup.LOOKUP_OUTPUT)
        # lookup.LOOKUP_OUTPUT holds the target servo positions.
        dynamixel.GO_TO_DYNA_1_POS = lookup.LOOKUP_OUTPUT[0]
        dynamixel.GO_TO_DYNA_2_POS = lookup.LOOKUP_OUTPUT[1]
        dynamixel.dyna_write()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        # print("DYNA_1_POS = ",lookup.DYNA_1_POS)
        # print("DYNA_2_POS = ",lookup.DYNA_2_POS)
        # print("----")
        #-------------------------------------------------------
        print("----")
        #---------------- PLACE FORWARD ------------------------
        print("Placing ",CURRENT_ARRAY[i]," on display area")
        dynamixel.GO_TO_DYNA_1_POS = DISPLAY_AREA_POSITIONS[i][0]
        dynamixel.GO_TO_DYNA_2_POS = DISPLAY_AREA_POSITIONS[i][1]
        dynamixel.dyna_write()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #-------------------------------------------------------
        print("-----------------")
    print("wait thoda...\nwait thoda...\nwait thoda...")
    print("-----------------")
    # Reverse pass: walk the array backwards, returning blocks to the trash.
    for k in range (CURRENT_ARRAY_LENGTH):
        i=CURRENT_ARRAY_LENGTH-k-1
        #----------------- PICK REVERSE ------------------------
        print("Picking ",CURRENT_ARRAY[i]," from display area")
        dynamixel.GO_TO_DYNA_1_POS = DISPLAY_AREA_POSITIONS[i][0]
        dynamixel.GO_TO_DYNA_2_POS = DISPLAY_AREA_POSITIONS[i][1]
        dynamixel.dyna_write()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #-------------------------------------------------------
        print("----")
        #--------------- PLACE REVERSE --------------------------
        print("Placing ",CURRENT_ARRAY[i]," in trash")
        lookup.lookup(CURRENT_ARRAY[i],1)
        dynamixel.GO_TO_DYNA_1_POS = lookup.LOOKUP_OUTPUT[0]
        dynamixel.GO_TO_DYNA_2_POS = lookup.LOOKUP_OUTPUT[1]
        dynamixel.dyna_write()
        lookup.DYNA_1_POS = dynamixel.GO_TO_DYNA_1_POS
        lookup.DYNA_2_POS = dynamixel.GO_TO_DYNA_2_POS
        #-------------------------------------------------------
        print("-----------------")
    print("bring it on")