def test_keywords(self):
    self._test_generator('while (true)', [
        Token(content='while', type='builtin'),
        Token(content=' ', type=''),
        Token(content='(', type='operator'),
        Token(content='true', type='keyword'),
        Token(content=')', type='operator')
    ])
def scanIdentifier(self):
    s = ""
    while True:
        c = self.getc()
        if c == '':
            break
        if not (c.isdigit() or c.isalpha() or c == '_'):
            self.goback()
            break
        s += c
    if s in keywords:
        return Token(Element('"' + s + '"'), s, self.row, self.col)
    else:
        return Token(Element('"IDN"'), s, self.row, self.col)
async def dictionary(self, ctx, *, word):
    """Look up the definition of any word using an actual dictionary"""
    url = "https://od-api.oxforddictionaries.com:443/api/v1/entries/en/{}".format(word)
    request = Request(url)
    request.add_header("Accept", "application/json")
    request.add_header("app_id", "e01b354a")
    request.add_header("app_key", Token.dictionary())
    try:
        data = json.loads(urlopen(request).read().decode())
    except Exception:
        await ctx.send("No results :no_entry:")
        return
    definition = data["results"][0]["lexicalEntries"][0]["entries"][0]["senses"][0]["definitions"][0]
    pronounce = data["results"][0]["lexicalEntries"][0]["pronunciations"][0]["phoneticSpelling"]
    s = discord.Embed(colour=ctx.message.author.colour)
    s.set_author(
        name=data["results"][0]["id"],
        url="https://en.oxforddictionaries.com/definition/{}".format(
            data["results"][0]["id"]))
    s.add_field(name="Definition", value=definition)
    s.add_field(name="Pronunciation", value=pronounce, inline=False)
    await ctx.send(embed=s)
async def send_msgs(host: str, port: int, token: Token,
                    sending_queue: asyncio.Queue, status_queue: asyncio.Queue,
                    watchdog_queue: asyncio.Queue) -> None:
    authorized = False
    async with connect(
            host=host, port=port, status_queue=status_queue,
            connect_state_mode=gui.SendingConnectionStateChanged) as (reader, writer):
        if not token.is_exist:
            status_queue.put_nowait(gui.SendingConnectionStateChanged.CLOSED)
            token.value, user = await registration(reader, writer)
            status_queue.put_nowait(gui.SendingConnectionStateChanged.INITIATED)
            authorized = True
            await watchdog_queue.put(WatchdogSwitcher.ENABLE)
        if not authorized:
            await watchdog_queue.put("Prompt before auth")
            user = await authorise(reader, writer, token.value)
            await status_queue.put(gui.NicknameReceived(user))
            await watchdog_queue.put("Authorization done")
        status_queue.put_nowait(gui.SendingConnectionStateChanged.ESTABLISHED)
        async with create_handy_nursery() as nursery:
            nursery.start_soon(
                send_message(writer=writer, sending_queue=sending_queue,
                             watchdog_queue=watchdog_queue))
            nursery.start_soon(ping_pong(reader, writer, watchdog_queue))
async def handle_connection(host: str, reader_port: int, writer_port: int,
                            token: str, messages_queue: asyncio.Queue,
                            status_queue: asyncio.Queue,
                            history_queue: asyncio.Queue,
                            sending_queue: asyncio.Queue,
                            watchdog_queue: asyncio.Queue) -> None:
    token = Token(token)
    while True:
        try:
            async with create_handy_nursery() as nursery:
                nursery.start_soon(
                    connection.read_msgs(host=host, port=reader_port,
                                         messages_queue=messages_queue,
                                         status_queue=status_queue,
                                         history_queue=history_queue,
                                         watchdog_queue=watchdog_queue))
                nursery.start_soon(
                    connection.send_msgs(host=host, port=writer_port,
                                         token=token,
                                         sending_queue=sending_queue,
                                         status_queue=status_queue,
                                         watchdog_queue=watchdog_queue))
                nursery.start_soon(
                    connection.watch_for_connection(
                        watchdog_queue=watchdog_queue))
        except aionursery.MultiError as e:
            if not any(isinstance(ex, ConnectionError) for ex in e.exceptions):
                raise
        except ConnectionError:
            await asyncio.sleep(RECONNECT_DELAY)
        else:
            return
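# A minimal usage sketch for handle_connection (HOST, READER_PORT,
# WRITER_PORT and TOKEN are hypothetical names, not defined above): all five
# queues must be created on the running event loop before the call.
#
# async def run_client():
#     messages_queue = asyncio.Queue()
#     status_queue = asyncio.Queue()
#     history_queue = asyncio.Queue()
#     sending_queue = asyncio.Queue()
#     watchdog_queue = asyncio.Queue()
#     await handle_connection(HOST, READER_PORT, WRITER_PORT, TOKEN,
#                             messages_queue, status_queue, history_queue,
#                             sending_queue, watchdog_queue)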
def main():
    token = Token()
    all_services = ListServices(session_auth=token.session_auth)
    services = all_services.neutron_services_list()
    data = [{"{#NAME}": service['binary'], "{#HOST}": service['host']}
            for service in services]
    print(json.dumps({"data": data}, indent=4))
def main():
    token = Token()
    all_services = ListServices(session_auth=token.session_auth)
    services = all_services.neutron_services_list()
    for service in services:
        if (sys.argv[1] == service['host']
                and sys.argv[2] == service['binary']):
            print(service['alive'])
def main():
    token = Token()
    all_services = ListServices(session_auth=token.session_auth)
    services = all_services.cinder_services_list()
    for service in services:
        if (sys.argv[1] == service['host']
                and sys.argv[2] == service['service_name']):
            print(service['state'])
def connect(signal):
    consumer = Token(CONSUMER_KEY, CONSUMER_SECRET)
    token = Token(ACCESS_KEY, ACCESS_SECRET)
    parameters = {
        'oauth_consumer_key': CONSUMER_KEY,
        'oauth_token': token.key,
        'oauth_signature_method': 'HMAC-SHA1',
        'oauth_timestamp': str(int(time.time())),
        'oauth_nonce': token._generate_nonce(),
        'oauth_version': '1.0',
    }
    access_token = token
    oauth_request = OAuthRequest.from_token_and_callback(
        access_token, http_url=STREAM_URL, parameters=parameters)
    signature_method = OAuthSignatureMethod_HMAC_SHA1()
    signature = signature_method.build_signature(
        oauth_request, consumer, access_token)
    parameters['oauth_signature'] = signature
    data = urllib.urlencode(parameters)
    print "%s?%s" % (STREAM_URL, data)
    req = urllib2.urlopen("%s?%s" % (STREAM_URL, data))
    buffer = ''
    while True:
        chunk = req.read(1)
        if not chunk:
            print buffer
            break
        chunk = unicode(chunk)
        buffer += chunk
        tweets = buffer.split("\r\n", 1)
        if len(tweets) > 1:
            #print tweets[0]
            signal.emit(tweets[0])
            buffer = tweets[1]
def _read(self):
    """(Only) reads the next token from the list, without consuming it."""
    if self.tokens:
        self._current_line = self.tokens[0].line
        return self.tokens[0]
    else:
        return Token(False, False, False, False)
def get_next_token(self, see_only=False):
    if self._pos == len(self.source):
        return Token('\0', TokenEnum.eof, self._pos)
    token_t = None
    for i in Regex.types_list:
        regex_result = Regex.match(i, self.source[self._pos:])
        if regex_result not in (None, ''):
            token_t = Token(regex_result,
                            TokenEnum.types_list[Regex.types_list.index(i)],
                            self._pos)
            break
    if token_t is None:
        line, col = pos_to_line(self.source, self._pos)
        raise BracesException(
            "cannot determine symbol at line {} column {}".format(line, col))
    if token_t.dtype in self.suppressTypes:
        self._pos += token_t.len
        return self.get_next_token(see_only)
    if not see_only:
        self._pos += token_t.len
    return token_t
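# Sketch of the see_only contract above (the enclosing lexer instance is
# assumed): peeking with see_only=True leaves _pos untouched, so the next
# consuming call returns the very same token.
#
# peeked = lexer.get_next_token(see_only=True)
# consumed = lexer.get_next_token()
# assert peeked.dtype == consumed.dtype and peeked.len == consumed.len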
def main():
    token = Token()
    client = OpenstackClient(session_auth=token.session_auth)
    packet_vms = check_vms(client)
    packet_volumes = check_volumes(client)
    packet.extend(packet_vms)
    packet.extend(packet_volumes)
    result = ZabbixSender(zserver, port, use_config=None).send(packet)
    return result
async def youtube(self, ctx, *, search: str):
    url = ("https://www.googleapis.com/youtube/v3/search?key="
           + Token.youtube()
           + "&part=snippet&safeSearch=none&{}".format(
               urllib.parse.urlencode({"q": search})))
    request = requests.get(url)
    try:
        await ctx.send("https://www.youtube.com/watch?v={}".format(
            request.json()["items"][0]["id"]["videoId"]))
    except Exception:
        await ctx.send("No results :no_entry:")
def _get(self):
    """Returns and removes the next token from the list."""
    if self.tokens:
        symbol = self.tokens.pop(0)
        self._current_line = symbol.line
        return symbol
    else:
        return Token(False, False, False, False)
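# _read and _get form a peek/consume pair; a hypothetical parser step might
# look like this (the token attribute names are assumptions for illustration):
#
# token = self._read()        # peek without consuming
# if token.type == 'reserved':
#     token = self._get()     # now actually consume it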
async def discordmeme(self, ctx):
    """Have a discord meme"""
    url = "https://api.weeb.sh/images/random?type=discord_memes"
    request = Request(url)
    request.add_header("Authorization", "Wolke " + Token.wolke())
    request.add_header('User-Agent', 'Mozilla/5.0')
    data = json.loads(urlopen(request).read().decode())
    s = discord.Embed()
    s.set_image(url=data["url"])
    s.set_footer(text="Powered by weeb.sh")
    await ctx.send(embed=s)
async def quote(self, ctx):
    """Gives you a random quote"""
    request = requests.post(
        "https://andruxnet-random-famous-quotes.p.mashape.com/",
        headers={
            "X-Mashape-Key": Token.mashape(),
            "Content-Type": "application/x-www-form-urlencoded",
            "Accept": "application/json"
        }).json()[0]
    await ctx.send(embed=discord.Embed(description=request["quote"],
                                       title=request["author"],
                                       colour=ctx.author.colour))
async def shorten(self, ctx, *, url):
    url1 = "https://api.rebrandly.com/v1/links"
    request = requests.post(url1,
                            data=json.dumps({"destination": url}),
                            headers={
                                "Content-Type": "application/json",
                                "apikey": Token.rebrandly()
                            })
    try:
        # the API returns a "message" field only on errors, so probing for it
        # distinguishes a failed request from a successful shortening
        request.json()["message"]
        await ctx.send("Invalid Url :no_entry:")
    except KeyError:
        await ctx.send("<https://" + request.json()["shortUrl"] + ">")
def scanInteger(self):
    number = 0
    isDecimal = False
    decimal = 0
    factor = 1
    while True:
        c = self.getc()
        if c.isdigit():
            if not isDecimal:
                number = number * 10 + int(c)
            else:
                factor /= 10
                decimal = decimal + factor * int(c)
        elif c == '.':
            if isDecimal:
                raise Exception("Repeated '.'!")
            isDecimal = True
        else:
            self.goback()
            break
    if isDecimal:
        # combine the integer and fractional parts into the float value
        return Token(Element('"FLOAT"'), number + decimal, self.row, self.col)
    else:
        return Token(Element('"INT10"'), number, self.row, self.col)
def scanSymbol(self):
    symbol = ""
    while True:
        c = self.getc()
        if c == '':
            break
        if c not in "!,+-*/%=(){}[];<>|^&:\"":
            self.goback()
            break
        if (symbol + c) in Code.__dict__:
            symbol += c
        else:
            self.goback()
            break
    return Token(Element('"' + symbol + '"'), symbol, self.row, self.col)
def main():
    token = Token()
    client = OpenstackClient(session_auth=token.session_auth)
    servers = client.nova_api.servers.list(search_opts={'all_tenants': 1})
    total_vm_start = 0
    total_vm_stop = 0
    for server in servers:
        if server.status == 'ACTIVE':
            total_vm_start += 1
        else:
            total_vm_stop += 1
    if sys.argv[1] == 'running':
        print(total_vm_start)
    elif sys.argv[1] == 'shutoff':
        print(total_vm_stop)
def scan(self):
    while True:
        c = self.getc()
        if c == "":
            return Token(Element('"#"'), "", self.row, self.col)
        if c == " ":
            continue
        if c in "!,+-*/%=(){}[];<>|^&:\"":
            self.goback()
            return self.scanSymbol()
        elif c.isdigit():
            self.goback()
            return self.scanInteger()
        elif c.isalpha() or c == '_':
            self.goback()
            return self.scanIdentifier()
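# Hypothetical driver loop for the scanner above (the Lexer class name, its
# constructor and the token attribute name are assumptions): scan() is called
# until the end-of-input token, whose element is '"#"' and whose value is "",
# comes back.
#
# lexer = Lexer(source)
# while True:
#     token = lexer.scan()
#     print(token)
#     if token.value == "":
#         break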
def check_status_service(session):
    """Get the agent information of each service.

    Arguments:
        session {object} -- an OpenStack session

    Returns:
        tuple -- the cinder, nova and neutron service lists
    """
    token = Token()
    client = OpenstackClient(session_auth=token.session_auth)
    cinder_services = client.cinder_api.services.list()
    neutron_services = client.neutron_api.list_agents()
    nova_services = client.nova_api.services.list()
    return cinder_services, nova_services, neutron_services
def tokenize(self, program: str) -> Iterable[Token]:
    program = re.sub(r'#.*', '', program).rstrip()
    pos = 0
    line = 1
    size = len(program)
    while pos < size:
        match = self.pattern.match(program, pos)
        if not match:
            raise LexerError('Error: invalid token in line {}'.format(line))
        pos = match.end()
        line += program[match.start():match.end()].count('\n')
        for k, v in match.groupdict().items():
            if v:
                token_type = TokenType.IDENTIFIER
                if k == 'int':
                    token_type = TokenType.INT
                    if v == 'true':
                        v = 1
                    elif v == 'false':
                        v = 0
                    else:
                        v = int(v)
                elif k == 'char':
                    token_type = TokenType.INT
                    if v in Lexer.escape:
                        v = Lexer.escape[v]
                    v = ord(v)
                elif k == 'id' and v in Lexer.types:
                    token_type = TokenType.TYPE
                # v is set to k for keywords
                elif k == 'id' and v in Lexer.keywords:
                    token_type = TokenType(v)
                    if v == 'inline':
                        inline = self.inline.match(program, pos)
                        if not inline:
                            raise LexerError('unexpected EOF')
                        pos = inline.end()
                        line += program[inline.start():inline.end()].count('\n')
                        v = re.sub(r'[^+\-><\[\].,]', '', inline[1])
                elif k == 'op':
                    token_type = TokenType.OPERATOR
                elif k == 'sep':
                    token_type = TokenType.SEPARATOR
                yield Token(line, token_type, v)
                break
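# Usage sketch for tokenize (the Lexer constructor and the sample program are
# assumptions): tokens are produced lazily by the generator, so a LexerError
# only surfaces while iterating.
#
# lexer = Lexer()
# for token in lexer.tokenize('int main() { return 0; }'):
#     print(token)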
def start(self):
    # read the file
    content = self.file.read()
    # close the file
    self.file.close()
    content += '\n'
    content = str.upper(content)
    state = 0
    for i in range(len(content)):
        if content[i] == '\n':
            self.comment = False
        # discard the comment
        if not self.comment:
            state = self.next_state(state, content[i])
    # add the ending token
    self.tokens.append(Token(Token_Type.NONTOKEN.name, "", 0.0, None))
async def googleimage(self, ctx, *, search):
    """Returns an image from Google based on your search"""
    url = ("https://www.googleapis.com/customsearch/v1?key="
           + Token.google()
           + "&cx=014023765838117903829:klo2euskkae&searchType=image&{}".format(
               urllib.parse.urlencode({"q": search})))
    request = Request(url)
    data = json.loads(urlopen(request).read().decode())
    s = discord.Embed()
    s.set_author(
        name="Google",
        icon_url="https://images-ext-1.discordapp.net/external/UsMM0mPPHEKn6WMst8WWG9qMCX_A14JL6Izzr47ucOk/http/i.imgur.com/G46fm8J.png",
        url="https://www.google.co.uk/search?{}".format(
            urllib.parse.urlencode({"q": search})))
    try:
        s.set_image(url=data["items"][0]["image"]["thumbnailLink"])
    except Exception:
        await ctx.send("No results :no_entry:")
        return
    await ctx.send(embed=s)
async def playlist(self, ctx, *, query):
    """Search for and play a playlist from YouTube"""
    player = self.bot.lavalink.players.get(ctx.guild.id)
    if player.is_connected:
        if (not ctx.author.voice or not ctx.author.voice.channel
                or player.connected_channel.id != ctx.author.voice.channel.id):
            return await ctx.send("I'm already in a voice channel :no_entry:")
    else:
        if not ctx.author.voice or not ctx.author.voice.channel:
            return await ctx.send("You are not in a voice channel :no_entry:")
        else:
            player.store('sessionowner', ctx.author.id)
            player.store('channel', ctx.channel.id)
            await player.connect(ctx.author.voice.channel.id)
    url = ("https://www.googleapis.com/youtube/v3/search?key="
           + Token.youtube()
           + "&part=snippet&safeSearch=none&maxResults=10&type=playlist&{}".format(
               urllib.parse.urlencode({"q": query})))
    request = requests.get(url).json()
    if not request["items"]:
        return await ctx.send("No results :no_entry:")
    event = await paged.page(
        ctx, request["items"], selectable=True,
        function=lambda x: "**[{}]({})**".format(
            x["snippet"]["title"],
            "https://www.youtube.com/playlist?list=" + x["id"]["playlistId"]))
    if event:
        results = await self.bot.lavalink.get_tracks(
            "https://www.youtube.com/playlist?list="
            + event["object"]["id"]["playlistId"])
        tracks = results["tracks"]
        for track in tracks:
            player.add(requester=ctx.author.id, track=track)
        s = discord.Embed()
        s.description = "Enqueued {} with **{}** tracks <:done:403285928233402378>".format(
            results['playlistInfo']['name'], len(tracks))
        await ctx.send(embed=s)
        if not player.is_playing:
            await player.play()
async def google(self, ctx, *, search):
    """Returns the top 5 Google results for your search query"""
    url = ("https://www.googleapis.com/customsearch/v1?key="
           + Token.google()
           + "&cx=014023765838117903829:mm334tqd3kg&{}".format(
               urllib.parse.urlencode({"q": search})))
    request = Request(url)
    data = json.loads(urlopen(request).read().decode())
    try:
        results = "\n\n".join([
            "**[{}]({})**\n{}".format(x["title"], x["link"], x["snippet"])
            for x in data["items"]
        ][:5])
    except Exception:
        await ctx.send("No Results :no_entry:")
        return
    s = discord.Embed(description=results)
    s.set_author(
        name="Google",
        icon_url="https://images-ext-1.discordapp.net/external/UsMM0mPPHEKn6WMst8WWG9qMCX_A14JL6Izzr47ucOk/http/i.imgur.com/G46fm8J.png",
        url="https://www.google.co.uk/search?{}".format(
            urllib.parse.urlencode({"q": search})))
    await ctx.send(embed=s)
def _commit_token(self, code):
    RESERVED_WORDS = [
        'program', 'var', 'integer', 'real', 'boolean', 'procedure',
        'begin', 'end', 'if', 'then', 'else', 'while', 'do', 'not'
    ]
    if self._current_symbol:
        temp = ''.join(self._current_symbol)
        if code == 'identifier':
            if temp in RESERVED_WORDS:
                self.tokens.append(
                    Token(temp, 'reserved', self._current_line,
                          self._current_id))
            elif temp == 'and':
                self.tokens.append(
                    Token(temp, 'multiplication', self._current_line,
                          self._current_id))
            elif temp == 'or':
                self.tokens.append(
                    Token(temp, 'addition', self._current_line,
                          self._current_id))
            elif temp in ['true', 'false']:
                self.tokens.append(
                    Token(temp, 'boolean', self._current_line,
                          self._current_id))
            else:
                self.tokens.append(
                    Token(temp, 'identifier', self._current_line,
                          self._current_id))
        elif code == 'invalid':
            self._send_alert('invalid')
        elif code == 'comment':
            pass  # do nothing
        else:
            self.tokens.append(
                Token(temp, code, self._current_line, self._current_id))
        self._current_symbol = []  # reset the temporary buffer
        self._current_id += 1
def build_index(in_dir, out_dict, out_postings):
    """
    build index from documents stored in the input directory,
    then output the dictionary file and postings file
    """
    print('indexing...')

    # read the files
    corpus = PlaintextCorpusReader(in_dir, '.*')
    file_names_str = corpus.fileids()
    file_names = sorted(map(int, file_names_str))

    # load the corpus and generate the postings dictionary
    postings = defaultdict(dict)
    tokens = list()
    for docID in file_names:
        content = corpus.raw(str(docID))  # read file content
        content = preprocess(content)
        words = tokenize(content)  # tokenization: content -> words
        tokens = stemming(words)  # stemming

        if phrasal_query:
            token_len = defaultdict(list)
        else:
            token_len = defaultdict(int)

        # count how many times each token appears in the file
        term_pos = 0
        for token in tokens:
            if phrasal_query:
                if token in token_len.keys():
                    token_len[token][0] += 1
                    token_len[token][1].append(term_pos)
                else:
                    token_len[token] = [1, [term_pos]]
            else:
                token_len[token] += 1
            term_pos += 1

        '''
        Generate weighted token frequency.
        Generate dictionary of key -> token, value -> a dict with k,v as
        file_name, weighted_token_frequency
        '''
        if phrasal_query:
            weighted_tokenfreq = normalize(
                [get_tf(y[0]) for (x, y) in token_len.items()])
            for ((token, freq), w_tf) in zip(token_len.items(),
                                             weighted_tokenfreq):
                postings[token][docID] = PhrasalToken(freq[0], freq[1], w_tf)
        else:
            weighted_tokenfreq = normalize(
                [get_tf(y) for (x, y) in token_len.items()])
            for ((token, freq), w_tf) in zip(token_len.items(),
                                             weighted_tokenfreq):
                postings[token][docID] = Token(w_tf)

    '''
    Output dictionary and postings files
    - Dictionary file stores all the tokens, with their doc frequency and
      their offset in the postings file.
    - Postings file stores the list of tuples -> (document ID, term freq).
    '''
    # write postings file
    dictionary = defaultdict(Entry)
    with open(out_postings, mode="wb") as postings_file:
        for key, value in postings.items():
            '''
            len(value) := the document frequency of the token
                       := in how many documents the token appears
            offset := current writing position of the postings file
            '''
            # write each posting list exactly once; write() returns the
            # number of bytes written, which becomes the entry's size
            offset = postings_file.tell()
            size = postings_file.write(pickle.dumps(value))
            dictionary[key] = Entry(len(value), offset, size)

    # write dictionary file
    with open(out_dict, mode="wb") as dictionary_file:
        pickle.dump(url_map, dictionary_file)
        pickle.dump(doc_id_map, dictionary_file)
        pickle.dump(pr_result, dictionary_file)
        pickle.dump(dictionary, dictionary_file)

    print("dictionary done")
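# A minimal sketch of how the (doc_freq, offset, size) triple stored in each
# Entry can be used to load one posting list without unpickling the whole
# postings file (the Entry field names and some_token are assumptions):
#
# with open(out_postings, mode="rb") as postings_file:
#     entry = dictionary[some_token]
#     postings_file.seek(entry.offset)
#     posting_list = pickle.loads(postings_file.read(entry.size))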
        ] + ['<@440996323156819968> ']
    except Exception:
        pass
    try:
        return [
            x.encode().decode()
            for x in Prefix._prefixes["serverprefix"][str(message.guild.id)]
        ] + ['<@440996323156819968> ']
    except Exception:
        pass
    return ['sx4 ', 's?', 'S?', '<@440996323156819968> ']


bot = commands.AutoShardedBot(command_prefix=prefix_function)
wrap = "```py\n{}\n```"
dbltoken = Token.dbl()
dbotspwtoken = Token.dbpw()
botspacetoken = Token.botlistspace()
konomitoken = Token.konomi()
dbpwurl = "https://bots.discord.pw/api/bots/440996323156819968/stats"
url = "https://discordbots.org/api/bots/440996323156819968/stats"
botspaceurl = "https://botlist.space/api/bots/440996323156819968/"
konomiurl = "http://bots.disgd.pw/api/bot/440996323156819968/stats"
headers = {"Authorization": dbltoken}
headersdb = {"Authorization": dbotspwtoken, "Content-Type": "application/json"}
headerskon = {"Authorization": konomitoken, "Content-Type": "application/json"}
headersbs = {
    "Authorization": botspacetoken,
    "Content-Type": "application/json"
}
import time
import datetime
from utils import checks
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import requests
from cogs import mod
from utils import Token
import aiohttp
import json
import traceback
import sys
import os
import subprocess

dbltoken = Token.dbl()
dbotspwtoken = Token.dbpw()
botspacetoken = Token.botlistspace()
dbpwurl = "https://discord.bots.gg/api/v1/bots/440996323156819968/stats"
url = "https://discordbots.org/api/bots/440996323156819968/stats"
botspaceurl = "https://api.botlist.space/v1/bots/440996323156819968/"
headers = {"Authorization": dbltoken}
headersdb = {"Authorization": dbotspwtoken, "Content-Type": "application/json"}
headersbs = {"Authorization": botspacetoken, "Content-Type": "application/json"}


class serverpost:
    def __init__(self, bot, connection):
        self.bot = bot
        self.db = connection
        self.task = bot.loop.create_task(self.server_post())
import fileinput
import pickle

# pickle files must be opened in binary mode
wico = pickle.load(open('resources/results_wico.p', 'rb'))
lefff = pickle.load(open('lefff_pickle.p', 'rb'))

for line in fileinput.input():
    line = line.strip()
    words = map(Token.from_str, line.split(' '))
    spellchecked = []

    # Step 1: spellcheck
    for w in words:
        if 'TMP_TAG' not in w.getannotations():
            spellchecked.append(w)
            continue
        if w.getform() in wico:
            spellchecked.append(Token.update_spelling(w, wico[w.getform()]))
        else:
            lefff_cand = get_candidates_from_lefff(w.getform())
            lefff_corr = closest_word(lefff_cand, w.getform())
            if lefff_corr and lefff_corr != w.getform():
                spellchecked.append(Token.update_spelling(w, lefff_corr))
            else:
                spellchecked.append(w)

    # Step 2: compound words
    merged = []
    slen = len(spellchecked)
    i = 0
    while i < len(spellchecked):
        # TODO include annotations in merge
        res = compounds_automaton.recognize(