def getBundle(b):
    """Scrape the current Humble Bundle page and print its item list.

    b selects the bundle page: 'm' -> mobile, 'b' -> books, anything
    else -> the main bundle.  Charity names, promo shirts and soundtrack
    entries share the item markup and are filtered out.
    """
    # Known non-game entries that appear in the same <span class="game-box">
    # markup as real bundle items.
    charities = ["Electronic Frontier Foundation", "American Red Cross",
                 "Child's Play Charity", "Mozilla Foundation", "CodeNow",
                 "Maker Education Initiative", "Save the Children",
                 "charity: water", "Exclusive Dreamcast T-Shirt", "AbleGamers",
                 "Willow", "SpecialEffect", "GamesAid", "Girls Who Code",
                 "The V Foundation", "buildOn", "The IndieCade Foundation",
                 "Extra Life / Children's Miracle Network Hospitals",
                 "Heifer International", "Comic Book Legal Defense Fund",
                 "More games coming soon!", "More content coming soon!"]
    items = []
    # BUG FIX: was `b is 'm'` / `b is 'b'` -- identity comparison against a
    # string literal only works by accident of interning; use equality.
    if b == 'm':
        url = "https://humblebundle.com/mobile"
    elif b == 'b':
        url = "https://humblebundle.com/books"
    else:
        url = "https://humblebundle.com/"
    soup = bs4.BeautifulSoup(requests.get(url).text, "html.parser")
    res = soup.find_all('span', 'game-box')
    if res:
        bTitle = soup.find('img', class_="promo-logo")['alt']
        for i in res:
            item = i.find('img')['alt']
            if item not in charities and "Soundtrack" not in item:
                items.append(item)
        print_console("07%s: %s" % (bTitle, ", ".join(items)))
    else:
        print_console("This bundle is over!")
def compare_users(self, user, user2):
    """Print a taste-similarity report between two Last.fm users.

    Pulls each user's 1000 overall top artists, intersects them, and
    prints a similarity percentage plus up to five common artists.
    """
    try:
        favs_a = self.api.get_user(user).get_top_artists('overall', 1000)
        favs_b = self.api.get_user(user2).get_top_artists('overall', 1000)
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
    total_a = len(favs_a)
    total_b = len(favs_b)
    names_a = [entry.item.__str__() for entry in favs_a]
    names_b = [entry.item.__str__() for entry in favs_b]
    shared = [name for name in names_a if name in names_b]
    top_shared = shared[:5]
    # 200 * |shared| / (|a| + |b|): 100% when both lists are identical.
    comparison_index = round(200.0 * len(shared) / (total_a + total_b), 2)
    if comparison_index < 1.0:
        bar = PRETTY_BAR[4]
    else:
        bar = PRETTY_BAR[int(comparison_index / 25.01)]
    if top_shared:
        chart_text = ", ".join(str(name) for name in top_shared)
    else:
        chart_text = "N/A"
    print_console(LEL + " Comparison: %s %s %s: Similarity: %d%% - Common Artists: %s" % (user, bar, user2, comparison_index, chart_text))
def init():
    """Create each configured feed, mark its backlog read, and persist it."""
    for cfg in FEEDS:
        feed = Feed(cfg["id"], cfg["logo"], cfg["url"])
        print_console("Init %s" % feed.feedid)
        feed.mark_all_as_read()
        feed.save()
    print_console("All unseen items marked as read.")
def degredo():
    """Fetch a random case ("processo aleatorio") from the API and print it."""
    resp = requests.get('https://inquisicao.deadbsd.org/api/degredo',
                        timeout=TIMEOUT)
    print_console(auto_de_fe(resp.json()))
def get_user_info(self, user):
    """Fetch and print a Last.fm user's profile summary (dict-based API)."""
    try:
        info = self.api.get_user(user).get_info()
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
    fields = (info['name'], info['realname'], info['age'], info['gender'],
              info['country'], info['registered'], info['playcount'],
              info['url'])
    print_console(LEL + " Profile info for %s (%s, %s, %s) - Country: %s - Registered: %s - Play count: %s -- %s" % fields)
def save(self):
    """Pickle the feed registry (self.feeds) to FEEDFILE.

    BUG FIX: the file was opened in text mode ("w+") although pickle
    emits binary data (load() already uses "rb"); it now opens "wb".
    A `with` block guarantees the handle is closed even if dump() raises.
    """
    try:
        with open(FEEDFILE, "wb") as f:
            pickle.dump(self.feeds, f)
    except Exception as e:
        # Best-effort persistence: report the failure, never crash the bot.
        print_console(e)
def item(feedid, n):
    """Print entry *n* of registered feed *feedid*, then persist the registry."""
    found = False
    for key in r.feeds:
        if r.feeds[key].feedid == feedid:
            found = True
            r.feeds[key].get_item(n)
            r.save()
    if not found:
        print_console("Feed %s doesn't exist! :(" % feedid)
def init():
    """Ensure the data directory exists, then initialise every configured feed."""
    if not os.path.exists("./rss-data"):
        os.makedirs("./rss-data")
    for cfg in FEEDS:
        feed = Feed(cfg["id"], cfg["logo"], cfg["url"])
        print_console("Init %s" % feed.feedid)
        feed.mark_all_as_read()
        feed.save()
    print_console("All unseen items marked as read.")
def item(feedid, n):
    """Print entry *n* of configured feed *feedid* and save its state."""
    found = False
    for cfg in FEEDS:
        if cfg["id"] == feedid:
            feed = Feed(cfg["id"], cfg["logo"], cfg["url"])
            found = True
            feed.get_item(n)
            feed.save()
    if not found:
        print_console("Feed %s doesn't exist! :(" % feedid)
def recent(feedid, n):
    """Print the *n* most recent entries of registered feed *feedid*."""
    found = False
    for key in r.feeds:
        if r.feeds[key].feedid == feedid:
            found = True
            r.feeds[key].get_recent(n, mark_all_as_read=True)
            r.save()
    if not found:
        print_console("Feed %s doesn't exist! :(" % feedid)
def recent(feedid, n):
    """Print the *n* most recent entries of configured feed *feedid*."""
    found = False
    for cfg in FEEDS:
        if cfg["id"] == feedid:
            feed = Feed(cfg["id"], cfg["logo"], cfg["url"])
            found = True
            feed.get_recent(n, mark_all_as_read=True)
            feed.save()
    if not found:
        print_console("Feed %s doesn't exist! :(" % feedid)
def reset(feedid):
    """Clear all cached entries of registered feed *feedid* and persist."""
    found = False
    for key in r.feeds:
        if r.feeds[key].feedid == feedid:
            found = True
            r.feeds[key].entries = []
            print_console("Cleared feed %s" % feedid)
    if found:
        r.save()
    else:
        print_console("Feed %s doesn't exist! :(" % feedid)
def load(self):
    """Unpickle the feed registry from FEEDFILE into self.feeds.

    FIXES: the file handle leaked if pickle.load() raised (now closed by
    `with`), and IOErrors other than ENOENT were silently swallowed (now
    reported like any other exception).  A missing file (errno 2) is
    expected on first run and deliberately ignored.
    """
    try:
        with open(FEEDFILE, "rb") as f:
            self.feeds = pickle.load(f)
    except IOError as e:
        if e.errno != 2:  # ENOENT means "nothing saved yet" -- not an error
            print_console(e)
    except Exception as e:
        print_console(e)
def reset(feedid):
    """Clear all stored entries of configured feed *feedid* and save it.

    BUG FIX: the original ended with bare `f.save` (attribute access,
    never called), so the cleared state was never written to disk; it
    now actually calls save().
    """
    exists = False
    for cfg in FEEDS:
        if cfg["id"] == feedid:
            feed = Feed(cfg["id"], cfg["logo"], cfg["url"])
            exists = True
            feed.entries = []
            feed.save()
            print_console("Cleared feed %s" % feedid)
    if not exists:
        print_console("Feed %s doesn't exist! :(" % feedid)
def save(self):
    """Pickle this feed's entries to its own file, under a file lock.

    BUG FIXES:
    - `except FileLockException` was listed *after* `except Exception`;
      since FileLockException subclasses Exception it could never run.
      It is now checked first, so lock timeouts are reported correctly.
    - the file was opened in text mode ("w+") although pickle writes
      binary data; it now uses "wb", matching load()'s "rb".
    - a `with` block closes the handle even if dump() raises.
    """
    try:
        with FileLock(self.feedfile, timeout=5):
            with open(self.feedfile, "wb") as f:
                pickle.dump(self.entries, f)
    except FileLockException:
        print_error("Lock Timeout")
    except Exception as e:
        print_console(e)
def ad_cautelam(key, page):
    """Search within elements of a case ("Pesquisa em elementos de um Processo")."""
    url = ('https://inquisicao.deadbsd.org/api/adcautelam?key=' + key +
           '&page=' + str(page))
    resp = requests.get(url, timeout=TIMEOUT)
    if resp.status_code == 404:
        print_console("Not found")
    else:
        print_console(auto_de_fe(resp.json(), pesquisa=True))
def get_artist_events(self, artist):
    """Print up to NUM_EVENTS upcoming events for *artist*, or a "none" notice."""
    try:
        info = self.api.get_artist(artist)
        upcoming = info.get_upcoming_events()
        # Explicit loop (not a comprehension) so events whose fields raise
        # WSError can simply be skipped.
        lines = []
        for ev in upcoming:
            try:
                when = ev.get_start_date()
                title = ev.get_title()
                link = ev.get_url()
            except pylast.WSError:
                continue
            # when[:-9] trims the trailing time portion of the date string.
            lines.append(" - %s: %s - %s\n" % (when[:-9], title, link))
            if len(lines) >= NUM_EVENTS:
                break
        if lines:
            print_console(LEL + " events for %s:" % info.get_name())
            print_console("".join(lines))
        else:
            print_console(LEL + " no events found for artist %s." % info.get_name())
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
def get_artist_info(self, artist):
    """Print an artist's listener count, top tags, similar artists and bio."""
    try:
        info = self.api.get_artist(artist)
        bio = info.get_bio_summary()
        if bio:
            bio = re.sub("<[^<]+?>", "", bio)  # strip HTML tags from the bio
        listeners = info.get_listener_count()
        top_tags = info.get_top_tags()
        tag_text = ""
        if top_tags:
            joined = ", ".join(t.item.__str__() for t in top_tags[:10])
            tag_text = "Tags: %s." % joined
        similar_text = ", ".join(s.item.__str__() for s in info.get_similar()[:10])
        print_console(LEL + " %s (%d listeners). %s" % (info.get_name(), listeners, tag_text))
        print_console("Similar Artists: %s" % (similar_text))
        if bio:
            print_console(bio)
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
def getResults(n):
    """Look up Twitch channel *n* and print a one-line status summary.

    IDIOM FIXES: `!= False` replaced by truthiness (assumes `partner` is a
    boolean in the Kraken API -- TODO confirm) and `!= None` by
    `is not None`; the single-letter `r`/`l` names were renamed.
    IRC formatting: \002 bold; colours \0034 red, \0039 green, \0036 purple,
    \003 reset.
    """
    # Channel metadata and live-stream status, respectively.
    chan_resp = requests.get('https://api.twitch.tv/api/channels/{0}'.format(n))
    live_resp = requests.get('https://api.twitch.tv/kraken/streams?channel={0}'.format(n))
    if chan_resp.status_code == 200:
        j = chan_resp.json()
        if 'message' not in j:
            data = 'Twitch {0}\'s - Title: \002"{1}"\002 - Game: \002"{2}"\002'.format(j["display_name"], c(j["status"]), j["game"])
            if j["partner"]:
                data += ' [\0036 Partner \003]'
            if j["steam_id"] is not None:
                data += ' [ Steam: http://steamcommunity.com/profiles/{0} ]'.format(j["steam_id"])
            if live_resp.status_code == 200:
                live = live_resp.json()
                if live["_total"] != 0:
                    channel = live["streams"][0]["channel"]
                    data += ' [ Totals: {0} Followers | {1} Viewers ]'.format(channel["followers"], channel["views"])
                    data += ' [\0039 \002Live\002 w/ {0} viewers \003]'.format(live["streams"][0]["viewers"])
                else:
                    data += ' [\0034 \002Off\002 \003]'
            print_console('{0} - http://www.twitch.tv/{1}'.format(data, n))
        else:
            print_console('Twitch Returned: {0}'.format(c(j["message"])))
    elif chan_resp.status_code == 400 or chan_resp.status_code == 404:
        j = chan_resp.json()
        print_console('Twitch Returned: {0}'.format(c(j["message"])))
    else:
        print_console('Request Returned: "{0}" status code.'.format(chan_resp.status_code))
def compare_users(self, user, user2):
    """Print Last.fm's own tasteometer comparison between two users."""
    try:
        comparison = self.api.get_user(user).compare_with_user(user2)
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
    # comparison[0] is a 0..1 similarity score, comparison[1] the shared artists.
    similarity = round(float(comparison[0]), 2) * 100
    common = ", ".join(artist.__str__() for artist in comparison[1])
    print_console(LEL + " Comparison between %s and %s: Similarity: %d%% - Common Artists: %s" % (user, user2, similarity, common))
def get_user_info(self, user):
    """Fetch and print a Last.fm user's profile details.

    BUG FIX: age used `profile.get_age() is not 0` -- identity comparison
    against an int literal, which only works via CPython's small-int
    caching -- and called get_age() twice.  Now one call, `==` comparison.
    """
    try:
        profile = self.api.get_user(user)
        name = user
        realname = profile.get_name(True)
        age = profile.get_age()
        if age == 0:  # the API reports 0 when no age is set
            age = "N/A"
        gender = profile.get_gender()
        country = profile.get_country()
        registered = time.strftime("%d-%m-%Y", time.localtime(int(profile.get_unixtime_registered())))
        playcount = profile.get_playcount()
        url = profile.get_url()
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
    print_console(LEL + " Profile info for %s (%s, %s, %s) - Country: %s - Registered: %s - Play count: %s -- %s" % (name, realname, age, gender, country, registered, playcount, url))
def load(self):
    """Unpickle this feed's entries from its own file, under a file lock.

    BUG FIXES:
    - `except FileLockException` appeared after `except Exception` and was
      therefore unreachable (FileLockException subclasses Exception); it
      is now checked first so lock timeouts are reported.
    - the file handle leaked if pickle.load() raised; `with` closes it.
    A missing file (errno 2 / ENOENT) is expected on first run and ignored.
    """
    try:
        with FileLock(self.feedfile, timeout=5):
            with open(self.feedfile, "rb") as f:
                self.entries = pickle.load(f)
    except FileLockException:
        print_error("Lock Timeout")
    except IOError as e:
        if e.errno == 2:  # no saved state yet -- not an error
            pass
    except Exception as e:
        print_console(e)
def procura(pesquisa, indice = 1):
    """Look up *pesquisa* in the Priberam dictionary and print definition
    number *indice* (1-based); prints spelling suggestions when not found.

    IDIOM FIX: `type(x) is not NoneType` replaced with the equivalent
    `x is not None`.  (Python 2 code: urllib.quote/urllib2 and the old
    BeautifulSoup `fromEncoding` API.)
    """
    if indice < 1:
        return
    parametro = urllib.quote(pesquisa)
    pagina = urllib2.urlopen(URL % parametro)
    pagina = pagina.read()
    sopa = BeautifulSoup(pagina, fromEncoding="utf-8")
    definicoes = sopa.find("div", {"class":"pb-main-content"})
    definicoes = definicoes.findAll("p")
    contagem = len(definicoes)
    # No definitions found: fall back to near/far spelling suggestions.
    if contagem < 1 :
        p = sopa.find("div", {"class":"pb-sugestoes-proximas"})
        a = sopa.find("div", {"class":"pb-sugestoes-afastadas"})
        resultado = []
        if p is not None :
            resultado = resultado + p.findAll('a', text=True)
        if a is not None and len(a) > 0 :
            resultado = resultado + a.findAll('a', text=True)
        if len(resultado) <= 0:
            mylib.print_console('Palavra não encontrada.')
            return
        resultado = ''.join(resultado)
        resultado = ' '.join([w.strip() for w in resultado.split('\n')])
        # NOTE(review): "sugestes" looks like mojibake for "sugestões";
        # left untouched since it is runtime output.
        mylib.print_console('%s sugestes:%s.' % (pesquisa, resultado))
        return
    if indice > contagem :
        return
    if contagem > 1 and indice == 1 :
        mylib.print_console("%d definições encontradas\002;\002 ? %s 2 para a próxima." % (contagem, pesquisa))
    definicao = definicoes[indice - 1]
    # Strip the pt_BR variant markers before flattening the text.
    for br in definicao.findAll('span', {'class' :'varpb'}, recursive=True) :
        br.extract()
    resultado = ''.join(definicao.findAll(text=True))
    resultado = [w.strip().replace("=", " = ") for w in resultado.split('\n')]
    #resultado = [w + '\002;\002 ' if len(w) > 1 else w for w in resultado]
    resultado = ''.join(resultado)
    mylib.print_console('\002%s \002%s' % (pesquisa, resultado.encode('utf-8')))
def list_record(position = 1):
    """Return the *position*-th FML record (1-based, oldest first).

    Exits the process with "Not found" when the table is empty or the
    position is out of range; hints at the next position otherwise.
    """
    if position <= 0:
        position = 1
    cur = conn.execute('select count(1) from fml')
    total = cur.fetchone()[0]
    if total == 0 or int(position) > int(total):
        mylib.print_console("Not found")
        sys.exit()
    if total > 1 and position < total:
        mylib.print_console("%d found '.lf %d' for the next one" % (total, position+1))
    cur = conn.execute('select id, fml_id, datetime(dt, "unixepoch") as data, msg FROM fml ORDER BY id ASC LIMIT ?,1', [position-1])
    row = cur.fetchall()[0]
    return "%s - #%s, %s" % (row[3], row[1], row[2])
def find_record(find, position = 1):
    """Return the *position*-th FML record matching *find* (text or #id).

    Matches either a LIKE pattern on the message or an exact fml_id with
    '#' stripped; exits with "Not found" when nothing matches.
    """
    if position <= 0:
        position = 1
    pattern = '%'+find+'%'
    record_id = find.translate(None, '#')  # Python 2 str.translate: drop '#'
    cur = conn.execute('select count(1) from fml where msg like ? OR fml_id = ?', [pattern, record_id])
    total = cur.fetchone()[0]
    if total == 0 or int(position) > int(total):
        mylib.print_console("Not found")
        sys.exit()
    if total > 1 and position < total:
        mylib.print_console("%d found '.ff %s %d' for the next one" % (total, find, position+1))
    cur = conn.execute('select id, fml_id, datetime(dt, "unixepoch") as data, msg from fml WHERE msg like ? OR fml_id = ? ORDER BY data DESC LIMIT ?,1', [pattern, record_id, position-1])
    row = cur.fetchall()[0]
    return "%s - #%s, %s" % (row[3], row[1], row[2])
def init(mark_all_as_read = False):
    """Register the hard-coded feed list with the global registry *r*.

    When mark_all_as_read is True, every registered feed's backlog is
    flagged as seen (so the bot does not spam old items on first run).
    Logo strings carry raw IRC colour codes (e.g. "11,2...").
    """
    # Each entry: "id" = command handle, "logo" = display prefix, "url" = RSS source.
    list_a = [
        {"id":"b4chan", "logo":"-4chan /b/-", "url":"http://boards.4chan.org/b/index.rss"},
        {"id":"a4chan", "logo":"-4chan /a/-", "url":"http://boards.4chan.org/a/index.rss"},
        {"id":"g4chan", "logo":"-4chan /g/-", "url":"http://boards.4chan.org/g/index.rss"},
        {"id":"v4chan", "logo":"-4chan /v/-", "url":"http://boards.4chan.org/v/index.rss"},
        {"id":"gif4chan", "logo":"-4chan /gif/-", "url":"http://boards.4chan.org/gif/index.rss"},
        {"id":"pplware", "logo": "11,2PPLWARE", "url":"http://pplware.sapo.pt/feed/"},
        {"id":"apod", "logo": "1,15APOD", "url":"http://apod.nasa.gov/apod.rss"},
        {"id":"tugaleaks", "logo": "14,01TUGALEAKS", "url":"http://feeds.feedburner.com/tugaleaks"},
        {"id":"gunshow", "logo": "0,1Gun Show", "url":"http://www.rsspect.com/rss/gunshowcomic.xml"},
        {"id":"qc", "logo": "10,12QC", "url":"http://www.questionablecontent.net/QCRSS.xml"},
        {"id":"xkcd", "logo": "1,0xkcd", "url":"http://xkcd.com/rss.xml"},
        {"id":"mojang", "logo":"Mojang", "url":"http://mojang.com/feed"},
        {"id":"bukkit", "logo":"bukkit", "url":"http://forums.bukkit.org/forums/bukkit-news.2/index.rss"},
        {"id":"wotd", "logo":"-palavra do dia-", "url":"http://priberam.pt/dlpo/DoDiaRSS.aspx"},
        {"id":"blitz", "logo":"BLITZ.pt", "url":"http://blitz.aeiou.pt/gen.pl?p=rss"},
        {"id":"smbc", "logo":"smbc", "url":"http://www.smbc-comics.com/rss.php"},
        {"id":"ptsec", "logo":"ptsec", "url":"https://ptsec.info/wp/feed/"},
        {"id":"kritzkast", "logo":"kritzkast", "url":"http://www.kritzkast.com/feed?cat=-14"},
        {"id":"tf2", "logo":"TF2 Official Blog", "url":"http://www.teamfortress.com/rss.xml"},
        {"id":"universetoday", "logo":"Universe Today", "url":"http://www.universetoday.com/feed/"},
        {"id":"hackernews", "logo":"Hacker News", "url":"http://news.ycombinator.com/rss"},
        {"id":"sceper", "logo":"Sceper", "url":"http://sceper.eu/feed"},
        {"id":"thepiratebay", "logo":"ThePirateBay", "url":"https://rss.thepiratebay.se/0"},
        {"id":"hackaday", "logo":"Hack A Day", "url":"http://www.hackaday.com/rss.xml"},
        {"id":"astronomycast", "logo":"Astronomy Cast", "url":"http://feeds.feedburner.com/astronomycast"},
        {"id":"yt_jamesnintendonerd", "logo":"1,00,4 JamesNintendoNerd", "url":"http://www.youtube.com/rss/user/JamesNintendoNerd/videos.rss"},
        {"id":"blol", "logo":"0,13BLOL", "url":"http://blol.org/feed"},
        ##{"id":"", "logo":"", "url":""},
        ]
    for a in list_a:
        r.add(a["id"], a["logo"], a["url"])
    if mark_all_as_read:
        for f in r.feeds:
            # NOTE(review): print_error is called with two positional args
            # here, unlike the "%"-formatted calls elsewhere -- confirm its
            # signature accepts this.
            print_error("Init ", r.feeds[f].feedid)
            r.feeds[f].mark_all_as_read()
        print_console("All unseen items marked as read.")
    # NOTE(review): nesting reconstructed from collapsed source; save is
    # taken as unconditional so newly added feeds persist either way.
    r.save()
def get_item(self, n=0):
    """Fetch the feed and print entry *n* (0-based) with its summary."""
    parsed = feedparser.parse(self.url)
    if check_status(parsed, self.feedid) == -1:
        print_error("in get_item()")
    try:
        raw = parsed.entries[n]
    except IndexError:
        print_console("%s Entry %s not available" % (self.logo, n))
        exit(-1)
    tracked = self.add(raw)
    if tracked is None:
        # Entry was already known: wrap it fresh, print without "new" marker.
        Entry(raw, self).print_me(print_summary=True)
    else:
        # Entry is new to the feed's state.
        tracked.print_me(print_summary=True, seen_as_new=True)
def conjuga(verbo, indice = 1):
    """Fetch conjugations of *verbo* from Priberam and print the *indice*-th
    tense block (1-based).

    Python 2 code (urllib.quote/urllib2, old BeautifulSoup API).  Output
    strings carry raw IRC formatting codes (\002 bold).
    """
    if indice < 1:
        return
    par = urllib.quote(verbo)
    res = urllib2.urlopen(CONJURL % par)
    content = res.read()
    # Pull that HTML out of the surrounding HTML (page embeds a <section>).
    expressao_regular = re.compile(r'<section>(.*)</section>', re.DOTALL)
    match = re.findall(expressao_regular, content)
    sopa = BeautifulSoup(match[0], fromEncoding="utf-8")
    conteudo = sopa.find("div", {"class":"clearfix"})
    conteudo = conteudo.findAll("div", recursive=False)
    categoria = ""
    conjugacao = []
    for d in conteudo :
        # NOTE(review): comparing d['class'] to a plain string relies on the
        # old BeautifulSoup API (bs4 returns a list of classes) -- confirm.
        c = d['class']
        if c == 'tdHEAD' :
            # Section header: mood/category that applies to following blocks.
            categoria = ''.join(d.contents)
        elif c == 'wrapCONJ' :
            # Tense name and its inflected forms.
            tempo = d.findAll('div', {'class':'thCONJ'})
            tempo = ''.join([''.join(f.contents) for f in tempo])
            flexoes = d.findAll('div', {'class':'tdCONJ'})
            flexoes = ''.join([' '.join(f.findAll(text=True)) for f in flexoes])
            #flexoes = ''.join([' '.join(f.contents) for f in flexoes])
            # Drop dots and collapse line breaks in the inflection list.
            flexoes = ''.join([w.replace('.', '') for w in flexoes.split('\n')])
            conjugacao.append('%s %s do verbo \002%s\002: %s' % (tempo, categoria, verbo, flexoes))
    contagem = len(conjugacao)
    if indice > contagem :
        return
    # On the first hit, tell the user how to page to the next tense.
    if contagem > 1 and indice == 1 :
        mylib.print_console("%d tempos verbais encontrados\002;\002 ? %s 2 para o próximo." % (contagem, verbo))
    mylib.print_console(conjugacao[indice - 1])
def print_me(self, print_summary=False, seen_as_new=False):
    """Print this entry's headline (and optionally its summary lines).

    seen_as_new adds a bold "new!" marker; the publish date, when present,
    is shown with its trailing timezone token removed.
    """
    title = self.entry.title
    link = self.entry.link
    try:
        summary = unescape(strip(self.entry.summary))
    except AttributeError:
        summary = ""  # entry has no summary field
    if 'published' in self.entry.keys():
        published = self.entry.published.rsplit(' ', 1)[0]
    else:
        published = None
    marker = " \x02new!\02 " if seen_as_new else " "
    if published is not None:
        print_console("%s%s%s - \x1f%s\x1f (%s)" % (self.feed.logo, marker, title, link, published))
    else:
        print_console("%s%s%s - %s" % (self.feed.logo, marker, title, link))
    if print_summary:
        for raw_line in summary.split("\n"):
            line = raw_line.strip()
            if len(line) > 0:
                print_console("%s %s" % (self.feed.logo, line))
def get_now_playing(self, artist):
    """Print what a Last.fm user is currently playing.

    BUG FIX: the body referenced an undefined name `user` while the
    parameter was (mis)named `artist` and never used -- a guaranteed
    NameError.  The parameter name is kept for interface compatibility
    and aliased to `user` (the argument is in fact a username).
    """
    user = artist  # see docstring: parameter is really a username
    try:
        api_user = self.api.get_user(user)
        track = api_user.get_now_playing()
    except pylast.WSError as e:
        print_console(LEL + " WSError %s: %s" % (e.status, e.details))
        exit(-1)
    if track is None:
        print_console(LEL + " %s doesn't seem to be playing anything right now" % user)
        return
    tags = track.get_top_tags()
    track = track.get_add_info(user.__str__())
    # userloved is a string flag ("1" when the user loved the track).
    loved = " 13<3" if track.userloved == "1" else ""
    try:
        playcount = int(track.userplaycount)
    except (ValueError, TypeError):
        playcount = 1  # missing/unparseable count: assume first play
    name = track.__str__()
    if tags:
        tag_text = ", ".join([t.item.__str__() for t in tags[:5]])
        s = " %s is now playing: %s (%d plays%s, %s)" % (user, name, playcount, loved, tag_text)
    else:
        s = " %s is now playing: %s (%d plays%s)" % (user, name, playcount, loved)
    print_console(LEL + s)
def help():
    """Print usage for the .tr translate command and the language list."""
    usage = ".tr [from] <to> <text>"
    languages = "languages: " + " ".join(langs)
    for line in (usage, languages):
        mylib.print_console(line)
def output(output_lang, result):
    """Print a translation result tagged "[source>target]" in bold."""
    detected_lang = result[2]
    translated_text = result[0][0][0]
    mylib.print_console("\002[%s>%s]\002 %s" % (detected_lang, output_lang, translated_text))
def man():
    """Print lottery-helper usage and exit with a failure status."""
    # ~: python euromilhoes.py [e | t | j]
    usage = 'Usage: !euromilhoes | !totoloto | !joker'
    print_console(usage)
    exit(-1)
def help():
    """Print usage for the multi-target .tr command and the language list."""
    for line in (".tr <l1,l2,l3,...,ln> <text>",
                 "languages: " + " ".join(langs)):
        mylib.print_console(line)
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import sys import os import urllib2 # ../mylib.py sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from mylib import print_console if (len(sys.argv) == 1): print_console("Usage .cc <coin> [ammount]") sys.exit(0) coin = sys.argv[1].upper() ammount = 1 if (len(sys.argv) > 2): try: ammount = float(sys.argv[2]) except: ammount = 1 if not isinstance(ammount, (int, long, float, complex)): print_console("Invalid input, usage .cc <coin> [ammount]") sys.exit(0) url = 'https://min-api.cryptocompare.com/data/pricemultifull?fsyms={}&tsyms=EUR,USD'.format( coin) try: response = urllib2.urlopen(url)