async def from_google_geocode(cls, bot, address):
    """Build a location instance by geocoding *address* with the Google
    Geocoding API.

    Returns None when Google reports any status other than "OK".
    """
    geocode_url = (
        "https://maps.googleapis.com/maps/api/geocode/json"
        "?address={}&key={}"
    ).format(uriquote(address), bot.config.gsearch2)

    async with bot.session.get(geocode_url) as resp:
        payload = await resp.json()

    if payload['status'] != "OK":
        return None

    city = state = country = poi = ""
    for part in payload['results'][0]['address_components']:
        kinds = part['types']
        if 'locality' in kinds:
            city = part['long_name']
        elif 'point_of_interest' in kinds or 'natural_feature' in kinds:
            poi = part['long_name']
        elif 'administrative_area_level_1' in kinds:
            state = part['short_name']
        elif 'country' in kinds:
            country = part['long_name']

    # If we didn't find a city, maybe there was a POI or natural feature
    # entry, so use that instead.
    if not city:
        city = poi

    coords = payload['results'][0]['geometry']['location']
    return cls(coords['lat'], coords['lng'], city, state, country, address)
async def translate(self, ctx, *, phrase: str):
    """Translate short phrases using google translate

    Optionally specify language code such as `!translate en-es cat`"""
    # Look for a leading "xx-yy" source/target language pair in the
    # first five characters of the input.
    langs = re.search(r"(\w{2})-(\w{2})", phrase[0:5])
    if langs:
        sl = langs.group(1)
        tl = langs.group(2)
        phrase = phrase[6:]  # strip the "xx-yy " prefix
    else:
        sl = "auto"
        tl = "en"
    url = "https://translate.googleapis.com/translate_a/single"
    # FIX: pass the raw phrase. aiohttp percent-encodes `params` values
    # itself, so pre-quoting with uriquote() double-encoded spaces and
    # punctuation before they reached the translate endpoint.
    params = {
        'client': 'gtx',
        'sl': sl,
        'tl': tl,
        'dt': 't',
        "q": phrase
    }
    ua = ("Mozilla/5.0 (X11; CrOS x86_64 12239.19.0) AppleWebKit/537.36 "
          "(KHTML, like Gecko) Chrome/76.0.3809.38 Safari/537.36")
    headers = {'User-Agent': ua}
    async with self.bot.session.get(url, headers=headers,
                                    params=params) as resp:
        result = await resp.json()
    # result[0][0][1] = source text, result[2] = detected source language,
    # result[0][0][0] = translated text.
    await ctx.send("{} ({}): {}".format(result[0][0][1], result[2],
                                        result[0][0][0]))
async def wolfram(self, ctx, *, query: str):
    """Query wolfram alpha and return the output"""
    # Pass the caller's saved location (if any) so answers can be
    # localized (weather, "nearest X", unit conventions, ...).
    loc = ctx.author_info.location
    if loc:
        location = uriquote(loc.formatted_address)
        lat, lng = loc.latitude, loc.longitude
    else:
        location = lat = lng = ""

    url = "http://api.wolframalpha.com/v2/query"
    params = {
        "appid": self.bot.config.wolframAPIkey,
        "format": 'plaintext',
        'output': 'json',
        "input": query,
        "location": location,
        "latlong": f"{lat},{lng}",
    }
    # print(params)
    # Wolfram api can take a while sometimes....
    async with ctx.channel.typing():
        # "c" is the short-answer alias; any other invocation gets the
        # full result set.
        if ctx.invoked_with.lower() == "c":
            result = await self.get_wolfram(url, params)
        else:
            result = await self.get_wolfram(url, params, full=True)

    if result:
        await ctx.send(result)
    else:
        await ctx.send("Wolfram didn't understand that")
async def geocode(house, street, postcode):
    """Resolve a street address to (lat, lng).

    Answers from the Mongo geocache when possible; otherwise asks the
    Google Geocoding API and upserts the result into the cache.
    """
    query = {
        'geo.type': 'Point',
        'geo.coordinates': {
            '$exists': 1
        },
        'address.house': house,
        'address.street': street,
        'address.zip': postcode
    }

    # Cache hit: return the stored coordinates straight away.
    if await DB.geocache.count_documents(query) > 0:
        cached = await DB.geocache.find_one(query, {'geo.coordinates': 1})
        lat, lng = cached['geo']['coordinates']
        return (lat, lng)

    # Cache miss: geocode via Google, then remember the answer.
    async with aiohttp.ClientSession(raise_for_status=True) as http:
        quoted = uriquote(f'{house} {street}, {postcode}')
        async with http.get(
                f'https://maps.googleapis.com/maps/api/geocode/json'
                f'?address={quoted}&key={DM_TOKEN}') as rsp:
            payload = await rsp.json()

    coords = payload['results'][0]['geometry']['location']
    lat, lng = (float(coords['lat']), float(coords['lng']))
    update = {'$set': {
        'address': {'house': house, 'zip': postcode, 'street': street},
        'geo': {'type': 'Point', 'coordinates': (lat, lng)},
    }}
    await DB.geocache.update_many(query, update, upsert=True)
    return (lat, lng)
async def urban_dictionary(self, ctx, *, term: str = ""):
    """Searches for a term on urbandictionary.com"""
    # Without a term, fall back to a random batch of definitions.
    if term:
        url = (f"http://api.urbandictionary.com/v0/define"
               f"?term={uriquote(term)}")
    else:
        url = "http://api.urbandictionary.com/v0/random"

    async with self.bot.session.get(url) as resp:
        data = await resp.json()

    pages = Paginator(ctx, data['list'], self.ud_callback)
    await pages.paginate()
async def get_card(self, card, cset=""):
    """Look up a Magic: The Gathering card by name (optionally within a
    set) and return the first printing that has artwork, or None."""
    name = uriquote(card.strip())
    setcode = uriquote(cset.strip())

    url = f"http://api.magicthegathering.io/v1/cards?name={name}"
    if setcode:
        url += f"&set={setcode}"

    headers = {'User-agent': 'Palbot for discord/2.0'}
    async with self.bot.session.get(url, headers=headers) as resp:
        cards = (await resp.json())['cards']

    if not cards:
        return None
    # Prefer a printing that ships an image URL; None if none do.
    for printing in cards:
        if "imageUrl" in printing:
            return printing
    return None
async def rt(self, ctx, *, movie_name: str):
    """Searches Flixster for a movie's Rotten Tomatoes score and critics
    consensus if available"""
    search_url = self.rt_search_url.format(uriquote(movie_name))
    # RT api is slow...
    async with ctx.channel.typing():
        data = await self.json_from_flxurl(search_url)
        movie_ids = [movie['id'] for movie in data]
        if not movie_ids:
            await ctx.send(
                f"Couldn't find a movie named `{movie_name}` on Flixster")
            return
        pages = Paginator(ctx, movie_ids, self.rt_output_callback)
        await pages.paginate()
async def google_news(self, ctx, *, query: str = ""):
    """Search for a story on google news - returns the headline and a link"""
    if not query:
        # No query: newest item from the top-stories feed.
        url = "https://news.google.com/news/rss/?hl=en"
    else:
        # FIX: the language parameter was truncated to "hl=e"; use
        # "hl=en" to match the no-query feed above.
        url = ("https://news.google.com/news/rss/search/section/q/{0}/{0}"
               "?hl=en").format(uriquote(query))

    async with self.bot.session.get(url) as resp:
        data = await resp.read()

    # Pull the first <item> out of the RSS feed and report its title/link.
    dom = xml.dom.minidom.parseString(data)
    newest_news = dom.getElementsByTagName('item')[0]
    title = newest_news.getElementsByTagName(
        'title')[0].childNodes[0].data.strip()
    link = newest_news.getElementsByTagName(
        'link')[0].childNodes[0].data
    await ctx.send(f'{title} [ {link} ]')
async def get_google_entries(query, session=None):
    """Scrape Google web search for result URLs.

    Returns (entries, root) from a parsed results page, or
    (None, first_link) when the scrape is refused and we fall back to
    the Custom Search API.
    """
    # FIX: close the session if we created it here (it used to leak),
    # and the interface-language parameter is "hl", not "h1" (digit 1).
    owns_session = session is None
    if owns_session:
        session = aiohttp.ClientSession()
    try:
        url = 'https://www.google.com/search?q={}'.format(uriquote(query))
        params = {
            'safe': 'off',
            'lr': 'lang_en',
            'hl': 'en'
        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'
        }
        entries = []
        async with session.get(url, params=params, headers=headers) as resp:
            if resp.status != 200:
                # Scrape blocked -> fall back to the Custom Search API.
                config = load_optional_config()
                async with session.get(
                        "https://www.googleapis.com/customsearch/v1?q=" +
                        quote_plus(query) + "&start=" + '1' + "&key=" +
                        config['google_api_key'] + "&cx=" +
                        config['custom_search_engine']) as resp:
                    result = json.loads(await resp.text())
                    return None, result['items'][0]['link']
            try:
                # Fast path: lxml's etree, when it was importable.
                root = etree.fromstring(await resp.text(),
                                        etree.HTMLParser())
                search_nodes = root.findall(".//div[@class='g']")
                for node in search_nodes:
                    url_node = node.find('.//h3/a')
                    if url_node is None:
                        continue
                    url = url_node.attrib['href']
                    if not url.startswith('/url?'):
                        continue
                    # Result hrefs look like /url?q=<target>&...
                    url = parse_qs(url[5:])['q'][0]
                    entries.append(url)
            except NameError:
                # etree isn't defined -> BeautifulSoup fallback parser.
                root = BeautifulSoup(await resp.text(), 'html.parser')
                for result in root.find_all("div", class_='g'):
                    url_node = result.find('h3')
                    if url_node:
                        for link in url_node.find_all('a', href=True):
                            url = link['href']
                            if not url.startswith('/url?'):
                                continue
                            url = parse_qs(url[5:])['q'][0]
                            entries.append(url)
        return entries, root
    finally:
        if owns_session:
            await session.close()
async def get_google_entries(query, session=None):
    """Scrape Google web search for result URLs.

    Returns (entries, root) from a parsed results page, or
    (None, first_link) when the scrape is refused and we fall back to
    the Custom Search API.
    """
    # FIX: close the session if we created it here (it used to leak),
    # and the interface-language parameter is "hl", not "h1" (digit 1).
    owns_session = session is None
    if owns_session:
        session = aiohttp.ClientSession()
    try:
        url = 'https://www.google.com/search?q={}'.format(uriquote(query))
        params = {
            'safe': 'off',
            'lr': 'lang_en',
            'hl': 'en'
        }
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'
        }
        entries = []
        async with session.get(url, params=params, headers=headers) as resp:
            if resp.status != 200:
                # Scrape blocked -> fall back to the Custom Search API.
                config = load_optional_config()
                async with session.get(
                        "https://www.googleapis.com/customsearch/v1?q=" +
                        quote_plus(query) + "&start=" + '1' + "&key=" +
                        config['google_api_key'] + "&cx=" +
                        config['custom_search_engine']) as resp:
                    result = json.loads(await resp.text())
                    return None, result['items'][0]['link']
            try:
                # Fast path: lxml's etree, when it was importable.
                root = etree.fromstring(await resp.text(),
                                        etree.HTMLParser())
                search_nodes = root.findall(".//div[@class='g']")
                for node in search_nodes:
                    url_node = node.find('.//h3/a')
                    if url_node is None:
                        continue
                    url = url_node.attrib['href']
                    if not url.startswith('/url?'):
                        continue
                    # Result hrefs look like /url?q=<target>&...
                    url = parse_qs(url[5:])['q'][0]
                    entries.append(url)
            except NameError:
                # etree isn't defined -> BeautifulSoup fallback parser.
                root = BeautifulSoup(await resp.text(), 'html.parser')
                for result in root.find_all("div", class_='g'):
                    url_node = result.find('h3')
                    if url_node:
                        for link in url_node.find_all('a', href=True):
                            url = link['href']
                            if not url.startswith('/url?'):
                                continue
                            url = parse_qs(url[5:])['q'][0]
                            entries.append(url)
        return entries, root
    finally:
        if owns_session:
            await session.close()
async def headlines(self, ctx, *, query=""):
    """Show the top three matching English headlines from NewsAPI.org."""
    urlfmt = "http://newsapi.org/v2/top-headlines?language=en&q={}"
    headers = {
        "x-api-key": self.bot.config.apikeys["newsapi"]
    }
    url = urlfmt.format(uriquote(query))
    async with self.bot.aio_session.get(url, headers=headers) as r:
        resp = await r.json()

    if resp["status"] != "ok":
        err = resp["message"]
        # FIX: corrected the spelling of "occurred" in this
        # user-facing error message.
        await ctx.send("The following error occurred while running the "
                       f"command: ```{err}```Please report this to the bot "
                       "owner.")
        return

    articles = resp["articles"][:3]
    em = discord.Embed(title="News by NewsAPI.org",
                       url="http://newsapi.org",
                       color=discord.Color.green())
    for art in articles:
        fname = f"[{art['title']}]({art['url']})"
        # Embed field names cap at 256 chars; drop the markdown link and
        # keep the bare title when the link form would be too long.
        fname = art['title'] if len(fname) > 256 else fname
        fval = textwrap.shorten(art["description"] or "[...]", 140)
        em.add_field(name=fname, value=fval or "[...]")
    await ctx.send(embed=em)
async def google_for_urls(bot, search_term, *,
                          url_regex=None, return_full_data=False):
    """Run a Google Custom Search and return the matching result URLs.

    With return_full_data=True the raw 'items' list is returned instead
    of the filtered link list.
    """
    api_url = ('https://www.googleapis.com/customsearch/v1'
               '?key={}&cx={}&q={}').format(bot.config.gsearchapi,
                                            bot.config.gsearchcx,
                                            uriquote(search_term))
    async with bot.session.get(api_url) as resp:
        payload = await resp.json()
        if resp.status != 200:
            print(resp, payload)
            return

    if return_full_data:
        return payload['items']
    if 'items' not in payload:
        return None

    links = []
    for item in payload['items']:
        # Optionally keep only links matching the caller's pattern.
        if url_regex and not re.search(url_regex, item['link']):
            continue
        links.append(item['link'].replace('%25', '%'))
    return links
async def request(self, route: Route, **kwargs):
    """
    Sends a request to the Discord API.

    Handles rate limits by utilizing LockManager and the Discord API
    Bucket system -
    https://discord.com/developers/docs/topics/gateway#encoding-and
    -compression.

    When the client wants to send a new request, this method attempts
    to acquire a ratelimit lock. When it eventually does, it sends a
    request and checks to see if the ratelimit has been exceeded. If
    so, that Bucket's LockManager is locked so other requests cannot
    acquire a lock. The Discord Bucket system returns a `delta` value
    which specifies how long it will take before another request can
    be sent and the LockManager for that Bucket can be unlocked.

    Parameters
    ----------
    route: Route
        The Discord API route to send a request to.
    **kwargs: Dict[str, Any]
        The parameters to send with the request.
    """
    bucket = route.bucket

    for i in range(self.retry_attempts):
        if not self.global_lock.is_set():
            self.logger.debug("Sleeping for Global Rate Limit")
            await self.global_lock.wait()

        # FIX: dict.get(bucket, Lock()) never stored the fallback lock,
        # so every call got a brand-new lock and per-bucket requests were
        # never actually serialized. setdefault() persists it.
        ratelimit_lock: asyncio.Lock = self.ratelimit_locks.setdefault(
            bucket, asyncio.Lock(loop=self.loop))

        await ratelimit_lock.acquire()
        with LockManager(ratelimit_lock) as lockmanager:
            # Merge default headers with the users headers,
            # could probably use a if to check if is headers set?
            # Not sure which is optimal for speed
            kwargs["headers"] = {
                **self.default_headers,
                **kwargs.get("headers", {})
            }

            # Format the reason
            try:
                reason = kwargs.pop("reason")
            except KeyError:
                pass
            else:
                if reason:
                    # Audit-log reasons travel percent-encoded in a header.
                    kwargs["headers"]["X-Audit-Log-Reason"] = uriquote(
                        reason, safe="/ ")

            r = await self.session.request(route.method,
                                           self.baseuri + route.path,
                                           **kwargs)

            # check if we have rate limit header information
            remaining = r.headers.get('X-Ratelimit-Remaining')
            if remaining == '0' and r.status != 429:
                # we've depleted our current bucket; keep the lock held
                # and release it only when the bucket resets.
                delta = float(r.headers.get("X-Ratelimit-Reset-After"))
                self.logger.debug(
                    f"Ratelimit exceeded. Bucket: {bucket}. Retry after: "
                    f"{delta}")
                lockmanager.defer()
                self.loop.call_later(delta, ratelimit_lock.release)

            status_code = r.status

            if status_code == 404:
                raise NotFound(r)
            elif status_code == 401:
                raise Unauthorized(r)
            elif status_code == 403:
                raise Forbidden(r, await r.text())
            elif status_code == 429:
                if not r.headers.get("Via"):
                    # Cloudflare banned?
                    raise HTTPException(r, await r.text())

                data = await r.json()
                retry_after = data["retry_after"] / 1000
                is_global = data.get("global", False)

                if is_global:
                    self.logger.warning(
                        f"Global ratelimit hit! Retrying in "
                        f"{retry_after}s")
                else:
                    self.logger.warning(
                        f"A ratelimit was hit (429)! Bucket: {bucket}. "
                        f"Retrying in {retry_after}s")

                await asyncio.sleep(retry_after)
                continue

            return r
async def request(self, route: Route, **kwargs):
    """Send a request to a Discord API route, honoring rate limits.

    Parameters
    ----------
    route: Route
        The Discord API route to send a request to.
    **kwargs: Dict[str, Any]
        The parameters to send with the request.
    """
    bucket = route.bucket

    # Retry loop: each pass either returns a response, raises, or
    # sleeps out a 429 and tries again.
    for i in range(self.retry_attempts):
        # Block while a global rate limit is pending (event unset).
        if not self.global_lock.is_set():
            self.logger.debug("Sleeping for Global Rate Limit")
            await self.global_lock.wait()

        # NOTE(review): dict.get(bucket, Lock(...)) never stores the
        # fallback lock back into self.ratelimit_locks, so each call may
        # receive a fresh lock and per-bucket serialization is not
        # guaranteed — confirm whether the dict is populated elsewhere.
        # NOTE(review): Lock(loop=...) was removed in Python 3.10; verify
        # the supported interpreter version.
        ratelimit_lock: Lock = self.ratelimit_locks.get(
            bucket, Lock(loop=self.loop))

        await ratelimit_lock.acquire()
        with LockManager(ratelimit_lock) as lockmanager:
            # Merge default headers with the users headers,
            # could probably use a if to check if is headers set?
            # Not sure which is optimal for speed
            kwargs["headers"] = {
                **self.default_headers,
                **kwargs.get("headers", {})
            }

            # Format the reason
            try:
                reason = kwargs.pop("reason")
            except KeyError:
                pass
            else:
                if reason:
                    # Audit-log reasons travel percent-encoded in a header.
                    kwargs["headers"]["X-Audit-Log-Reason"] = uriquote(
                        reason, safe="/ ")

            r = await self.session.request(route.method,
                                           self.baseuri + route.path,
                                           **kwargs)

            # check if we have rate limit header information
            remaining = r.headers.get('X-Ratelimit-Remaining')
            if remaining == '0' and r.status != 429:
                # we've depleted our current bucket: keep the lock held
                # (defer) and schedule its release for when Discord says
                # the bucket resets.
                delta = float(r.headers.get("X-Ratelimit-Reset-After"))
                self.logger.debug(
                    f"Ratelimit exceeded. Bucket: {bucket}. Retry after: "
                    f"{delta}")
                lockmanager.defer()
                self.loop.call_later(delta, ratelimit_lock.release)

            status_code = r.status

            if status_code == 404:
                raise NotFound(r)
            elif status_code == 401:
                raise Unauthorized(r)
            elif status_code == 403:
                raise Forbidden(r, await r.text())
            elif status_code == 429:
                if not r.headers.get("Via"):
                    # Cloudflare banned?
                    raise HTTPException(r, await r.text())

                data = await r.json()
                retry_after = data["retry_after"] / 1000
                is_global = data.get("global", False)

                # NOTE(review): `ws` is not defined in this scope —
                # presumably a module-level websocket used to broadcast
                # ratelimit events; verify it exists where this runs.
                if is_global:
                    await ws.send_json({"t": "ratelimit", "d": "global"})
                    self.logger.warning(
                        f"Global ratelimit hit! Retrying in "
                        f"{retry_after}s")
                else:
                    await ws.send_json({"t": "ratelimit", "d": bucket})
                    self.logger.warning(
                        f"A ratelimit was hit (429)! Bucket: {bucket}. "
                        f"Retrying in {retry_after}s")

                await sleep(retry_after)
                continue

            return r
async def request(self, route: Route, **kwargs):
    """
    Sends a request to the Discord API.

    Parameters
    ----------
    route: Route
        The Discord API route to send a request to.
    **kwargs: Dict[str, Any]
        The parameters being passed to asyncio.ClientSession.request
    """
    # Recreate the shared session if it was closed.
    if self.session.closed:
        self.session = ClientSession()

    bucket = route.bucket

    for retry_count in range(self.retry_attempts):
        # Block while a global rate limit is pending (event unset).
        if not self.global_lock.is_set():
            self.logger.debug("Sleeping for global rate-limit")
            await self.global_lock.wait()

        # Lazily create the per-bucket lock; `continue` restarts the
        # loop so the now-stored lock is actually used (this consumes
        # one retry attempt).
        ratelimit_lock: asyncio.Lock = self.ratelimit_locks.get(
            bucket, None)
        if ratelimit_lock is None:
            self.ratelimit_locks[bucket] = asyncio.Lock()
            continue

        await ratelimit_lock.acquire()
        with LockManager(ratelimit_lock) as lockmanager:
            # Merge default headers with the users headers, could probably use a if to check if is headers set?
            # Not sure which is optimal for speed
            kwargs["headers"] = {
                **self.default_headers,
                **kwargs.get("headers", {})
            }

            # Format the reason
            try:
                reason = kwargs.pop("reason")
            except KeyError:
                pass
            else:
                if reason:
                    # Audit-log reasons travel percent-encoded in a header.
                    kwargs["headers"]["X-Audit-Log-Reason"] = uriquote(
                        reason, safe="/ ")

            r = await self.session.request(route.method,
                                           self.baseuri + route.path,
                                           **kwargs)
            headers = r.headers

            if r.status == 429:
                # Rate limited: sleep out retry_after and loop again.
                data = await r.json()
                retry_after = data["retry_after"]
                # NOTE(review): the top-of-loop guard waits while the
                # event is NOT set, yet on a global 429 this sets the
                # event before sleeping and clears it after — that
                # ordering looks inverted relative to the guard; verify
                # the intended Event semantics.
                if "X-RateLimit-Global" in headers.keys():
                    # Global rate-limited
                    self.global_lock.set()
                    self.logger.warning(
                        "Global rate-limit reached! Please contact discord support to get this increased. "
                        "Trying again in %s Request attempt %s" %
                        (retry_after, retry_count))
                    await asyncio.sleep(retry_after)
                    self.global_lock.clear()
                    self.logger.debug(
                        "Trying request again. Request attempt: %s" %
                        retry_count)
                    continue
                else:
                    self.logger.info(
                        "Ratelimit bucket hit! Bucket: %s. Retrying in %s. Request count %s"
                        % (bucket, retry_after, retry_count))
                    await asyncio.sleep(retry_after)
                    self.logger.debug(
                        "Trying request again. Request attempt: %s" %
                        retry_count)
                    continue
            elif r.status == 401:
                raise Unauthorized(r)
            elif r.status == 403:
                raise Forbidden(r, await r.text())
            elif r.status == 404:
                raise NotFound(r)
            elif r.status >= 300:
                raise HTTPException(r, await r.text())

            # Check if we are just on the limit but not passed it
            remaining = r.headers.get('X-Ratelimit-Remaining')
            if remaining == "0":
                # Bucket exhausted: keep the lock held (defer) and
                # schedule its release for when the bucket resets.
                retry_after = float(
                    headers.get("X-RateLimit-Reset-After", "0"))
                self.logger.info(
                    "Rate-limit exceeded! Bucket: %s Retry after: %s" %
                    (bucket, retry_after))
                lockmanager.defer()
                self.loop.call_later(retry_after, ratelimit_lock.release)

            return r
async def youtube(self, ctx, *, search: str):
    """Search for a youtube video and return some info along with an
    embedded link"""
    key = self.bot.config.gsearch2

    # Step 1: resolve the search phrase to a single video id.
    search_params = {
        'part': 'snippet',
        'q': uriquote(search),
        'type': 'video',
        'maxResults': 1,
        'key': key,
        'regionCode': 'US'
    }
    async with self.bot.session.get(
            "https://www.googleapis.com/youtube/v3/search",
            params=search_params) as resp:
        data = await resp.json()
    if not data['items']:
        await ctx.send(f"Unable to find a youtube video for `{search}`")
        return
    yt_id = data['items'][0]['id']['videoId']
    link = f"https://youtu.be/{yt_id}"

    # Step 2: pull snippet/stats/details for that id.
    detail_params = {
        'part': "snippet,contentDetails,statistics",
        'hl': 'en',
        'id': yt_id,
        'key': key,
        'regionCode': 'US'
    }
    async with self.bot.session.get(
            "https://www.googleapis.com/youtube/v3/videos",
            params=detail_params) as resp:
        ytjson = await resp.json()
    if not ytjson['items']:
        await ctx.send(f"Failed to load video info for `{link}`")
        return
    ytjson = ytjson['items'][0]

    snippet = ytjson['snippet']
    title = snippet['title']
    uploader = snippet['channelTitle']
    pubdate = snippet['publishedAt'][:10]

    likes = int(ytjson['statistics'].get('likeCount', 0))
    dislikes = int(ytjson['statistics'].get('dislikeCount', 0))
    if likes and dislikes:
        rating = "{0:.1f}/10".format((likes / (likes + dislikes)) * 10)
    else:
        rating = "N/A"
    viewcount = int(ytjson['statistics']['viewCount'])
    # ISO-8601 duration minus the leading "PT", lowercased (e.g. 4m13s).
    duration = ytjson['contentDetails']['duration'][2:].lower()

    # Step 3: map the numeric category id to its display name.
    category = ""
    cat_params = {'part': 'snippet',
                  'id': snippet['categoryId'],
                  'key': key}
    async with self.bot.session.get(
            "https://www.googleapis.com/youtube/v3/videoCategories",
            params=cat_params) as resp:
        catjson = await resp.json()
    category = catjson['items'][0]['snippet']['title']

    out = ""
    # Content-rated videos get flagged and the link spoiler-wrapped.
    if 'contentRating' in ytjson['contentDetails'] and \
            ytjson['contentDetails']['contentRating']:
        print(ytjson['contentDetails'])
        out = "**NSFW** : "
        link = f"|| {link} ||"

    out += (
        f"{title} [{category}] :: Length: {duration} - Rating: {rating} - "
        f"{viewcount:,} views - {uploader} on {pubdate} - {link}")
    await ctx.send(out)
async def untappd_beer_search(self, ctx, *, beername: str):
    """Search Untappd for a beer to return ratings, alcohol, etc"""
    clientid = self.bot.config.untappd_clientid
    clientsecret = self.bot.config.untappd_clientsecret
    # Scores are scaled against this ceiling to make a 0-100 "grade".
    top_rating = 4.7

    search_params = {
        'client_id': clientid,
        'client_secret': clientsecret,
        'q': uriquote(beername)
    }
    async with self.bot.session.get(
            "https://api.untappd.com/v4/search/beer",
            params=search_params) as resp:
        if resp.status != 200:
            return
        found = await resp.json()

    if not found['response']['beers']['items']:
        await ctx.send(
            f"Couldn't find a beer named `{beername}` on Untappd")
        return
    beerid = found['response']['beers']['items'][0]['beer']['bid']

    info_params = {'client_id': clientid, 'client_secret': clientsecret}
    async with self.bot.session.get(
            f"https://api.untappd.com/v4/beer/info/{beerid}?",
            params=info_params) as resp:
        info = await resp.json()
    beer = info['response']['beer']

    beer_name = beer['beer_name']
    beer_abv = beer['beer_abv']
    #beer_ibu = beer['beer_ibu']
    beer_style = beer['beer_style']
    beer_url = f"https://untappd.com/b/{beer['beer_slug']}/{beerid}"
    rating = int(
        round((float(beer['rating_score']) / top_rating) * 100, 0))
    rating_count = int(beer['rating_count'])

    # Map the numeric grade to a word, highest band first.
    bands = ((95, "world-class"), (90, "outstanding"), (85, "very good"),
             (80, "good"), (70, "okay"), (60, "poor"))
    rating_word = "awful"
    for floor, word in bands:
        if rating >= floor:
            rating_word = word
            break

    cals = BeerCals(beer_abv).solve()

    e = discord.Embed(title=f"{beer_name} - {beer_style}", url=beer_url)
    e.add_field(
        name="Grade",
        value=f"{rating} - {rating_word} ({rating_count:,} ratings)",
        inline=False)
    if cals:
        beer_abv = f"{beer_abv}% - Est. Calories (12oz): {cals}"
    e.add_field(name="ABV", value=beer_abv, inline=False)
    if 'beer_label' in beer:
        e.set_thumbnail(url=beer['beer_label'])
    await ctx.send(embed=e)
async def epoll(req):
    """aiohttp handler: show a contact's closest early-polling site plus
    a browser of all sites, rendered as an HTML page via yattag."""
    contact = await Contact.find_by_id(req.match_info['hash'])
    # Unknown contact hash: bounce to the generic sites page.
    if contact is None:
        raise web.HTTPFound('/earlybird_sites')
    # (house, street, zip) triplet used as the cache key below.
    triplet = dict(
        zip(('house', 'street', 'zip'),
            (contact.house, contact.street, contact.zipcode)))
    await asyncio.create_task(
        tag_contact_with(contact, 'vote4robin_earlybird'))
    early_polling_sites = (
        '57 St. Paul St., 2nd Floor, Rochester, NY 14604',
        '700 North St., Rochester, NY 14605',
        '310 Arnett Blvd., Rochester, NY 14619',
        '10 Felix St., Rochester, NY 14608',
        '680 Westfall Rd., Rochester, NY 14620',
        '1039 N. Greece Rd., Rochester, NY 14626',
        '1 Miracle Mile Dr., Rochester, NY 14623',
        '1290 Titus Ave., Rochester, NY 14617',
        '3100 Atlantic Ave., Penfield, NY 14526',
        '6720 Pittsford Palmyra Rd., Fairport, NY 14450',
        '4761 Redman Rd., Brockport, NY 14420',
        '1350 Chiyoda Dr., Webster, NY 14580',
    )
    residence = uriquote(
        f'{contact.house} {contact.street}, {contact.zipcode}')
    cull = {'residence': triplet, 'site': {'$in': early_polling_sites}}
    reapc = await DB.early_polling.count_documents(cull)
    if reapc < 1:
        # Not cached yet: geocode the residence, take the three nearest
        # sites by geo index, and ask address_closest to pick one.
        lat, lng = await geocode(contact.house, contact.street,
                                 contact.zipcode)
        q = {'type': 'Point', 'coordinates': (lat, lng)}
        q = {'geo': {'$near': {'$geometry': q}}}
        top_three = []
        sites = DB.early_polling_sites.find(q, {'address': 1})
        async for site in sites:
            top_three.append(site['address'])
            if len(top_three) == 3:
                break
        try:
            closest = await address_closest(residence, *top_three)
        except Exception:
            # Any failure resolving the closest site: fall back to the
            # browse-all page.
            raise web.HTTPFound(location='/earlybird_sites')
        sow = {'$set': {'residence': triplet, 'site': closest}}
        await DB.early_polling.update_many(cull, sow, upsert=True)
    else:
        # Cached: reuse the previously computed closest site.
        reap = {'site': 1}
        harvest = await DB.early_polling.find_one(cull, reap)
        closest = harvest['site']
    # Percent-encode the site address for use in URLs below.
    closest = uriquote(closest)
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('head'):
        with tag('title'):
            text(f"{contact.forename}'s Early Polling Sites")
        with tag('script'):
            text('''
                const msg = (
                    'The Board of Elections is encouraging all who have waited ' +
                    'over a week for their absentee ballot to consider voting ' +
                    'in person at _any_ early polling site through Sunday, June ' +
                    '21. Click OK to see the closest site to your registered ' +
                    'address and a list of all early voting sites.');
                alert(msg);
            ''')
        with tag('style'):
            text('''
                a {
                    text-decoration: none;
                    color: #4287f5;
                    font-size: xx-large;
                    padding: 0.5em;
                    font-family: Roboto, Arial, 'sans-serif';
                }
                a:hover {
                    color: #1bf5ee;
                }
                div.flex {
                    display: flex;
                    float: center;
                    margin: auto;
                    width: 80%;
                    height: 100vh;
                    padding: 2em;
                    flex-direction: column;
                }
            ''')
    with tag('body'):
        # Embedded Google Maps directions from residence to closest site.
        closest_src = (
            r'https://www.google.com/maps/embed/v1/directions'
            f'?origin={residence}&destination={closest}&key={DM_TOKEN}')
        center = urlencode({
            'house': contact.house,
            'street': contact.street,
            'zip': contact.zipcode
        })
        browse_src = f'/earlybird_sites?{center}'
        with tag('div', klass='flex'):
            with tag('a', href=f'https://google.com/maps/place/{closest}'):
                text(r'Closest early polling to '
                     f'{contact.house} {contact.street}')
            with tag('iframe', src=closest_src, height='50%'):
                pass
            with tag('a', href=browse_src):
                text('Browse all early polling sites')
            with tag('iframe', src=browse_src, height='50%'):
                pass
    return web.Response(text=doc.getvalue(), content_type='text/html')
async def lastfm(self, ctx, user=None):
    """Show the users last played song from last.fm"""
    # Fall back to the caller's saved last.fm username.
    user = user or ctx.author_info.lastfm
    if not user:
        await ctx.send(
            "No user found - usage is `np <user>` or set one with `!set last.fm <user>`"
        )
        return
    url = "http://ws.audioscrobbler.com/2.0/"
    params = {
        'api_key': self.bot.config.lastfm_api_key,
        'limit': 1,
        'format': 'json',
        'method': 'user.getRecentTracks',
        'user': uriquote(user)
    }
    async with self.bot.session.get(url, params=params) as resp:
        npdata = await resp.json()
        if not 'recenttracks' in npdata or not npdata['recenttracks'][
                'track']:
            await ctx.send(
                f"Unable to find recent tracks for user `{user}`")
            return
    # Reuse the same params dict for the follow-up track.getInfo call,
    # adding artist/track and swapping the method.
    params['artist'] = artist = npdata['recenttracks']['track'][0][
        'artist']['#text']
    params['track'] = trackname = npdata['recenttracks']['track'][0][
        'name']
    params['method'] = "track.getInfo"
    async with self.bot.session.get(url, params=params) as resp:
        track = await resp.json()
        track = track.get('track', None)
    extended = ""
    if track:
        # Duration arrives in milliseconds; render as m:ss.
        dmin, dsec = divmod((int(track.get('duration', 0)) / 1000), 60)
        duration = " [{:.0f}:{:02.0f}]".format(dmin, dsec)
        playcount = f" :: Playcount: {track['userplaycount']}" if 'userplaycount' in track else ''
        genres = []
        for genre in track['toptags']['tag']:
            genres.append(genre['name'])
        genre = f" ({', '.join(genres)})" if genres else ''
        extended = f"{duration}{playcount}{genre}"
    # Best-effort youtube link for the artist/track pair.
    ytkey = self.bot.config.gsearch2
    url = "https://www.googleapis.com/youtube/v3/search"
    params = {
        'part': 'snippet',
        'q': f"{artist} - {trackname}",
        'type': 'video',
        'maxResults': 1,
        'key': ytkey
    }
    async with self.bot.session.get(url, params=params) as resp:
        data = await resp.json()
        if data['items']:
            yt_id = data['items'][0]['id']['videoId']
            link = f" - <https://youtu.be/{yt_id}>"
        else:
            link = ""
    if len(npdata['recenttracks']['track']) == 1:
        #User not currently playing track
        date = npdata['recenttracks']['track'][0]['date']['#text']
        out = f"{user} last played: {artist} - {trackname} {extended} on {date}{link}"
    else:
        out = f"{user} np: {artist} - {trackname} {extended}{link}"
    await ctx.send(out)