async def id(self, ctx, object: converters.MentionConverter = None):
    """
    Show the snowflake ID of the mentioned object.

    Falls back to the command invoker when nothing is mentioned. The
    invoking message is deleted before the reply is sent.
    """
    # NOTE: `object` is kept as the parameter name because it is part of
    # the command's public interface; alias it locally instead.
    target = ctx.author if object is None else object

    await ctx.message.delete()

    response = Embed(title=f"{target} ID", colour=alg.rand_colour())
    await ctx.send(target.id, embed=response)
async def define(self, ctx, *, phrase: str):
    """
    Look up ``phrase`` via the dictionary backend and show the results
    as a paged booklet of embeds.

    Sends a short-lived error message if the phrase cannot be found.
    """
    try:
        with ctx.typing():
            results: typing.List[dict] = await self._lookup(phrase)
    except errors.NotFound as ex:
        return await ctx.send(str(ex), delete_after=10)

    pages = []
    for result in results:
        extended_text = ellipse(result.get("extendedText", None))
        source = result.get("attributionText", result["sourceDictionary"])

        citations = [
            f'{quote(c["cite"])} - {c["source"]}'
            for c in result.get("citations", [])
            if c["cite"]
        ]
        citations = ellipse("\n\n".join(citations))

        examples = [e["text"] for e in result.get("exampleUses", [])]
        # Capitalise each example, and quote it.
        examples = [quote(e) for e in examples]
        examples = ellipse("\n\n".join(examples))

        title = result["word"].upper()

        part_of_speech = result.get("partOfSpeech", None)
        definition = result.get("text", "_No definition_")
        if part_of_speech:
            definition = "**" + part_of_speech + "**: " + definition
        definition = ellipse(definition, 2000)

        # Maps relation type to sets of words.
        related = {}
        for rel_word in result.get("relatedWords", []):
            rwl = related.setdefault(rel_word["relationshipType"], set())
            for word in rel_word["words"]:
                rwl.add(word.title())

        embed = embeds.Embed(title=title,
                             description=definition,
                             colour=alg.rand_colour())
        embed.set_footer(text=source)

        if extended_text:
            embed.add_field(name="Extended definition", value=extended_text)
        if citations:
            embed.add_field(name="Citations", value=citations)
        if examples:
            embed.add_field(name="Examples", value=examples)
        if related:
            for relation, words in related.items():
                embed.add_field(name=f"{relation}s".title(),
                                value=", ".join(words))

        pages.append(embed)

    # FIX: `start()` is a coroutine (see the awaited booklet in `tldr`);
    # previously it was created but never awaited, so the paginator never
    # actually ran.
    await book.EmbedBooklet(ctx=ctx, pages=pages).start()
async def f(self, ctx, *, reason=None):
    """
    Pay respects ("press F") in the current channel.

    Consecutive presses in a channel are pooled into a single bucket
    message that is edited/recreated as needed; a fresh bucket is created
    when none exists or the old message has gone stale.
    """
    try:
        await ctx.message.delete()
        bucket = self.buckets.get(ctx.channel)

        # Get the last 10 recent messages. If the bucket message
        # is in there, then update, else, delete the old message if
        # possible and then resend the new one. If the bucket is too
        # old, start anew.
        if bucket:
            msg = bucket.message.id
            most_recent = await ctx.channel.history(limit=10).flatten()
            new_msg = alg.find(lambda m: m.id == msg, most_recent)

            if new_msg:
                bucket.message = new_msg
            else:
                try:
                    await bucket.message.delete()
                    bucket.message = None
                # FIX: was a bare `except:`, which also traps
                # CancelledError/KeyboardInterrupt; only swallow ordinary
                # errors (e.g. the message was already deleted).
                except Exception:
                    del self.buckets[ctx.channel]
                    bucket = None
                else:
                    return await self.append_to_bucket(bucket, ctx.author)

        if not bucket:
            colour = alg.rand_colour()
            if reason is None:
                message = await ctx.send(embed=discord.Embed(
                    description=f"{ctx.author} paid their respects.",
                    colour=colour,
                ))
            else:
                # FIX: this branch previously dropped the computed colour,
                # so the embed colour disagreed with the colour stored in
                # the F bucket below.
                message = await ctx.send(embed=discord.Embed(
                    description=f"{ctx.author} paid their respects for"
                                f" {reason}",
                    colour=colour,
                ))

            if ENABLE_REACT:
                await message.add_reaction(
                    "\N{REGIONAL INDICATOR SYMBOL LETTER F}")

            f_bucket = F(collections.MutableOrderedSet({ctx.author}),
                         message,
                         colour,
                         ctx)
            self.buckets[ctx.channel] = f_bucket
            destroy_bucket_later(self, ctx.channel)
        else:
            await self.append_to_bucket(bucket, ctx.author)
    # FIX: was `except BaseException`, which also swallowed task
    # cancellation and interpreter shutdown signals.
    except Exception:
        traceback.print_exc()
def worker(message):
    """Calculates all conversions on a separate thread."""
    # Parse potential matches by pattern matching.
    tokens = list(lex.tokenize(message))
    if not tokens:
        raise ValueError("No potential unit matches found.")

    # Parse real unit measurements that we can convert.
    quantities = list(parser.parse(*tokens))
    if not quantities:
        raise ValueError("No actual unit matches found.")

    # Map each parsed quantity to the tuple of its convertible equivalents.
    conversion_table = collections.OrderedDict()
    for quantity in quantities:
        compatible = conversions.get_compatible_models(quantity.unit,
                                                       ignore_self=True)
        # Convert to SI first, then fan out to every compatible unit.
        si = quantity.unit.to_si(quantity.value)
        conversion_table[quantity] = tuple(
            models.ValueModel(c.from_si(si), c) for c in compatible)

    embed = discord.Embed(colour=alg.rand_colour())
    mass_msg_added = False

    # Cap the output at 20 fields to respect embed limits.
    for original, equivalent_models in list(conversion_table.items())[:20]:
        rendered = []
        for eq in equivalent_models:
            text = models.pretty_print(
                eq.value,
                eq.name,
                use_long_suffix=True,
                use_std_form=not original.unit.never_use_std_form,
                none_if_rounds_to_zero=True,
            )
            # Drop entries that rounded to zero (pretty_print gave None).
            if text:
                rendered.append(text)

        if not rendered:
            continue

        embed.add_field(
            name=models.pretty_print(
                original.value,
                original.name,
                use_long_suffix=True,
                use_std_form=not original.unit.never_use_std_form,
                none_if_rounds_to_zero=False,
            ),
            value="\n".join(rendered),
        )

        # Only attach the 1G / 1kg disclaimer once.
        if (original.unit.unit_type == models.UnitCategoryModel.FORCE_MASS
                and not mass_msg_added):
            mass_msg_added = True
            embed.set_footer(
                text="This example assumes that mass measurements are "
                     "accelerating at 1G. Likewise, acceleration "
                     "assumes that it applies to 1kg mass.")

    if not len(embed.fields):
        del embed
        raise ValueError("No valid or non-zero conversions found.")

    return embed
async def info(self, ctx, package):
    """
    Shows a summary for the given package name on PyPI, if there is one.
    """
    url = f"https://pypi.org/pypi/{parse.quote(package)}/json"

    # Seems like aiohttp is screwed up and will not parse these URLS.
    # Requests is fine though. Guess I have to use that...
    with ctx.typing():
        conn = await self.acquire_http()
        resp = await conn.get(url=url)
        result = (await resp.json()) if 200 <= resp.status < 300 else None

    if result:
        data = result["info"]

        name = f'{data["name"]} v{data["version"]}'
        url = data["package_url"]
        summary = data.get("summary", "_No summary was provided_")
        author = data.get("author", "Unknown")
        serial = result.get("last_serial", "No serial")
        if isinstance(serial, int):
            serial = f"Serial #{serial}"

        # Shortens the classifier strings.
        classifiers = data.get("classifiers", [])
        if classifiers:
            fixed_classifiers = []
            for classifier in classifiers:
                # FIX: removed a stray debugging `print()` that was left
                # in this loop.
                if "::" in classifier:
                    # Keep only the leaf segment, e.g.
                    # "Programming Language :: Python :: 3" -> "3".
                    _, _, classifier = classifier.rpartition("::")
                classifier = f"`{classifier.strip()}`"
                fixed_classifiers.append(classifier)
            classifiers = ", ".join(sorted(fixed_classifiers))

        other_attrs = {
            "License": data.get("license"),
            "Platform": data.get("platform"),
            "Homepage": data.get("home_page"),
            "Requires Python version": data.get("requires_python"),
            "Classifiers": classifiers,
        }

        embed = discord.Embed(
            title=name,
            description=string.trunc(summary, 2048),
            url=url,
            colour=alg.rand_colour(),
        )
        embed.set_author(name=f"{author}")
        embed.set_footer(text=f"{serial}")

        # Skip attributes PyPI returned empty/None for.
        for attr, value in other_attrs.items():
            if not value:
                continue
            embed.add_field(name=attr, value=value)

        await ctx.send(embed=embed)
    else:
        await ctx.send(f"PyPI said: {resp.reason}", delete_after=10)
async def tldr(self, ctx, page: str, platform=None):
    """
    Similar to man pages, this shows information on how to use a command,
    the difference being that this is designed to be human readable.

    Usage:

    - tldr gcc
    - tldr gcc <platform>

    `platform` can be any of the following:
    - common
    - linux
    - osx
    - sunos
    - windows

    If unspecified, we check all platforms. This will take a little longer
    to respond.
    """
    platform = None if platform is None else platform.lower()
    supported_platforms = ("common", "linux", "osx", "sunos", "windows")

    if platform and platform not in supported_platforms:
        return await ctx.send("Invalid platform.", delete_after=10)
    elif any(x in page for x in "#?/"):
        # Reject characters that would change the raw.githubusercontent URL.
        return await ctx.send("Invalid page name.", delete_after=10)

    url = "https://raw.githubusercontent.com/tldr-pages/tldr/master/pages/"

    conn = await self.acquire_http()

    if platform is None:
        # Try each platform in turn until one has the page.
        resp = None
        for platform in supported_platforms:
            resp = await conn.get(f"{url}{platform}/{page}.md")
            if 200 <= resp.status < 300:
                break
            else:
                # Release failed responses in the background.
                ctx.bot.loop.create_task(resp.release())
    else:
        url += f"{platform}/{page}.md"
        resp = await conn.get(url)

    if resp.status != 200:
        return await ctx.send(f"Error: {resp.reason}.", delete_after=10)

    # FIX: was `"".join(await resp.text())` — joining a str's characters
    # with "" is an identity operation, so the join was pointless.
    content = (await resp.text()).splitlines()

    if not content:
        raise RuntimeError("No response from GitHub. Is the page empty?")
    elif len(content) == 1:
        raise RuntimeError("No body, only a title. Is the page empty?")

    # First line is title if it starts with '#'
    if content[0].startswith("#"):
        title = content.pop(0)[1:].lstrip() + f" ({platform})"
    else:
        title = f"{page} ({platform.title()})"

    paginator = pag.Paginator()
    last_line_was_bullet = False
    for line in content:
        # Removes the blank line between bullets and code examples.
        if last_line_was_bullet and not line.lstrip().startswith("- "):
            if not line.strip():
                last_line_was_bullet = False
                continue
        # FIX: was `line.lstrip().startswith(" ")`, which is always False
        # after lstrip(), so the flag was never set and the blank-line
        # stripping above was dead code. tldr bullet lines start with "- ".
        elif line.lstrip().startswith("- "):
            last_line_was_bullet = True

        paginator.add_line(line)

    pages = []
    for page in paginator.pages:
        page = scrub_tags(page)

        if page.strip():
            pages.append(
                discord.Embed(title=title,
                              description=page,
                              colour=alg.rand_colour()))

    booklet = book.EmbedBooklet(ctx=ctx, pages=pages)
    await booklet.start()
def new_embed():
    """Append a fresh, empty embed for the current query to the accumulator."""
    # NOTE(review): relies on `embeds` and `query` from the enclosing scope.
    page = discord.Embed(title=query,
                         description="",
                         colour=alg.rand_colour())
    embeds.append(page)
async def tldrlegal_logic(self, ctx, query, verbose):
    """
    Helper to prevent code duplication.

    Searches TLDRLegal for ``query``, lets the user pick a result when
    there are several, scrapes the licence page, and replies with a
    summary embed. ``verbose`` switches between short bullet lists and
    full descriptions (and collapses the fields to one column).
    """
    http = await self.acquire_http()

    # Get search results
    async with http.get(f"{base_url}search", params={"q": query}) as resp:
        if resp.status != 200:
            return await ctx.send(f"tldrlegal said {resp.reason!r}")
        results = self.get_results_from_html(await resp.text())

    count = len(results)

    if count == 0:
        return await ctx.send("Nothing was found.", delete_after=15)
    elif count == 1:
        # Get the URL
        # NOTE(review): each result appears to be a (title, url) pair —
        # confirm against get_results_from_html.
        page = results[0]
    else:
        # Multiple hits: let the invoker pick one interactively.
        # Asterisks are swapped for a look-alike so titles don't trigger
        # Markdown emphasis in the picker.
        page = await option_picker(
            ctx, *results, option_formatter=lambda o: o[0].replace("*", "∗"))
        if page is None:
            return await ctx.send("Took too long...")

    # Get the info into an object.
    async with http.get(page[1]) as resp:
        if resp.status != 200:
            return await ctx.send(f"tldrlegal said {resp.reason!r}")
        license_info = self.get_license_info(page[1], await resp.text())

    # Generate embed and send.
    embed = discord.Embed(
        title=license_info.name,
        description=string.trunc(license_info.brief),
        colour=alg.rand_colour(),
        url=license_info.url,
    )

    embed.set_footer(text="Disclaimer: This is only a short summary of the"
                          " Full Text. No information on TLDRLegal is"
                          " legal advice.")

    def fmt(prs):
        # Render (name, description) pairs: full detail when verbose,
        # otherwise just a bullet per name. Truncated to Discord's
        # 1024-char field limit.
        if verbose:
            s = string.trunc("\n".join(f"**{n}** {d}" for n, d in prs), 1024)
        else:
            s = string.trunc("\n".join(f"- {n}" for n, _ in prs), 1024)

        # Prevents errors for empty bodies.
        return s or "—"

    embed.add_field(name="__CAN__",
                    value=fmt(license_info.can),
                    inline=not verbose)
    embed.add_field(name="__CANNOT__",
                    value=fmt(license_info.cant),
                    inline=not verbose)
    embed.add_field(name="__MUST__",
                    value=fmt(license_info.must),
                    inline=not verbose)

    if not verbose:
        embed.add_field(
            name="\u200b",
            value="_Run again using `tldrlegal more <query>` "
                  "to get a longer explanation!_",
        )

    await ctx.send(embed=embed)