def bible(inp, bot=None):
    """bible <passage> -- gets <passage> from the Bible (ESV)"""
    API_KEY = bot.config['api_keys'].get('english_bible', None)
    if API_KEY is None:
        return 'Bible error: no API key configured'

    url = "https://api.esv.org/v3/passage/text/?q=" + request.urlencode(inp)
    json = request.get_json(url, headers={"Authorization": "Token " + API_KEY})

    if 'detail' in json:
        return 'Bible error: ' + json['detail']
    if 'passages' in json and len(json['passages']) == 0:
        return '[Bible] Not found'

    # \x02 toggles IRC bold
    output = '[Bible]'
    if 'canonical' in json:
        output = output + ' \x02' + json['canonical'] + '\x02:'
    if 'passages' in json:
        output = output + ' ' + formatting.compress_whitespace('. '.join(json['passages']))

    if len(output) > 320:
        output = output[:320] + '...'
    return output
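
# A sketch of the ESV response shape that bible() relies on -- only the
# 'canonical' and 'passages' fields read above are shown; the real API
# returns additional fields that are ignored here:
#
#   {
#       "canonical": "John 11:35",
#       "passages": ["John 11:35\n\n  [35] Jesus wept.\n"]
#   }
#
# which would yield output roughly like:
#   [Bible] \x02John 11:35\x02: John 11:35 [35] Jesus wept.
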
def define(inp):
    """define <word> -- Fetches definition of <word>."""
    html = request.get(dict_url + request.urlencode(inp))
    soup = BeautifulSoup(html, 'lxml')

    definitions = soup.find_all('dd')
    if len(definitions) == 0:
        return "Definition not found"

    output = 'Definition of "' + inp + '":'

    # used to number the many definitions
    i = 1
    for definition in definitions:
        if 'article' in definition['class']:
            # heading entry (e.g. part of speech): bold it and restart numbering
            text = formatting.compress_whitespace(definition.text.strip())
            output = output + ' \x02' + text + '\x02'
            i = 1
        elif 'entry' in definition['class']:
            definition = definition.find('div', attrs={'class': 'definition'})
            text = formatting.compress_whitespace(definition.text.strip())
            # replace the degree-sign sense marker with a bold number
            output = output + text.replace(u'\xb0', ' \x02{}.\x02 '.format(i))
            i = i + 1
        # there are 'synonyms' and 'examples' entries too, which are skipped

    # arbitrary length limit
    if len(output) > 360:
        output = output[:360] + '\x0f... More at https://en.wiktionary.org/wiki/' + inp

    return output
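
# Illustrative output only (assumes dict_url points at a Wiktionary-style page
# whose <dd class="article"> entries hold the part of speech and whose
# <dd class="entry"> blocks hold the numbered senses):
#
#   define('cat')
#   -> 'Definition of "cat": \x02Noun\x02 \x021.\x02 An animal of the family Felidae...'
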
def anus_real(inp, nick=None):
    if not inp:
        inp = nick
    inp = request.urlencode(inp)
    html = request.get('http://en.inkei.net/anus/' + inp)
    soup = BeautifulSoup(html, 'lxml')

    details = soup.find(id='elmDescCmmn')
    if details is None:
        return 'Anus: http://en.inkei.net/anus/' + inp

    details = formatting.compress_whitespace(details.text)
    details = re.sub('Anus of [a-zA-Z0-9]+ ', 'Anus: ', details)
    return u'{} - http://en.inkei.net/anus/{}'.format(details, inp)
def koran(inp):
    """koran <chapter.verse> -- gets <chapter.verse> from the Koran. It can also search any text."""
    url = 'https://quod.lib.umich.edu/cgi/k/koran/koran-idx?type=simple&q1=' + request.urlencode(inp)
    html = request.get(url)
    soup = BeautifulSoup(html, 'lxml')

    query = soup.find_all('li')
    if not query:
        return 'No results for ' + inp

    # keep at most the first four result lines
    lines = []
    for li in iterable.limit(4, query):
        lines.append(formatting.compress_whitespace(li.text))

    output = '[Koran] ' + ' '.join(lines)
    if len(output) > 320:
        output = output[:320] + '...'
    return output
def etymology(inp):
    """etymology <word> -- Retrieves the etymology of <word>."""
    html = request.get(eth_url + request.urlencode(inp))
    soup = BeautifulSoup(html, 'lxml')

    # the page uses hashed class names like "section.word__defination--81fc4ae";
    # if this breaks, change the selector to [class~="word_"]
    results = soup.select('div[class^="word"] section[class^="word__def"] > p')
    if len(results) == 0:
        return 'No etymology found for ' + inp

    output = u'Etymology of "' + inp + '":'
    i = 1
    for result in results:
        text = formatting.compress_whitespace(result.text.strip())
        output = output + u' \x02{}.\x02 {}'.format(i, text)
        i = i + 1

    if len(output) > 400:
        output = output[:400] + '\x0f... More at https://www.etymonline.com/word/' + inp

    return output
def clean_text(text):
    # strip square brackets (e.g. verse/footnote markers) and collapse whitespace
    return formatting.compress_whitespace(
        text.replace('[', '').replace(']', ''))
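
# Example (assuming formatting.compress_whitespace collapses runs of whitespace
# into single spaces): only the bracket characters are removed, not the text
# between them.
#
#   clean_text('Jesus wept.  [35]')  ->  'Jesus wept. 35'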