Example #1
def fetch(start, dest):
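    # request is the bot's shared HTTP helper used by all of these plugins;
    # urlencode escapes the place names so they are safe to embed in the URL.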
    start = request.urlencode(start)
    dest = request.urlencode(dest)
    url = "http://www.travelmath.com/flying-distance/from/{}/to/{}".format(
        start, dest)
    html = request.get(url)
    return html
Example #2
def search(inp):
    json = request.get_json(base_url + request.urlencode(inp))

    if json is None or "error" in json or "errors" in json:
        return ["the server f****d up"]

    data = []
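    # Each result carries a word, its definition, an example and vote counts;
    # judging by these field names, the JSON comes from the Urban Dictionary API.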
    for item in json['list']:
        definition = item['definition']
        word = item['word']
        example = item['example']
        votes_up = item['thumbs_up']
        votes_down = item['thumbs_down']

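        # \x02 is the IRC bold control character, so the word renders in bold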
        output = '\x02' + word + '\x02 '

        try:
            votes = int(votes_up) - int(votes_down)
            if votes > 0:
                votes = '+' + str(votes)
        except (TypeError, ValueError):
            votes = 0

        if votes != 0:
            output = output + '(' + str(votes) + ') '

        output = output + clean_text(definition)

        if example:
            output = output + ' \x02Example:\x02 ' + clean_text(example)

        data.append(output)

    return data
Example #3
def bible(inp, bot=None):
    """bible <passage> -- gets <passage> from the Bible (ESV)"""

    API_KEY = bot.config['api_keys'].get('english_bible', None)

    if API_KEY is None:
        return 'Bible error: no API key configured'

    url = "https://api.esv.org/v3/passage/text/?q=" + request.urlencode(inp)
    json = request.get_json(url, headers={"Authorization": "Token " + API_KEY})

    if 'detail' in json:
        return 'Bible error: ' + json['detail']

    if 'passages' in json and len(json['passages']) == 0:
        return '[Bible] Not found'

    output = '[Bible]'

    if 'canonical' in json:
        output = output + ' \x02' + json['canonical'] + '\x02:'

    if 'passages' in json:
        output = output + ' ' + compress_whitespace('. '.join(json['passages']))

    if len(output) > 320:
        output = output[:320] + '...'

    return output
Example #4
def refresh_cache(inp):
    print "[+] refreshing furry cache"

    global cache
    global lastsearch
    cache = []
    search = inp

    # these are special search queries in the booru
    for word in ['explicit', 'safe', 'nsfw', 'sfw']:
        search = search.replace(word, 'rating:' + word)

    lastsearch = search

    if inp == '':
        postjson = request.get_json('http://e621.net/posts.json?limit=10')
    else:
        postjson = request.get_json(
            'http://e621.net/posts.json?limit=10&tags={}'.format(
                request.urlencode(search)))
    posts = postjson["posts"]

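    # Cache one tuple per post; e621 nests the score under
    # post["score"]["total"] and the image URL under post["file"]["url"]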
    for post in posts:
        post_id = post["id"]
        score = post["score"]["total"]
        url = post["file"]["url"]
        rating = post["rating"]
        tags = ", ".join(post["tags"]["general"])
        cache.append((post_id, score, url, rating, tags))

    random.shuffle(cache)
    return
Example #5
def search(instance, query):
    if instance not in INSTANCES:
        return

    wiki = INSTANCES[instance]
    results = request.get_json(wiki['search'] + request.urlencode(query))

    # OpenSearch-style response: [query, [titles], [descriptions], [urls]]
    titles = results[1]
    descriptions = results[2]
    urls = results[3]

    return (titles, descriptions, urls)
Example #6
def google_translate(to_translate, to_language="auto", from_language="auto"):
    url = ('https://translate.google.com/m?hl={0}&sl={1}&tl={0}'
           '&ie=UTF-8&oe=UTF-8&q={2}').format(
               to_language, from_language, request.urlencode(to_translate))

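    # /m is the lightweight mobile version of Google Translate, which
    # returns plain HTML that is easy to scrape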
    page = request.get_text(url)
    # this will break super badly if google changes their html
    before_trans = 'class="t0">'
    result = page[page.find(before_trans) + len(before_trans):]
    result = result.split("<")[0]
    return result
Example #7
File: bash.py Project: nojusr/Taigabot
def get_bash_quote(inp):
    try:
        inp = request.urlencode(inp)
        html = request.get('http://bash.org/?' + inp)
        soup = BeautifulSoup(html, 'lxml')
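        # bash.org wraps each quote in <p class="quote"> (id and votes)
        # followed by <p class="qt"> (the quote text itself)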
        quote_info = soup.find('p', {'class': 'quote'})
        quote = soup.find('p', {
            'class': 'qt'
        }).text.replace('\n', ' ').replace('\r', ' |')

        id = quote_info.contents[0].text
        votes = quote_info.find('font').text
        return u'\x02{}\x02 ({} votes): {}'.format(id, votes, quote)
    except Exception:
        return "No quote found."
Example #8
def anus_real(inp, nick=None):
    if not inp:
        inp = nick

    inp = request.urlencode(inp)
    html = request.get('http://en.inkei.net/anus/' + inp)
    soup = BeautifulSoup(html, 'lxml')

    details = soup.find(id='elmDescCmmn')
    if details is None:
        return 'Anus: http://en.inkei.net/anus/' + inp

    details = formatting.compress_whitespace(details.text)

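    # Rewrite the page's "Anus of <name>" lead-in to a plain "Anus:" prefix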
    details = re.sub('Anus of [a-zA-Z0-9]+ ', 'Anus: ', details)
    return u'{} - http://en.inkei.net/anus/{}'.format(details, inp)
Example #9
def google(inp, bot=None):
    """google <query> -- Returns first google search result for <query>."""
    url = API_URL + u'?key={}&cx={}&num=1&safe=off&q={}'
    key = bot.config['api_keys']['google']
    cx = bot.config['api_keys']['googleimage']
    result = request.get_json(
        url.format(key, cx, request.urlencode(inp)))['items'][0]

    title = result['title']
    content = formatting.remove_newlines(result['snippet'])
    link = result['link']

    try:
        return u'{} -- \x02{}\x02: "{}"'.format(web.isgd(link), title, content)
    except Exception:
        return u'{} -- \x02{}\x02: "{}"'.format(link, title, content)
Example #10
def amazon(inp):
    """amazon [query] -- Searches amazon for query"""
    if not inp:
        return "usage: amazon <search>"

    inp = request.urlencode(inp)
    html = request.get('https://www.amazon.com/s?k=' + inp)
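    # parse() is this plugin's own scraper for the results page, defined
    # elsewhere in the file; it yields (title, price, url) tuples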
    results = parse(html)

    if len(results) == 0:
        return 'No results found'

    title, price, url = results[0]

    if len(title) > 80:
        title = title[:80] + '...'

    # \x03 = color, 03 = green
    return u'[Amazon] {} \x0303{}\x03 {}'.format(title, price, url)
Example #11
def validate(inp):
    """validate <url> -- Runs url through the w3c markup validator."""

    if not inp.startswith('http'):
        inp = 'https://' + inp

    url = 'https://validator.w3.org/nu/?doc=' + request.urlencode(inp)
    html = request.get(url)
    soup = BeautifulSoup(html, 'lxml')
    results = soup.find('div', attrs={'id': 'results'})
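    # The Nu validator reports each message as an <li> with class "error",
    # "warning" or "info", so counting them summarizes the report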

    errors = len(results.find_all('li', attrs={'class': 'error'}))
    warns = len(results.find_all('li', attrs={'class': 'warning'}))
    info = len(results.find_all('li', attrs={'class': 'info'}))

    if errors == 0 and warns == 0 and info == 0:
        return "[w3c] Successfully validated with no errors"

    return "[w3c] Found {} errors, {} warnings and {} notices.".format(
        errors, warns, info)
Example #12
def parse_ip(ip):
    ip = request.urlencode(ip)
    data = request.get_json('https://ipinfo.io/' + ip,
                            headers={'Accept': 'application/json'})

    if data.get('error') is not None:
        if data['error'].get('title') == 'Wrong ip':
            return '[IP] That IP is not valid'
        else:
            return '[IP] Some error occurred'

    # example for 8.8.8.8
    loc = data.get('loc')  # 37.40, -122.07
    city = data.get('city')  # Mountain View
    country = data.get('country')  # US
    region = data.get('region')  # California
    hostname = data.get('hostname')  # dns.google
    timezone = data.get('timezone')  # unreliable
    ip = data.get('ip')  # 8.8.8.8
    org = data.get('org')  # Google LLC

    return u"[IP] {} - {}, {}, {}".format(org, city, region, country)
Example #13
def koran(inp):
    "koran <chapter.verse> -- gets <chapter.verse> from the Koran. it can also search any text."

    url = 'https://quod.lib.umich.edu/cgi/k/koran/koran-idx?type=simple&q1=' + request.urlencode(inp)
    html = request.get(url)
    soup = BeautifulSoup(html, 'lxml')
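    # The search results page lists each hit as an <li>; only the first
    # four are kept below via iterable.limit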
    query = soup.find_all('li')

    if not query:
        return 'No results for ' + inp

    output = '[Koran] '
    lines = []

    for li in iterable.limit(4, query):
        lines.append(compress_whitespace(li.text))

    output = output + ' '.join(lines)

    if len(output) > 320:
        output = output[:320] + '...'

    return output
Example #14
def etymology(inp):
    "etymology <word> -- Retrieves the etymology of <word>."

    html = request.get(eth_url + request.urlencode(inp))
    soup = BeautifulSoup(html, 'lxml')
    # the page uses weird class names like "section.word__definatieon--81fc4ae"
    # if it breaks change the selector to [class~="word_"]
    results = soup.select('div[class^="word"] section[class^="word__def"] > p')

    if len(results) == 0:
        return 'No etymology found for ' + inp

    output = u'Etymology of "' + inp + '":'

    for i, result in enumerate(results, 1):
        text = formatting.compress_whitespace(result.text.strip())
        output = output + u' \x02{}.\x02 {}'.format(i, text)

    if len(output) > 400:
        output = output[:400] + u'\x0f... More at https://www.etymonline.com/word/' + inp

    return output
Example #15
def define(inp):
    "define <word> -- Fetches definition of <word>."

    html = request.get(dict_url + request.urlencode(inp))
    soup = BeautifulSoup(html, 'lxml')
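    # dict_url points at Wiktionary (see the "More at" link below);
    # each definition entry on the page is a <dd> element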

    definitions = soup.find_all('dd')

    if len(definitions) == 0:
        return "Definition not found"

    output = 'Definition of "' + inp + '":'

    # used to number the many definitions
    i = 1

    for definition in definitions:
        if 'article' in definition['class']:
            text = formatting.compress_whitespace(definition.text.strip())
            output = output + ' \x02' + text + '\x02'
            i = 1

        elif 'entry' in definition['class']:
            definition = definition.find('div', attrs={'class': 'definition'})
            text = formatting.compress_whitespace(definition.text.strip())
            output = output + text.replace(u'\xb0', ' \x02{}.\x02 '.format(i))
            i = i + 1

        # there are 'synonyms' and 'examples' classes too

    # arbitrary length limit
    if len(output) > 360:
        output = output[:360] + '\x0f... More at https://en.wiktionary.org/wiki/' + inp

    return output