Example #1
def google(inp):
    "google <query> -- Returns first google search result for <query>."

    parsed = api_get('web', inp)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: %d: %s' %
                      (parsed['responseStatus'], ''))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    title = text.truncate_str(title, 60)
    content = http.unescape(result['content'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = text.truncate_str(content, 150)

    out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)

    return out
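
All of these snippets lean on text.truncate_str to keep IRC output short, but the helper itself is never shown. A minimal sketch of the behavior they assume (cut near the limit at a word boundary and append an ellipsis); the real CloudBot utility may differ in detail:

def truncate_str(content, length=100, suffix='...'):
    """Truncate content to roughly length characters without splitting words."""
    if len(content) <= length:
        return content
    # cut at the last space before the limit, then append the suffix
    return content[:length].rsplit(' ', 1)[0] + suffix

For example, truncate_str("The quick brown fox jumps", 10) returns "The quick...".
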
Example #3
def urban(inp):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""

    if inp:
        # clean and split the input
        inp = inp.lower().strip()
        parts = inp.split()

        # if the last word is a number, set the ID to that number
        if parts[-1].isdigit():
            id_num = int(parts[-1])
            # remove the ID from the input string
            del parts[-1]
            inp = " ".join(parts)
        else:
            id_num = 1

        # fetch the definitions
        page = http.get_json(define_url, term=inp, referer="http://m.urbandictionary.com")

        if page['result_type'] == 'no_results':
            return 'Not found.'
    else:
        # get a random definition!
        page = http.get_json(random_url, referer="http://m.urbandictionary.com")
        id_num = None

    definitions = page['list']

    if id_num:
        # try getting the requested definition
        try:
            definition = definitions[id_num - 1]

            def_text = " ".join(definition['definition'].split())  # remove excess spaces
            def_text = text.truncate_str(def_text, 200)
        except IndexError:
            return 'Not found.'

        url = definition['permalink']
        output = u"[%i/%i] %s :: %s" % \
                 (id_num, len(definitions), def_text, url)

    else:
        definition = random.choice(definitions)

        def_text = " ".join(definition['definition'].split())  # remove excess spaces
        def_text = text.truncate_str(def_text, 200)

        name = definition['word']
        url = definition['permalink']
        output = u"\x02{}\x02: {} :: {}".format(name, def_text, url)

    return output
Example #4
def kb(inp):
    """kb <topic> -- Gets the first article available on <topic>."""

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + x).text for x in
                ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = u' '.join(desc.split())  # remove excess spaces

    desc = text.truncate_str(desc, 200)

    return u'{} :: {}'.format(desc, http.quote(url, ':/'))
Example #5
def urban(inp):
    """.ud/.urban <phrase> - Looks up <phrase> on urbandictionary.com."""
    base_url = 'http://api.urbandictionary.com/v0'
    define_url = base_url + "/define"

    # fetch the definitions
    try:
        page = http.get_json(define_url, term=inp, referer="http://m.urbandictionary.com")
    except:
        return "Error reading the Urban Dictionary API; please try again later.."

    if page['result_type'] == 'no_results':
        return 'Not found.'

    definitions = page['list']
    definition = random.choice(definitions)

    def_text = " ".join(definition['definition'].split())  # remove excess spaces
    def_text = text.truncate_str(def_text, 200)

    name = definition['word']
    url = definition['permalink']
    out = u"\x02{}\x02: {}".format(name, def_text)

    return out
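
The examples call CloudBot-style http helpers (http.get_json, http.get_xml, http.get_html) whose definitions are not shown. As a rough, assumed stand-in, get_json encodes its keyword arguments into the query string, sends the request (with an optional Referer header), and decodes the JSON response:

import json
import urllib
import urllib2

def get_json(url, referer=None, **kwargs):
    # assumed equivalent of CloudBot's http.get_json; the real helper
    # also deals with user agents, timeouts and character encodings
    if kwargs:
        url += ('&' if '?' in url else '?') + urllib.urlencode(kwargs)
    request = urllib2.Request(url)
    if referer:
        request.add_header('Referer', referer)
    return json.loads(urllib2.urlopen(request).read())
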
Example #6
def rss(inp, say=None):
    "rss <feed> -- Gets the first three items from the RSS feed <feed>."
    limit = 3

    # preset news feeds
    strip = inp.lower().strip()
    if strip == "bukkit":
        feed = "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss"
        limit = 1
    elif strip == "xkcd":
        feed = "http://xkcd.com/rss.xml"
    elif strip == "ars":
        feed = "http://feeds.arstechnica.com/arstechnica/index"
    else:
        feed = inp

    query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit"
    result = web.query(query, {"feed": feed, "limit": limit})

    if not result.rows:
        return "Could not find/read RSS feed."

    for row in result.rows:
        title = text.truncate_str(row["title"], 100)
        try:
            link = web.isgd(row["link"])
        except (web.ShortenError, http.HTTPError, http.URLError):
            link = row["link"]
        say(u"{} - {}".format(title, link))
Example #7
def wiki(inp):
    "wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + x).text for x in
                ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub('\s+', ' ', desc).strip()  # remove excess spaces

    desc = text.truncate_str(desc, 250)

    return '%s -- %s' % (desc, http.quote(url, ':/'))
Example #8
def urban(inp):
    "urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."

    # clean and split the input
    inp = inp.lower().strip()
    parts = inp.split()

    # if the last word is a number, set the ID to that number
    if parts[-1].isdigit():
        id_num = int(parts[-1])
        # remove the ID from the input string
        del parts[-1]
        inp = " ".join(parts)
    else:
        id_num = 1

    # fetch the definitions
    page = http.get_json(base_url,
                         term=inp,
                         referer="http://m.urbandictionary.com")
    defs = page['list']

    if page['result_type'] == 'no_results':
        return 'Not found.'

    # try getting the requested definition
    try:
        output = u"[%i/%i] %s: %s" % \
              (id, len(defs), defs[id - 1]['word'],
              defs[id - 1]['definition'].replace('\r\n', ' '))
    except IndexError:
        return 'Not found.'

    return text.truncate_str(output, 250)
Example #9
def format_output(data):
    """ takes plugin data and returns two strings representing information about that plugin """
    name = data["plugin_name"]
    description = text.truncate_str(data['description'], 30)
    url = data['website']
    authors = data['authors'][0]
    # insert a zero-width space after the first character so IRC clients
    # don't highlight the author if they happen to be in the channel
    authors = authors[0] + u"\u200b" + authors[1:]
    stage = data['stage']

    current_version = data['versions'][0]

    last_update = time.strftime('%d %B %Y %H:%M',
                                time.gmtime(current_version['date']))
    version_number = data['versions'][0]['version']

    bukkit_versions = ", ".join(current_version['game_versions'])
    link = web.try_isgd(current_version['link'])

    if description:
        line_a = u"\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
    else:
        line_a = u"\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)

    line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
                                                                                 last_update, link)

    return line_a, line_b
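
web.isgd and web.try_isgd shorten links through the is.gd service; try_isgd is the forgiving variant that falls back to the original URL. A minimal sketch, assuming is.gd's plain-text create.php endpoint:

import urllib
import urllib2

def isgd(url):
    # is.gd's simple API answers with the short URL as plain text
    return urllib2.urlopen('http://is.gd/create.php?format=simple&url='
                           + urllib.quote(url, '')).read()

def try_isgd(url):
    # assumed behavior: return the long URL unchanged if shortening fails
    try:
        return isgd(url)
    except Exception:
        return url
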
Example #10
def rss(inp, message=None):
    """rss <feed> -- Gets the first three items from the RSS feed <feed>."""
    limit = 3

    # preset news feeds
    strip = inp.lower().strip()
    if strip == "bukkit":
        feed = "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss"
        limit = 1
    elif strip == "xkcd":
        feed = "http://xkcd.com/rss.xml"
    elif strip == "ars":
        feed = "http://feeds.arstechnica.com/arstechnica/index"
    elif strip == "xda":
        feed = "http://feeds.feedburner.com/xda-developers/ShsH?format=xml"
    else:
        feed = inp

    query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit"
    result = web.query(query, {"feed": feed, "limit": limit})
    if not result.rows:
        return "Could not find/read RSS feed."

    for row in result.rows:
        title = text.truncate_str(row["title"], 100)
        try:
            link = web.isgd(row["link"])
        except (web.ShortenError, http.HTTPError, http.URLError):
            link = row["link"]
        message(u"{} - {}".format(title, link))
Example #11
def soundcloudData(url, api_key):
    data = http.get_json(api_url + '/resolve.json?' +
                         urlencode({
                             'url': url,
                             'client_id': api_key
                         }))

    desc = ""
    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))

    genre = ""
    if data['genre']: genre = u"- Genre: \x02{}\x02 ".format(data['genre'])

    duration = ""
    if data['duration']:
        tracklength = float(data['duration']) / 60000
        tracklength = re.match('(.*\...)', str(tracklength)).group(1)
        if tracklength: duration = u"{} mins".format(tracklength)

    url = web.try_isgd(data['permalink_url'])

    return u"\x02{}\x02 by \x02{}\x02 {}".format(data['title'],
                                                 data['user']['username'],
                                                 duration)
Example #12
def wiki(inp):
    """wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."""

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + x).text for x in ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = re.sub('\s+', ' ', desc).strip()  # remove excess spaces

    desc = text.truncate_str(desc, 200)

    return '{} :: {}'.format(desc, http.quote(url, ':/'))
Example #13
def soundcloud(url, api_key):
    data = http.get_json(api_url + '/resolve.json?' +
                         urlencode({
                             'url': url,
                             'client_id': api_key
                         }))

    desc = ""
    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))

    genre = ""
    if data['genre']: genre = u"- Genre: \x02{}\x02 ".format(data['genre'])

    duration = ""
    if data['duration']:
        tracklength = float(data['duration']) / 60000
        tracklength = re.match('(.*\...)', str(tracklength)).group(1)
        if tracklength: duration = u" {} mins -".format(tracklength)

    url = web.try_isgd(data['permalink_url'])

    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}-{} {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre, duration,
        data['playback_count'], data['download_count'], data['comment_count'],
        url)
Example #14
def wolframalpha(inp, bot=None):
    "wa <query> -- Computes <query> using Wolfram Alpha."

    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return "error: missing api key"

    url = 'http://api.wolframalpha.com/v2/query?format=plaintext'

    result = http.get_xml(url, input=inp, appid=api_key)

    # get the URL for a user to view this query in a browser
    query_url = "http://www.wolframalpha.com/input/?i=" + \
                http.quote_plus(inp.encode('utf-8'))
    try:
        short_url = web.isgd(query_url)
    except (web.ShortenError, http.HTTPError):
        short_url = query_url

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    ret = ' - '.join(pod_texts)

    if not pod_texts:
        return 'No results.'

    ret = re.sub(r'\\(.)', r'\1', ret)

    def unicode_sub(match):
        return unichr(int(match.group(1), 16))

    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)

    ret = text.truncate_str(ret, 250)

    if not ret:
        return 'No results.'

    return "%s - %s" % (ret, short_url)
Example #15
def answer(inp):
    "answer <query> -- find the answer to a question on Yahoo! Answers"

    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": inp.strip()}).one()

    short_url = web.isgd(result["Link"])

    # we split the answer and .join() it to remove newlines/extra spaces
    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)

    return '\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
Example #16
def get_steam_info(url):
    page = http.get(url)
    soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")

    data = {}

    data["name"] = soup.find('div', {'class': 'apphub_AppName'}).text
    data["desc"] = truncate_str(
        soup.find('meta', {'name': 'description'})['content'].strip(), 80)

    # get the element details_block
    details = soup.find('div', {'class': 'details_block'})

    # loop over every <b></b> tag in details_block
    for b in details.findAll('b'):
        # get the contents of the <b></b> tag, which is our title
        title = b.text.lower().replace(":", "")
        if title == "languages":
            # we have all we need!
            break

        # find the next element directly after the <b></b> tag
        next_element = b.nextSibling
        if next_element:
            # if the element is some text
            if isinstance(next_element, NavigableString):
                text = next_element.string.strip()
                if text:
                    # we found valid text, save it and continue the loop
                    data[title] = text
                    continue
                else:
                    # the text is blank - sometimes this means there are
                    # useless spaces or tabs between the <b> and <a> tags.
                    # so we find the next <a> tag and carry on to the next
                    # bit of code below
                    next_element = next_element.find_next('a', href=True)

            # if the element is an <a></a> tag
            if isinstance(next_element, Tag) and next_element.name == 'a':
                text = next_element.string.strip()
                if text:
                    # we found valid text (in the <a></a> tag),
                    # save it and continue the loop
                    data[title] = text
                    continue

    data["price"] = soup.find('div', {
        'class': 'game_purchase_price price'
    }).text.strip()

    return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
           u" \x02Price\x02: {price}".format(**data)
Example #17
def answer(inp):
    ".answer <query> -- find the answer to a question on Yahoo! Answers"

    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": inp.strip()}).one()

    short_url = web.isgd(result["Link"])

    # we split the answer and .join() it to remove newlines/extra spaces
    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)

    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
Example #18
def gse(inp):
    """gsearch <query> -- Returns first google search result for <query>."""

    if inp == "!!info":
        return "FurCode gsearch.py for RoboCop Classic."

    query = urllib.quote(inp)
    parsed = api_get(query)

    result = parsed['items'][0]

    title = http.unescape(result['title'])
    title = text.truncate_str(title, 60)
    content = http.unescape(result['snippet'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = text.truncate_str(content, 150)

    return u'{} -- \x02{}\x02: "{}"'.format(result['link'], title, content)
Example #19
def google(inp, bot=None):
    try:
        keys = get_keys(bot.config)
    except KeyError:
        return u"API Keys not configured"

    parsed = http.get_json(API_CS, q=inp, **keys)

    try:
        result = parsed['items'][0]
    except KeyError:
        return u"No results found."

    title = text.truncate_str(result['title'], 60)
    content = result['snippet']

    if not content:
        content = "No description available."
    else:
        content = text.truncate_str(content.replace('\n', ''), 150)

    return u'{} -- \x02{}\x02: "{}"'.format(result['link'], title, content)
Example #20
def google(inp, say=None, api_key=None):
    """g[oogle] <query> - Returns first Google search result for <query>."""
    try:
        parsed = custom_get(inp, api_key)
    except Exception as e:
        return "Error: {}".format(e)
    if 'items' not in parsed:
        return "No results"

    link = web.try_googl(parsed['items'][0]['link'])
    title = text.truncate_str(parsed['items'][0]['title'], 250)
    title = u' '.join(re.sub(u'\r|\n', u' ', title).split()).strip('| ')
    say(u"{} - \x02{}\x02".format(link, title))
Example #21
def reddit(inp):
    """reddit <subreddit> [n] -- Gets a random post from <subreddit>, or gets the [n]th post in the subreddit."""
    id_num = None

    if inp:
        # clean and split the input
        parts = inp.lower().strip().split()

        # find the requested post number (if any)
        if len(parts) > 1:
            url = base_url.format(parts[0].strip())
            try:
                id_num = int(parts[1]) - 1
            except ValueError:
                return "Invalid post number."
        else:
            url = base_url.format(parts[0].strip())
    else:
        url = "http://reddit.com/.json"

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)
    data = data["data"]["children"]

    # get the requested/random post
    # note: id_num can be 0 (the first post), so compare with None instead of truthiness
    if id_num is not None:
        try:
            item = data[id_num]["data"]
        except IndexError:
            length = len(data)
            return "Invalid post number. Number must be between 1 and {}.".format(
                length)
    else:
        item = random.choice(data)["data"]

    item["title"] = text.truncate_str(item["title"], 50)
    item["link"] = short_url.format(item["id"])

    rawtime = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timesince.timesince(rawtime)

    if item["over_18"]:
        item["warning"] = " \x02NSFW\x02"
    else:
        item["warning"] = ""

    return u'\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02' \
    ' {timesince} ago - {ups} upvotes, {downs} downvotes -' \
    ' {link}{warning}'.format(**item)
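
The reddit examples reference module-level constants that were not captured here. Plausible definitions, judging from how they are formatted (an assumption, not taken from the source):

# assumed: base_url takes a subreddit name, short_url a post id
base_url = "http://reddit.com/r/{}/.json"
short_url = "http://redd.it/{}"
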
Example #23
def suggest(inp):
    """suggest <phrase> -- Gets suggested phrases for a google search"""
    suggestions = http.get_json('http://suggestqueries.google.com/complete/search', client='firefox', q=inp)[1]

    if not suggestions:
        return 'no suggestions found'

    out = u", ".join(suggestions)

    # defuckify text (might not be needed now, but I'll keep it)
    soup = BeautifulSoup(out)
    out = soup.get_text()

    return text.truncate_str(out, 200)
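
The suggestqueries endpoint with client='firefox' answers with a JSON array whose second element is the suggestion list, which is why the snippet indexes [1]. A self-contained version of the same request, under that assumed response shape:

import json
import urllib
import urllib2

url = ('http://suggestqueries.google.com/complete/search?'
       + urllib.urlencode({'client': 'firefox', 'q': 'pyth'}))
data = json.loads(urllib2.urlopen(url).read())
# data looks like ["pyth", ["python", "python download", ...]]
print u", ".join(data[1])
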
Example #24
def reddit(inp):
    """reddit <subreddit> [n] - Gets a random post from <subreddit>, or gets the [n]th post in the subreddit."""

    # clean and split the input
    parts = inp.lower().strip().split()
    id_num = None

    # find the requested post number (if any)
    if len(parts) > 1:
        inp = parts[0]
        try:
            id_num = int(parts[1]) - 1
        except ValueError:
            return "Invalid post number."

    try:
        data = http.get_json(base_url.format(inp.strip()))
    except Exception as e:
        return "Error: " + str(e)
    data = data["data"]["children"]

    try:
        # get the requested/random post
        if id_num is not None:
            try:
                item = data[id_num]["data"]
            except IndexError:
                length = len(data)
                return ("Invalid post number. Number must " \
                    "be between 1 and {}.".format(length))
        else:
            item = random.choice(data)["data"]
    except:
        return "I couldn't find any data for \x02{}\x0F.".format(inp)

    item["title"] = text.truncate_str(item["title"], 100)
    item["link"] = short_url.format(item["id"])

    rawtime = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timesince.timesince(rawtime)

    if item["over_18"]:
        item["warning"] = " \x02NSFW\x02"
    else:
        item["warning"] = ""

    return u'{link}{warning} - \x02{title}\x02 - posted by' \
        ' \x02{author}\x02 {timesince} ago - {ups} upvotes,' \
        ' {downs} downvotes'.format(**item)
Example #25
def get_steam_info(url):
    # we get the soup manually because the steam pages have some odd encoding troubles
    page = http.get(url)
    soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")

    name = soup.find('div', {'class': 'apphub_AppName'}).text
    desc = ": " + text.truncate_str(soup.find('div', {'class': 'game_description_snippet'}).text.strip())

    # the page has a ton of returns and tabs
    details = soup.find('div', {'class': 'glance_details'}).text.strip().split(u"\n\n\r\n\t\t\t\t\t\t\t\t\t")
    genre = " - Genre: " + details[0].replace(u"Genre: ", u"")
    date = " - Release date: " + details[1].replace(u"Release Date: ", u"")
    price = ""
    if not "Free to Play" in genre:
        price = " - Price: " + soup.find('div', {'class': 'game_purchase_price price'}).text.strip()

    return name + desc + genre + date + price
Example #26
def soundcloud(url, api_key):
    data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))

    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))
    else:
        desc = ""
    if data['genre']:
        genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
    else:
        genre = ""

    url = web.try_isgd(data['permalink_url'])

    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre, data['playback_count'], data['download_count'],
        data['comment_count'], url)
Example #27
def etymology(inp, say=None):
    """etymology <word> - Retrieves the etymology of chosen word."""
    url = 'http://www.etymonline.com/search'
    try:
        params = {'q': inp}
        h = http.get_html(url, query_params=params)
    except:
        return "Error fetching etymology."
    etym = h.xpath('//section')

    if not etym:
        return 'No etymology found for ' + inp

    etym = etym[0].text_content()
    etym = ' '.join(etym.split())

    say(text.truncate_str(etym, 450))
Example #28
def googleimage(inp, bot=None):
    try:
        keys = get_keys(bot.config)
    except KeyError:
        return u"API Keys not configured"

    parsed = http.get_json(API_CS, q=inp, searchType="image", **keys)

    try:
        result = parsed['items'][0]
        metadata = parsed['items'][0]['image']
    except KeyError:
        return u"No results found."

    dimens = '{}x{}px'.format(metadata['width'], metadata['height'])
    title = text.truncate_str(result['title'], 60)

    return u'{} [{}, {}]'.format(result['link'], dimens, result['mime'])
Example #29
def searchquote(inp, say=None, db=None):
    """searchquote <text> - Returns IDs for quotes matching <text>."""
    db_init(db)

    try:
        ids = db.execute("select id from quotefts where quotefts match ?",
                         ('{} AND active:"1"'.format(
                             tokenize.build_query(inp)), )).fetchall()
    except OperationalError:
        return "Error: must contain one inclusive match clause (+/=)."

    if ids:
        say(
            text.truncate_str(
                "Quotes: {}".format(', '.join([str(id[0]) for id in ids])),
                350))
    else:
        return "None found."
Example #30
def format_item(item, show_url=True):
    """ takes a newegg API item object and returns a description """
    title = text.truncate_str(item["Title"], 50)

    # format the rating nicely if it exists
    if not item["ReviewSummary"]["TotalReviews"] == "[]":
        rating = "Rated {}/5 ({} ratings)".format(
            item["ReviewSummary"]["Rating"],
            item["ReviewSummary"]["TotalReviews"][1:-1])
    else:
        rating = "No Ratings"

    if not item["FinalPrice"] == item["OriginalPrice"]:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)
    else:
        price = item["FinalPrice"]

    tags = []

    if item["Instock"]:
        tags.append("\x02Stock Available\x02")
    else:
        tags.append("\x02Out Of Stock\x02")

    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")

    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")

    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER®\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = u", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
        return u"\x02{}\x02 ({}) - {} - {} - {}".format(
            title, price, rating, tag_text, url)
    else:
        return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
                                                   tag_text)
Example #31
def imdb_url(match):
    id = match.group(4).split('/')[-1]
    if id == "":
        id = match.group(4).split('/')[-2]
    content = http.get_json("http://www.omdbapi.com/", i=id)
    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
        content['Plot'] = text.truncate_str(content['Plot'], 50)
        out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
        if content['Runtime'] != 'N/A':
            out += ' \x02%(Runtime)s\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \
                   ' votes.'
        return out % content
    else:
        return 'Unknown error.'
Example #32
def imdb_url(match):
    imdb_id = match.group(4).split('/')[-1]
    if imdb_id == "":
        imdb_id = match.group(4).split('/')[-2]
    content = http.get_json("http://www.omdbapi.com/", i=imdb_id)
    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
        content['Plot'] = text.truncate_str(content['Plot'], 50)
        out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
        if content['Runtime'] != 'N/A':
            out += ' \x02%(Runtime)s\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \
                   ' votes.'
        return out % content
    else:
        return 'Unknown error.'
Example #33
def dataget(url, wikiurl, wikilen):
    pageid = url['pages'].keys()[0]
    name = url['pages'][pageid][u'title']
    url = url['pages'][pageid][u'revisions'][0]['*'].replace("\n", "")
    if url.startswith("#REDIRECT "):
        return url
    textb = {}
    textb[regexmatch2(url, re.compile(r'.+?(\|sounds=.+?\}\}' + '\n\n' + ')', re.DOTALL))] = ""
    page = http.get_html(wikiurl+name)
    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            plen = len(p.text_content())
            if plen >= wikilen:
                summary = " ".join(p.text_content().splitlines())
                summary = re.sub("\[\d+\]", "", summary)
                summary = text.truncate_str(summary, 250)
                return ("%s :: \x02%s\x02" % (summary, wikiurl+name)).encode('utf8')[:400]

    return "Unknown Error."
Example #35
def wa(inp, api_key=None):
    """wa <query> - Computes <query> using Wolfram Alpha."""

    url = 'http://api.wolframalpha.com/v2/query'

    try:
        params = {'input': inp, 'appid': api_key, 'output': 'json'}
        result = http.get_json(url, query_params=params)
    except:
        return "WolframAlpha API error, please try again in a few minutes."

    if not result['queryresult']['success']:
        return "WolframAlpha query failed."

    data = sorted([pod for pod in result['queryresult']['pods']
                   if 'Input' not in pod['title']],
                  key=lambda x: x['position'])

    if not data:
        return "No results."

    return text.truncate_str(data[0]['subpods'][0]['plaintext'], 230)
Example #36
def wa(inp, api_key=None):
    """.wa/.calc <query> - Computes <query> using Wolfram Alpha."""

    url = 'http://api.wolframalpha.com/v2/query?format=plaintext'

    try:
        result = http.get_xml(url, input=inp, appid=api_key)
    except:
        return "WolframAlpha query timed out for '%s'" % inp

    pod_texts = []
    for pod in result.xpath("//pod"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + '|'.join(results))

    ret = '. '.join(pod_texts)

    if not pod_texts:
        return 'No results'

    ret = re.sub(r'\\(.)', r'\1', ret)

    def unicode_sub(match):
        return unichr(int(match.group(1), 16))

    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)

    if not ret:
        return 'No results'

    return text.truncate_str(ret.split('. ')[0], 230)
Example #37
def mcwiki(inp):
    """mcwiki <phrase> -- Gets the first paragraph of
    the Minecraft Wiki article on <phrase>."""

    try:
        j = http.get_json(api_url, search=inp)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')

    try:
        page = http.get_html(url)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching wiki page: {}".format(e)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub("\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 200)
            return u"{} :: {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
Example #39
def urban(inp, say=None):
    """ud <phrase> - Looks up <phrase> on Urban Dictionary."""
    base_url = 'http://api.urbandictionary.com/v0'
    define_url = base_url + "/define"

    try:
        page = http.get_json(define_url,
                             term=inp,
                             referer="http://m.urbandictionary.com")
    except:
        return "Error reading the Urban Dictionary API; please try again in a few minutes."

    if page['result_type'] == 'no_results':
        return 'Not found.'

    definition = random.choice(page['list'])
    def_text = " ".join(
        definition['definition'].split())  # remove excess spaces
    name = definition['word']

    say(text.truncate_str(u"\x02{}\x02: {}".format(name, def_text), 400))
Example #40
def mcwiki(inp):
    """mcwiki <phrase> -- Gets the first paragraph of
    the Minecraft Wiki article on <phrase>."""

    j = http.get_json(api_url, search=inp)

    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub("\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 200)
            return "{} :: {}".format(summary, url)

    return "Unknown Error."
Example #41
def mcwiki(inp):
    "mcwiki <phrase> -- Gets the first paragraph of" \
    " the Minecraft Wiki article on <phrase>."

    j = http.get_json(api_url, search=inp)

    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub("\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 250)
            return "%s :: \x02%s\x02" % (summary, url)

    return "Unknown Error."
Example #42
def drama(inp):
    """drama <phrase> -- Gets the first paragraph of
    the Encyclopedia Dramatica article on <phrase>."""

    j = http.get_json(api_url, search=inp)

    if not j[1]:
        return "No results found."
    article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = ed_url + http.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub("\[\d+\]", "", summary)
            summary = text.truncate_str(summary, 220)
            return "{} :: {}".format(summary, url)

    return "Unknown Error."
Example #43
def urban(inp):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""

    # clean and split the input
    inp = inp.lower().strip()
    parts = inp.split()

    # if the last word is a number, set the ID to that number
    if parts[-1].isdigit():
        id_num = int(parts[-1])
        # remove the ID from the input string
        del parts[-1]
        inp = " ".join(parts)
    else:
        id_num = 1

    # fetch the definitions
    page = http.get_json(base_url,
                         term=inp,
                         referer="http://m.urbandictionary.com")
    definitions = page['list']

    if page['result_type'] == 'no_results':
        return 'Not found.'

    # try getting the requested definition
    try:
        definition = definitions[id_num - 1]['definition'].replace('\r\n', ' ')
        definition = re.sub('\s+', ' ',
                            definition).strip()  # remove excess spaces
        definition = text.truncate_str(definition, 200)
    except IndexError:
        return 'Not found.'

    url = definitions[id_num - 1]['permalink']

    output = u"[%i/%i] %s :: %s" % \
             (id_num, len(definitions), definition, url)

    return output
Example #44
def soundcloud(url, api_key):
    data = http.get_json(api_url + '/resolve.json?' +
                         urlencode({
                             'url': url,
                             'client_id': api_key
                         }))

    if data['description']:
        desc = u": {} ".format(text.truncate_str(data['description'], 50))
    else:
        desc = ""
    if data['genre']:
        genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
    else:
        genre = ""

    url = web.try_isgd(data['permalink_url'])

    return u"SoundCloud track: \x02{}\x02 by \x02{}user\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre,
        data['playback_count'], data['download_count'], data['comment_count'],
        url)
Example #45
def suggest(inp):
    """suggest <phrase> -- Gets suggested phrases for a google search"""

    page = http.get('http://google.com/complete/search',
                    output='json',
                    client='hp',
                    q=inp)
    page_json = page.split('(', 1)[1][:-1]

    suggestions = json.loads(page_json)[1]
    suggestions = [suggestion[0] for suggestion in suggestions]

    if not suggestions:
        return 'no suggestions found'

    out = u", ".join(suggestions)

    # defuckify text
    soup = BeautifulSoup(out)
    out = soup.get_text()

    return text.truncate_str(out, 200)
Example #46
def urban(inp):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""

    # clean and split the input
    inp = inp.lower().strip()
    parts = inp.split()

    # if the last word is a number, set the ID to that number
    if parts[-1].isdigit():
        id_num = int(parts[-1])
        # remove the ID from the input string
        del parts[-1]
        inp = " ".join(parts)
    else:
        id_num = 1

    # fetch the definitions
    page = http.get_json(base_url, term=inp, referer="http://m.urbandictionary.com")
    defs = page['list']

    if page['result_type'] == 'no_results':
        return 'Not found.'

    # try getting the requested definition
    try:
        definition = defs[id_num - 1]['definition'].replace('\r\n', ' ')
        definition = re.sub(r'\s+', ' ', definition).strip()  # remove excess spaces
        definition = text.truncate_str(definition, 200)
    except IndexError:
        return 'Not found.'

    url = defs[id_num - 1]['permalink']

    output = u"[%i/%i] %s :: %s" % \
             (id_num, len(defs), definition, url)

    return output
Example #47
def reddit(inp):
    """reddit <subreddit> [n] -- Gets a random post from <subreddit>, or gets the [n]th post in the subreddit."""
    id_num = None

    # (opening reconstructed -- the example was truncated in the source;
    #  this mirrors the near-identical Example #21 above)
    if inp:
        # clean and split the input
        parts = inp.lower().strip().split()

        # find the requested post number (if any)
        if len(parts) > 1:
            url = base_url.format(parts[0].strip())
            try:
                id_num = int(parts[1]) - 1
            except ValueError:
                return "Invalid post number."
        else:
            url = base_url.format(parts[0].strip())
    else:
        url = "http://reddit.com/.json"

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)
    data = data["data"]["children"]

    # get the requested/random post
    if id_num is not None:
        try:
            item = data[id_num]["data"]
        except IndexError:
            length = len(data)
            return "Invalid post number. Number must be between 1 and {}.".format(length)
    else:
        item = random.choice(data)["data"]

    item["title"] = text.truncate_str(item["title"], 50)
    item["link"] = short_url.format(item["id"])

    raw_time = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timesince.timesince(raw_time)

    if item["over_18"]:
        item["warning"] = " \x02NSFW\x02"
    else:
        item["warning"] = ""

    return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
           " {timesince} ago -" \
           " {link}{warning}".format(**item)