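Every snippet on this page demonstrates formatting.truncate_str (imported bare as truncate_str in one case), a helper these IRC-bot plugins use to clip text to a maximum length before sending it to a channel; the \x02 bytes in the output strings are the IRC bold toggle. The snippets also reference module-level names (http, web, API URLs, compiled regexes) that are defined elsewhere in their plugins and not shown here. The helper itself is not shown either; below is a minimal sketch of its assumed behaviour, for reference only (the real helper may differ, e.g. by truncating on a word boundary):

def truncate_str(content, length=100):
    """Clip content to at most length characters, appending '...' if clipped.

    A minimal sketch for illustration only; the real helper may differ.
    """
    if len(content) <= length:
        return content
    return content[:length] + "..."
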
Example #1
def urban(text):
    """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""

    if text:
        # clean and split the input
        text = text.lower().strip()
        parts = text.split()

        # if the last word is a number, set the ID to that number
        if parts[-1].isdigit():
            id_num = int(parts[-1])
            # remove the ID from the input string
            del parts[-1]
            text = " ".join(parts)
        else:
            id_num = 1

        # fetch the definitions
        page = http.get_json(define_url, term=text, referer="http://m.urbandictionary.com")

        if page['result_type'] == 'no_results':
            return 'Not found.'
    else:
        # get a random definition!
        page = http.get_json(random_url, referer="http://m.urbandictionary.com")
        id_num = None

    definitions = page['list']

    if id_num:
        # try getting the requested definition
        try:
            definition = definitions[id_num - 1]

            def_text = " ".join(definition['definition'].split())  # remove excess spaces
            def_text = formatting.truncate_str(def_text, 200)
        except IndexError:
            return 'Not found.'

        url = definition['permalink']

        output = "[{}/{}] {} :: {}".format(id_num, len(definitions), def_text, url)

    else:
        definition = random.choice(definitions)

        def_text = " ".join(definition['definition'].split())  # remove excess spaces
        def_text = formatting.truncate_str(def_text, 200)

        name = definition['word']
        url = definition['permalink']
        output = "\x02{}\x02: {} :: {}".format(name, def_text, url)

    return output
Example #2
def wiki(inp):
    """wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>."""

    x = http.get_xml(search_url, search=inp)

    ns = '{http://opensearch.org/searchsuggest2}'
    items = x.findall(ns + 'Section/' + ns + 'Item')

    if not items:
        if x.find('error') is not None:
            return 'error: %(code)s: %(info)s' % x.find('error').attrib
        else:
            return 'No results found.'

    def extract(item):
        return [item.find(ns + tag).text for tag in ('Text', 'Description', 'Url')]

    title, desc, url = extract(items[0])

    if 'may refer to' in desc:
        title, desc, url = extract(items[1])

    title = paren_re.sub('', title)

    if title.lower() not in desc.lower():
        desc = title + desc

    desc = ' '.join(desc.split())  # remove excess spaces

    desc = formatting.truncate_str(desc, 200)

    # percent-encode the URL for output, leaving ':' and '/' intact
    return '{} :: {}'.format(desc, http.quote(url, ':/'))
Example #3
def format_output(data):
    """ takes plugin data and returns two strings representing information about that plugin """
    name = data["plugin_name"]
    description = formatting.truncate_str(data['description'], 30)
    url = data['website']
    authors = data['authors'][0]
    # insert a zero-width space after the first character so IRC clients
    # don't treat the author's name as a nick mention and ping them
    authors = authors[0] + "\u200b" + authors[1:]
    stage = data['stage']

    current_version = data['versions'][0]

    last_update = time.strftime('%d %B %Y %H:%M',
                                time.gmtime(current_version['date']))
    version_number = data['versions'][0]['version']

    bukkit_versions = ", ".join(current_version['game_versions'])
    link = web.try_isgd(current_version['link'])

    if description:
        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(
            name, authors, description, stage, url)
    else:
        line_a = "\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(
            name, authors, stage, url)

    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(
        version_number, bukkit_versions, last_update, link)

    return line_a, line_b
Example #4
def rss(text, message):
    """rss <feed> -- Gets the first three items from the RSS feed <feed>."""
    limit = 3

    # preset news feeds
    strip = text.lower().strip()
    if strip == "bukkit":
        feed = "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss"
        limit = 1
    elif strip == "xkcd":
        feed = "http://xkcd.com/rss.xml"
    elif strip == "ars":
        feed = "http://feeds.arstechnica.com/arstechnica/index"
    else:
        feed = text

    query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit"
    result = web.query(query, {"feed": feed, "limit": limit})

    if not result.rows:
        return "Could not find/read RSS feed."

    for row in result.rows:
        title = formatting.truncate_str(row["title"], 100)
        try:
            link = web.isgd(row["link"])
        except (web.ShortenError, http.HTTPError, http.URLError):
            link = row["link"]
        message("{} - {}".format(title, link))
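Note: web.query here does not hit a local SQL database; it appears to issue a YQL (Yahoo Query Language) query, with rss as a YQL data table and @feed/@limit as YQL variable placeholders bound from the dict argument. The Yahoo! Answers example below (answers.search) uses the same helper.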
Example #5
def answer(text):
    """answer <query> -- find the answer to a question on Yahoo! Answers"""

    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": text.strip()}).one()

    short_url = web.try_isgd(result["Link"])

    # we split the answer and .join() it to remove newlines/extra spaces
    answer_text = formatting.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)

    return '\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)
Example #6
def reddit(inp):
    """reddit <subreddit> [n] -- Gets a random post from <subreddit>, or gets the [n]th post in the subreddit."""
    id_num = None

    if inp:
        # clean and split the input
        parts = inp.lower().strip().split()
        url = base_url.format(parts[0].strip())

        # find the requested post number (if any)
        if len(parts) > 1:
            try:
                id_num = int(parts[1]) - 1
            except ValueError:
                return "Invalid post number."
    else:
        url = "http://reddit.com/.json"

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)
    data = data["data"]["children"]

    # get the requested/random post
    if id_num is not None:
        try:
            item = data[id_num]["data"]
        except IndexError:
            length = len(data)
            return "Invalid post number. Number must be between 1 and {}.".format(length)
    else:
        item = random.choice(data)["data"]

    item["title"] = formatting.truncate_str(item["title"], 50)
    item["link"] = short_url.format(item["id"])

    raw_time = datetime.fromtimestamp(int(item["created_utc"]))
    item["timesince"] = timesince.timesince(raw_time)

    if item["over_18"]:
        item["warning"] = " \x02NSFW\x02"
    else:
        item["warning"] = ""

    return (
        "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02"
        " {timesince} ago - {ups} upvotes, {downs} downvotes -"
        " {link}{warning}".format(**item)
    )
Example #7
def google(text):
    """google <query> -- Returns first google search result for <query>."""

    parsed = api_get('web', text)
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error searching for pages: {}'.format(parsed['responseStatus']))
    if not parsed['responseData']['results']:
        return 'No results found.'

    result = parsed['responseData']['results'][0]

    title = http.unescape(result['titleNoFormatting'])
    title = formatting.truncate_str(title, 60)
    content = http.unescape(result['content'])

    if not content:
        content = "No description available."
    else:
        content = http.html.fromstring(content).text_content()
        content = formatting.truncate_str(content, 150)

    return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
Example #8
def wolframalpha(text, bot):
    """wa <query> -- Computes <query> using Wolfram Alpha."""
    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return "error: missing api key"

    url = 'http://api.wolframalpha.com/v2/query?format=plaintext'

    result = http.get_xml(url, input=text, appid=api_key)

    # get the URL for a user to view this query in a browser
    query_url = "http://www.wolframalpha.com/input/?i=" + \
                http.quote_plus(text.encode('utf-8'))
    short_url = web.try_isgd(query_url)

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    if not pod_texts:
        return 'No results.'

    ret = ' - '.join(pod_texts)

    def unicode_sub(match):
        return chr(int(match.group(1), 16))

    # decode Wolfram Alpha's '\:xxxx' unicode escapes before stripping the
    # generic backslash escapes, so the '\:' prefix is still intact here
    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)
    ret = re.sub(r'\\(.)', r'\1', ret)

    ret = formatting.truncate_str(ret, 250)

    if not ret:
        return 'No results.'

    return "{} - {}".format(ret, short_url)
Example #9
def get_steam_info(url):
    page = http.get(url)
    soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")

    data = {"name": soup.find('div', {'class': 'apphub_AppName'}).text,
            "desc": truncate_str(soup.find('meta', {'name': 'description'})['content'].strip(), 80)}

    # get the element details_block
    details = soup.find('div', {'class': 'details_block'})

    # loop over every <b></b> tag in details_block
    for b in details.findAll('b'):
        # get the contents of the <b></b> tag, which is our title
        title = b.text.lower().replace(":", "")
        if title == "languages":
            # we have all we need!
            break

        # find the next element directly after the <b></b> tag
        next_element = b.nextSibling
        if next_element:
            # if the element is some text
            if isinstance(next_element, NavigableString):
                text = next_element.string.strip()
                if text:
                    # we found valid text, save it and continue the loop
                    data[title] = text
                    continue
                else:
                    # the text is blank - sometimes this means there are
                    # useless spaces or tabs between the <b> and <a> tags.
                    # so we find the next <a> tag and carry on to the next
                    # bit of code below
                    next_element = next_element.find_next('a', href=True)

            # if the element is an <a></a> tag
            if isinstance(next_element, Tag) and next_element.name == 'a':
                text = next_element.string.strip()
                if text:
                    # we found valid text (in the <a></a> tag),
                    # save it and continue the loop
                    data[title] = text
                    continue

    data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()

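    # the 'genre' and 'release date' keys used below were created by the loop
    # above from the lower-cased <b> tag text, so "release date" contains a
    # space; str.format() looks field names up in its keyword arguments
    # literally, so non-identifier keys work (a missing key raises KeyError)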
    return "\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
           " \x02Price\x02: {price}".format(**data)
Example #10
def format_item(item, show_url=True):
    """ takes a newegg API item object and returns a description """
    title = formatting.truncate_str(item["Title"], 50)

    # format the rating nicely if it exists; TotalReviews arrives as a
    # bracketed string like "[123]", so [1:-1] strips the brackets
    if item["ReviewSummary"]["TotalReviews"] != "[]":
        rating = "Rated {}/5 ({} ratings)".format(
            item["ReviewSummary"]["Rating"],
            item["ReviewSummary"]["TotalReviews"][1:-1])
    else:
        rating = "No Ratings"

    if item["FinalPrice"] != item["OriginalPrice"]:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)
    else:
        price = item["FinalPrice"]

    tags = []

    if item["Instock"]:
        tags.append("\x02Stock Available\x02")
    else:
        tags.append("\x02Out Of Stock\x02")

    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")

    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")

    if item["IsShellShockerItem"]:
        tags.append("\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
    tag_text = ", ".join(tags)

    if show_url:
        # create the item URL and shorten it
        url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
        return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
                                                       tag_text, url)
    else:
        return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
                                                  tag_text)
Example #11
def imdb_url(match):
    imdb_id = match.group(4).split('/')[-1]
    if imdb_id == "":
        # the URL ended with a trailing slash; take the segment before it
        imdb_id = match.group(4).split('/')[-2]
    content = http.get_json("http://www.omdbapi.com/", i=imdb_id)
    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
        content['Plot'] = formatting.truncate_str(content['Plot'], 50)
        out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
        if content['Runtime'] != 'N/A':
            out += ' \x02%(Runtime)s\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \
                   ' votes.'
        return out % content
    else:
        return 'Unknown error.'
Example #12
def suggest(text):
    """suggest <phrase> -- Gets suggested phrases for a google search"""

    page = http.get('http://google.com/complete/search',
                    output='json', client='hp', q=text)
    # the response is JSONP; strip the callback wrapper to get the JSON payload
    page_json = page.split('(', 1)[1][:-1]

    suggestions = json.loads(page_json)[1]
    suggestions = [suggestion[0] for suggestion in suggestions]

    if not suggestions:
        return 'no suggestions found'

    out = ", ".join(suggestions)

    # strip any HTML tags/entities from the text (might not be needed now,
    # but kept as a safeguard)
    soup = BeautifulSoup(out, "html.parser")
    out = soup.get_text()

    return formatting.truncate_str(out, 200)
Example #13
def mcwiki(text):
    """mcwiki <phrase> -- Gets the first paragraph of
    the Minecraft Wiki article on <phrase>."""

    try:
        j = http.get_json(api_url, search=text)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching search results: {}".format(e)
    except ValueError as e:
        return "Error reading search results: {}".format(e)

    if not j[1]:
        return "No results found."

    # we remove items with a '/' in the name, because
    # gamepedia uses sub-pages for different languages
    # for some stupid reason
    items = [item for item in j[1] if "/" not in item]

    if items:
        article_name = items[0].replace(' ', '_').encode('utf8')
    else:
        # there are no items without /, just return a / one
        article_name = j[1][0].replace(' ', '_').encode('utf8')

    url = mc_url + http.quote(article_name, '')

    try:
        page = http.get_html(url)
    except (http.HTTPError, http.URLError) as e:
        return "Error fetching wiki page: {}".format(e)

    for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)  # strip footnote markers like [1]
            summary = formatting.truncate_str(summary, 200)
            return "{} :: {}".format(summary, url)

    # this shouldn't happen
    return "Unknown Error."
Example #14
def drama(text):
    """drama <phrase> -- Gets the first paragraph of
    the Encyclopedia Dramatica article on <phrase>."""

    data = http.get_json(api_url, search=text)

    if not data[1]:
        return "No results found."
    article_name = data[1][0].replace(' ', '_')

    url = ed_url + parse.quote(article_name, '')
    page = http.get_html(url)

    for p in page.xpath('//div[@id="bodyContent"]/p'):
        if p.text_content():
            summary = " ".join(p.text_content().splitlines())
            summary = re.sub(r"\[\d+\]", "", summary)  # strip footnote markers like [1]
            summary = formatting.truncate_str(summary, 220)
            return "{} - {}".format(summary, url)

    return "Unknown Error."
Example #15
def soundcloud(url, api_key):
    data = http.get_json(api_url + '/resolve.json?' +
                         urlencode({
                             'url': url,
                             'client_id': api_key
                         }))

    if data['description']:
        desc = ": {} ".format(formatting.truncate_str(data['description'], 50))
    else:
        desc = ""
    if data['genre']:
        genre = "- Genre: \x02{}\x02 ".format(data['genre'])
    else:
        genre = ""

    url = web.try_isgd(data['permalink_url'])

    return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
        data['title'], data['user']['username'], desc, genre,
        data['playback_count'], data['download_count'], data['comment_count'],
        url)
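A pattern that recurs throughout these examples is collapsing newlines and runs of whitespace with " ".join(text.split()) before truncating, so that multi-line API text fits on one IRC line. A standalone illustration, reusing the truncate_str sketch from the top of the page:

raw = "Some    definition\ntext  with\tmessy whitespace.  " * 10
clean = " ".join(raw.split())    # collapse all whitespace runs to single spaces
print(truncate_str(clean, 200))  # clip to 200 characters for IRC output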