Example #1
def imgur(inp):
    "imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them. If [subreddit] is undefined, return any imgur images"
    if inp:
        # see if the input ends with "nsfw"
        show_nsfw = inp.endswith(" nsfw")

        # remove "nsfw" from the input string after checking for it
        if show_nsfw:
            inp = inp[:-5].strip().lower()

        url = base_url.format(inp.strip())
    else:
        url = "http://www.reddit.com/domain/imgur.com/.json"
        show_nsfw = False

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)

    data = data["data"]["children"]
    random.shuffle(data)

    # filter list to only have imgur links
    filtered_posts = [i["data"] for i in data if is_valid(i["data"])]

    if not filtered_posts:
        return "No images found."

    items = []

    headers = {"Authorization": "Client-ID b5d127e6941b07a"}

    # loop over the list of posts
    for post in filtered_posts:
        if post["over_18"] and not show_nsfw:
            continue

        match = imgur_re.search(post["url"])
        if match.group(1) == 'a/':
            # post is an album
            url = album_api.format(match.group(2))
            images = http.get_json(url, headers=headers)["data"]

            # loop over the images in the album and add to the list
            for image in images:
                items.append(image["id"])

        elif match.group(2) is not None:
            # post is an image
            items.append(match.group(2))

    if not items:
        return "No images found (use .imgur <subreddit> nsfw to show explicit content)"

    if show_nsfw:
        return "{} \x02NSFW\x02".format(
            web.isgd("http://imgur.com/" + ','.join(items)))
    else:
        return web.isgd("http://imgur.com/" + ','.join(items))
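
The function above relies on several module-level names that are not shown here: base_url, album_api, imgur_re and is_valid. A minimal sketch of what they might look like, inferred only from how imgur() uses them; these exact definitions are assumptions, not the plugin's actual code:

import re

# Hypothetical definitions inferred from usage above; the real plugin may differ.
base_url = "http://www.reddit.com/r/{}/.json"           # subreddit listing as JSON
album_api = "https://api.imgur.com/3/album/{}/images"   # imgur v3 album images endpoint
imgur_re = re.compile(r'imgur\.com/(a/)?(\w+)')         # group(1) == 'a/' for albums, group(2) is the id


def is_valid(post):
    # keep only reddit posts that actually link to imgur
    return "imgur.com" in post["url"]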
Example #2
def get_series_info(seriesname):
    res = {"error": None, "ended": False, "episodes": None, "name": None}
    # http://thetvdb.com/wiki/index.php/API:GetSeries
    try:
        query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
    except URLError:
        res["error"] = "error contacting thetvdb.com"
        return res
    series_id = ""
    try:
        series_id = query.xpath('//id/text()')
    except:
        print "Failed"

    if not series_id:
        result = "\x02Could not find show:\x02 %s" % seriesname
    else:
        series_name = query.xpath('//SeriesName/text()')[0]
        overview = query.xpath('//Overview/text()')[0]
        firstaired = query.xpath('//FirstAired/text()')[0]
        #imdb_id = query.xpath('//IMDB_ID/text()')[0]
        #imdb_url = web.isgd("http://www.imdb.com/title/%s" % imdb_id)
        tvdb_url = web.isgd("http://thetvdb.com/?tab=series&id=%s" % series_id[0])
        status = tv_next(seriesname)
        result = '\x02%s\x02 (%s) \x02-\x02 \x02%s\x02 - [%s] - %s' % (series_name, firstaired, status, tvdb_url, overview)

    return result
Example #3
def shorten(inp):
    "shorten <url> - Makes an is.gd shortlink to the url provided."

    try:
        return web.isgd(inp)
    except (web.ShortenError, http.HTTPError) as error:
        return error
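
Every example on this page leans on web.isgd(), which shortens a URL through is.gd and raises web.ShortenError when the service refuses. A minimal sketch of such a helper, written against is.gd's documented simple-text API (https://is.gd/dev.php); this is an illustration of the idea, not the framework's actual implementation:

import urllib
import urllib2


class ShortenError(Exception):
    pass


def isgd(url):
    # in "simple" mode is.gd answers with the short URL as plain text
    api = "https://is.gd/create.php?format=simple&url=" + urllib.quote(url, safe="")
    try:
        response = urllib2.urlopen(api, timeout=10)
    except urllib2.URLError as e:
        raise ShortenError(str(e))
    result = response.read().strip()
    if result.lower().startswith("error"):
        # failures come back as an "Error: ..." body in simple mode
        raise ShortenError(result)
    return result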
Example #4
def rss(inp, message=None):
    """rss <feed> -- Gets the first three items from the RSS feed <feed>."""
    limit = 3

    # preset news feeds
    strip = inp.lower().strip()
    if strip == "bukkit":
        feed = "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss"
        limit = 1
    elif strip == "xkcd":
        feed = "http://xkcd.com/rss.xml"
    elif strip == "ars":
        feed = "http://feeds.arstechnica.com/arstechnica/index"
    elif strip == "xda":
        feed = "http://feeds.feedburner.com/xda-developers/ShsH?format=xml"
    else:
        feed = inp

    query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit"
    result = web.query(query, {"feed": feed, "limit": limit})
    print result.raw
    if not result.rows:
        return "Could not find/read RSS feed."

    for row in result.rows:
        title = text.truncate_str(row["title"], 100)
        try:
            link = web.isgd(row["link"])
        except (web.ShortenError, http.HTTPError, http.URLError):
            link = row["link"]
        message(u"{} - {}".format(title, link))
Example #5
def randombukkitplugin(inp, reply=None):
    if not path.exists("plugins/data/bukgetplugins"):
        with open("plugins/data/bukgetplugins", "w") as f:
            f.write(http.get("http://api.bukget.org/3/plugins/bukkit"))
    jsahn = json.loads(open("plugins/data/bukgetplugins", "r").read())
    pickslug = random.choice(jsahn)['slug']
    data = getplugininfo(pickslug)
    name = data['plugin_name']
    description = data['description']
    url = data['website']
    authors = data['authors'][0]
    authors = authors[0] + u"\u200b" + authors[1:]
    stage = data['stage']
    lastUpdate = time.strftime('%d %B %Y %H:%M',
                               time.gmtime(data['versions'][0]['date']))
    lastVersion = data['versions'][0]['version']
    bukkitver = ", ".join(data['versions'][0]['game_versions'])
    link = web.isgd(data['versions'][0]['link'])
    if description != "":
        reply("\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(
            name, authors, description, stage, url))
    else:
        reply("\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(
            name, authors, stage, url))
    reply("Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(
        lastVersion, bukkitver, lastUpdate, link))
Example #6
def bukkitplugin(inp, reply=None):
    """plugin <bukkit plugin slug> - Look up a plugin on dev.bukkit.org"""
    data = getplugininfo(inp.lower())
    try:
        name = data['plugin_name']
    except (KeyError, TypeError):
        # data is not a plugin dict (e.g. an error string) - pass it through
        return data
    description = data['description']
    url = data['website']
    authors = data['authors'][0]
    authors = authors[0] + u"\u200b" + authors[1:]
    stage = data['stage']
    lastUpdate = time.strftime('%d %B %Y %H:%M',
                               time.gmtime(data['versions'][0]['date']))
    lastVersion = data['versions'][0]['version']
    bukkitver = ", ".join(data['versions'][0]['game_versions'])
    link = web.isgd(data['versions'][0]['link'])
    if description != "":
        reply("\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(
            name, authors, description, stage, url))
    else:
        reply("\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(
            name, authors, stage, url))
    reply("Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(
        lastVersion, bukkitver, lastUpdate, link))
Example #7
def get_series_info(seriesname):
    res = {"error": None, "ended": False, "episodes": None, "name": None}
    # http://thetvdb.com/wiki/index.php/API:GetSeries
    try:
        query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
    except URLError:
        res["error"] = "error contacting thetvdb.com"
        return res
    series_id = ""
    try:
        series_id = query.xpath('//id/text()')
    except:
        print "Failed"

    if not series_id:
        result = "\x02Could not find show:\x02 %s" % seriesname
    else:
        series_name = query.xpath('//SeriesName/text()')[0]
        overview = query.xpath('//Overview/text()')[0]
        firstaired = query.xpath('//FirstAired/text()')[0]
        #imdb_id = query.xpath('//IMDB_ID/text()')[0]
        #imdb_url = web.isgd("http://www.imdb.com/title/%s" % imdb_id)
        tvdb_url = web.isgd("http://thetvdb.com/?tab=series&id=%s" %
                            series_id[0])
        status = tv_next(seriesname)
        result = '\x02%s\x02 (%s) \x02-\x02 \x02%s\x02 - [%s] - %s' % (
            series_name, firstaired, status, tvdb_url, overview)

    return result
Example #8
def shorten(inp):
    "shorten <url> - Makes an is.gd shortlink to the url provided."

    try:
        return web.isgd(inp)
    except (web.ShortenError, http.HTTPError) as error:
        return error
Example #9
def image(inp, reply=None, bot=None):
    """image <query> -- Returns the first Google Image result for <query>."""
    try:
        cx = bot.config['api_keys']['googleimage']
        key = bot.config['api_keys']['google']
        query = [key, cx, '+'.join(inp.split())]
        image = api_get('image', query)['items'][0]['link']
    except:
        cx = bot.config['api_keys']['googleimage']
        key = bot.config['api_keys']['google2']
        query = [key, cx, '+'.join(inp.split())]
        image = api_get('image', query)['items'][0]['link']
        #url = ('https://www.google.co.uk/search?tbm=isch&hl=en-GB&source=hp&bi'
        #       'w=&bih=&q={0}&gbv=2&oq={0}&gs_l=img.3..0l10.1471.1673.0.1792.3'
        #       '.3.0.0.0.0.86.231.3.3.0....0...1ac.1.34.img..0.3.231.tFWV7YPBE'
        #       'c8')
        #url = url.format('+'.join(inp.split()))
        #soup = http.get_soup(url)
        #for link in soup.find_all('a'):
        #    try:
        #        if link.get('class')[0] == 'rg_l':
        #            image = link.get('href')[15:].split('&')[0]
        #            break
        #    except TypeError:
        #        pass
    try:
        reply(web.isgd(image))
    except:
        reply(image)
Example #10
def rss(inp, say=None):
    "rss <feed> -- Gets the first three items from the RSS feed <feed>."
    limit = 3

    # preset news feeds
    strip = inp.lower().strip()
    if strip == "bukkit":
        feed = "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss"
        limit = 1
    elif strip == "xkcd":
        feed = "http://xkcd.com/rss.xml"
    elif strip == "ars":
        feed = "http://feeds.arstechnica.com/arstechnica/index"
    else:
        feed = inp

    query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit"
    result = web.query(query, {"feed": feed, "limit": limit})

    if not result.rows:
        return "Could not find/read RSS feed."

    for row in result.rows:
        title = text.truncate_str(row["title"], 100)
        try:
            link = web.isgd(row["link"])
        except (web.ShortenError, http.HTTPError, http.URLError):
            link = row["link"]
        say(u"{} - {}".format(title, link))
Example #11
def get_yandere_tags(inp):
    url = "https://yande.re/post?tags=%s" % inp.replace(" ", "_")
    soup = http.get_soup(url)
    imagelist = soup.find("ul", {"id": "post-list-posts"}).findAll("li")
    image = imagelist[random.randint(0, len(imagelist) - 1)]
    imageid = image["id"].replace("p", "")
    title = image.find("img")["title"]
    src = image.find("a", {"class": "directlink"})["href"]
    return "\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Example #12
def yandere(inp, reply=None):
    "yandere [tags] -- Yande.re -- Gets a random image from Yande.re."

    if inp: return get_yandere_tags(inp)

    id, title, image = yandere_cache.pop()
    reply(u'\x034NSFW\x03: \x02(%s)\x02 %s: %s' % (id, title[:75], web.isgd(image)))
    if len(yandere_cache) < 3:
        refresh_cache()
Example #13
def get_yandere_tags(inp):
    url = 'https://yande.re/post?tags=%s' % inp.replace(' ','_')
    soup = http.get_soup(url)
    imagelist = soup.find('ul', {'id': 'post-list-posts'}).findAll('li')
    image = imagelist[random.randint(0,len(imagelist)-1)]
    imageid = image["id"].replace('p','')
    title = image.find('img')['title']
    src = image.find('a', {'class': 'directlink'})["href"]
    return u"\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Example #14
def lmgtfy(inp, bot=None):
    "lmgtfy [phrase] - Posts a google link for the specified phrase"

    link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp)

    try:
        return web.isgd(link)
    except (web.ShortenError, http.HTTPError):
        return link
Example #15
def lmgtfy(inp, bot=None):
    "lmgtfy [phrase] - Posts a google link for the specified phrase"

    link = "http://lmgtfy.com/?q=%s" % http.quote_plus(inp)

    try:
        return web.isgd(link)
    except (web.ShortenError, http.HTTPError):
        return link
Example #16
def lmgtfy(inp):
    """lmgtfy [phrase] - Posts a google link for the specified phrase"""

    link = "http://lmgtfy.com/?q={}".format(http.quote_plus(inp))

    try:
        return web.isgd(link)
    except (web.ShortenError, http.HTTPError):
        return link
Example #17
def yandere(inp, reply=None):
    "yandere [tags] -- Yande.re -- Gets a random image from Yande.re."

    if inp: return get_yandere_tags(inp)

    id, title, image = yandere_cache.pop()
    reply(u'\x034NSFW\x03: \x02(%s)\x02 %s: %s' %
          (id, title[:75], web.isgd(image)))
    if len(yandere_cache) < 3:
        refresh_cache()
Example #18
def get_yandere_tags(inp):
    url = 'https://yande.re/post?tags=%s' % inp.replace(' ', '_')
    soup = http.get_soup(url)
    imagelist = soup.find('ul', {'id': 'post-list-posts'}).findAll('li')
    image = imagelist[random.randint(0, len(imagelist) - 1)]
    imageid = image["id"].replace('p', '')
    title = image.find('img')['title']
    src = image.find('a', {'class': 'directlink'})["href"]
    return u"\x034NSFW\x03: \x02({})\x02 {}: {}".format(
        imageid, title, web.isgd(http.unquote(src)))
Example #19
def answer(inp):
    "answer <query> -- find the answer to a question on Yahoo! Answers"

    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": inp.strip()}).one()

    short_url = web.isgd(result["Link"])

    # we split the answer and .join() it to remove newlines/extra spaces
    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)

    return '\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
Example #20
def wolframalpha(inp, bot=None):
    "wa <query> -- Computes <query> using Wolfram Alpha."

    api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)

    if not api_key:
        return "error: missing api key"

    url = 'http://api.wolframalpha.com/v2/query?format=plaintext'

    result = http.get_xml(url, input=inp, appid=api_key)

    # get the URL for a user to view this query in a browser
    query_url = "http://www.wolframalpha.com/input/?i=" + \
                http.quote_plus(inp.encode('utf-8'))
    try:
        short_url = web.isgd(query_url)
    except (web.ShortenError, http.HTTPError):
        short_url = query_url

    pod_texts = []
    for pod in result.xpath("//pod[@primary='true']"):
        title = pod.attrib['title']
        if pod.attrib['id'] == 'Input':
            continue

        results = []
        for subpod in pod.xpath('subpod/plaintext/text()'):
            subpod = subpod.strip().replace('\\n', '; ')
            subpod = re.sub(r'\s+', ' ', subpod)
            if subpod:
                results.append(subpod)
        if results:
            pod_texts.append(title + ': ' + ', '.join(results))

    ret = ' - '.join(pod_texts)

    if not pod_texts:
        return 'No results.'

    ret = re.sub(r'\\(.)', r'\1', ret)

    def unicode_sub(match):
        return unichr(int(match.group(1), 16))

    ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)

    ret = text.truncate_str(ret, 250)

    if not ret:
        return 'No results.'

    return "%s - %s" % (ret, short_url)
Example #21
def implying(inp, reply=None, bot=None):
    """><query>.jpg -- Returns the first Google Image result for <query>.jpg"""
    inp = inp.string[1:].split('.')
    filetype = inp[1]
    cx = bot.config['api_keys']['googleimage']
    key = bot.config['api_keys']['google']
    query = [key, cx, '+'.join(inp[0].split()), filetype]
    image = api_get('images', query)['items'][0]['link']
    try:
        reply(web.isgd(image))
    except:
        reply(image)
Example #22
def answer(inp):
    ".answer <query> -- find the answer to a question on Yahoo! Answers"

    query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1"
    result = web.query(query, {"query": inp.strip()}).one()

    short_url = web.isgd(result["Link"])

    # we split the answer and .join() it to remove newlines/extra spaces
    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)

    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
Example #23
def google(inp, bot=None):
    """google <query> -- Returns first google search result for <query>."""
    inp = request.urlencode(inp)

    url = API_URL + u'?key={}&cx={}&num=1&safe=off&q={}'
    cx = bot.config['api_keys']['googleimage']
    search = '+'.join(inp.split())
    key = bot.config['api_keys']['google']
    result = request.get_json(url.format(key, cx, search.encode('utf-8')))['items'][0]

    title = result['title']
    content = formatting.remove_newlines(result['snippet'])
    link = result['link']

    try:
        return u'{} -- \x02{}\x02: "{}"'.format(web.isgd(link), title, content)
    except Exception:
        return u'{} -- \x02{}\x02: "{}"'.format(link, title, content)
Example #24
def message(post):
    if post['rating'] == u'e':
        rating = '\x02\x034NSFW\x03\x02'
    elif post['rating'] == u'q':
        rating = '\x02\x037questionable\x03\x02'
    elif post['rating'] == u's':
        rating = '\x02\x033safe\x03\x02'
    else:
        rating = 'unknown'

    id = '\x02#{}\x02'.format(post['id'])
    score = post['score']
    url = web.isgd(post['file_url'])
    size = formatting.filesize(post['file_size'])
    tags = post['tags']
    if len(tags) > 80:
        tags = '{}... (and {} more)'.format(tags[:80], tags.count(' '))  # this count() is wrong lol, close enough

    return "[{}] {} ({}) - Score: {} - Rating: {} - Tags: {}".format(id, url, size, score, rating, tags)
Example #25
def google(inp, bot=None, db=None, chan=None):
    """google <query> -- Returns first google search result for <query>."""
    trimlength = database.get(db,'channels','trimlength','chan',chan)
    if not trimlength: trimlength = 9999
    try:
        cx = bot.config['api_keys']['googleimage']
        key = bot.config['api_keys']['google']
        query = [key, cx, '+'.join(inp.split())]
        result = api_get('None', query)['items'][0]
    except:
        cx = bot.config['api_keys']['googleimage']
        key = bot.config['api_keys']['google2']
        query = [key, cx, '+'.join(inp.split())]
        result = api_get('None', query)['items'][0]
    title = result['title']
    content = http.html.fromstring(result['snippet'].replace('\n', '')).text_content()
    link = result['link']
    try:
        return u'{} -- \x02{}\x02: "{}"'.format(web.isgd(link), title, content)
    except:
        return u'{} -- \x02{}\x02: "{}"'.format(link, title, content)
Example #26
def steamcalc(inp, db=None):
    "steamcalc <user> -- Check the value of <user>'s steam account."
    db_init(db)

    if " " in inp:
        return "Invalid Steam ID"

    uid = inp.strip().lower()
    url = "http://steamcalculator.com/id/{}".format(http.quote_plus(uid))

    # get the web page
    try:
        page = http.get_html(url)
    except Exception as e:
        return "Could not get Steam game listing: {}".format(e)

    # extract the info we need
    try:
        count_text = page.xpath("//div[@id='rightdetail']/text()")[0]
        count = int(count_re.findall(count_text)[0])

        value_text = page.xpath("//div[@id='rightdetail']/h1/text()")[0]
        value = float(value_re.findall(value_text)[0])
    except IndexError:
        return "Could not get Steam game listing."

    # save the info in the DB for steam rankings
    db.execute(
        "insert or replace into steam_rankings(id, value, count)"
        "values(?,?,?)", (uid, value, count))
    db.commit()

    # shorten the URL
    try:
        short_url = web.isgd(url)
    except web.ShortenError:
        short_url = url

    return u"\x02Games:\x02 {}, \x02Total Value:\x02 ${:.2f} USD - {}".format(
        count, value, short_url)
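
steamcalc() also depends on two module-level regexes, count_re and value_re, that are not shown. Hypothetical definitions consistent with how they are used above (pulling a game count and a dollar value out of the page's #rightdetail block); the real patterns may differ:

import re

count_re = re.compile(r'(\d+)')          # first integer, e.g. the game count in "123 games"
value_re = re.compile(r'(\d+\.\d{2})')   # a price figure like 456.78 taken from the $-prefixed total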
Example #27
def steamcalc(inp, db=None):
    "steamcalc <user> -- Check the value of <user>'s steam account."
    db_init(db)

    if " " in inp:
        return "Invalid Steam ID"

    uid = inp.strip().lower()
    url = "http://steamcalculator.com/id/{}".format(http.quote_plus(uid))

    # get the web page
    try:
        page = http.get_html(url)
    except Exception as e:
        return "Could not get Steam game listing: {}".format(e)

    # extract the info we need
    try:
        count_text = page.xpath("//div[@id='rightdetail']/text()")[0]
        count = int(count_re.findall(count_text)[0])

        value_text = page.xpath("//div[@id='rightdetail']/h1/text()")[0]
        value = float(value_re.findall(value_text)[0])
    except IndexError:
        return "Could not get Steam game listing."

    # save the info in the DB for steam rankings
    db.execute("insert or replace into steam_rankings(id, value, count)"
            "values(?,?,?)", (uid, value, count))
    db.commit()

    # shorten the URL
    try:
        short_url = web.isgd(url)
    except web.ShortenError:
        short_url = url

    return u"\x02Games:\x02 {}, \x02Total Value:\x02 ${:.2f} USD - {}".format(count, value, short_url)
Example #28
def image(inp, bot=None):
    """image <query> -- Returns the first Google Image result for <query>."""
    if type(inp) is unicode:
        filetype = None
    else:
        inp, filetype = inp.string[1:].split('.')

    cx = bot.config['api_keys']['googleimage']
    search = '+'.join(inp.split())
    key = bot.config['api_keys']['google']

    if filetype:
        url = API_URL + u'?key={}&cx={}&searchType=image&num=1&safe=off&q={}&fileType={}'
        result = request.get_json(url.format(key, cx, search.encode('utf-8'),
                                             filetype))['items'][0]['link']
    else:
        url = API_URL + u'?key={}&cx={}&searchType=image&num=1&safe=off&q={}'
        result = request.get_json(url.format(key, cx, search.encode('utf-8')))['items'][0]['link']

    try:
        return web.isgd(result)
    except Exception as e:
        print '[!] Error while shortening:', e
        return result
Example #29
def fact(inp, say=False, nick=False):
    "fact -- Gets a random fact from OMGFACTS."

    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        fact = ''.join(response.find(text=True))

        if fact:
            fact = fact.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    try:
        url = web.isgd(link)
    except (web.ShortenError, http.HTTPError):
        url = link

    return "%s - %s" % (fact, url)
Example #30
def wow_armoury_format(data, link):
    """Format armoury data into a human readable string"""

    if data.status_code != 200 and data.status_code != 404:
        # The page returns 404 if the character or realm is not found.
        try:
            data.raise_for_status()
        except Exception as e:
            return 'An error occurred while trying to fetch the data. ({})'.format(
                str(e))

    data = data.json()

    if len(data) == 0:
        return 'Could not find any results.'

    if 'reason' in data:
        # Something went wrong (i.e. realm does not exist, character does not exist, or page not found).
        return data['reason']

    if 'name' in data:
        niceurl = link.replace('/api/wow/', '/wow/en/') + '/simple'

        try:
            return '{0} is a level \x0307{1}\x0F {2} {3} on {4} with \x0307{5}\x0F achievement points and \x0307{6}' \
                   '\x0F honourable kills. Armoury Profile: {7}' \
                .format(data['name'], data['level'], wow_get_gender(data['gender']), wow_get_class(data['class'], True),
                        data['realm'], data['achievementPoints'], data['totalHonorableKills'], web.isgd(niceurl))
        except Exception as e:
            return 'Unable to fetch information for {}. Does the realm or character exist? ({})'.format(
                niceurl, str(e))

    return 'An unexpected error occurred.'
Example #31
def genre(inp, nick='', db=None, bot=None, notice=None):
    """genre -- Displays information for specified genre
    from last.fm db. """

    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    genretag = inp

    response = http.get_json(api_url, method="tag.search",
                             api_key=api_key, tag=genretag, limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    tagdetails = response["results"]["tagmatches"]

    
    try:
        if "url" in tagdetails["tag"]:
            link = tagdetails["tag"]["url"]
            linkshort = web.isgd(link)
            tagname = response["results"]["opensearch:Query"]["searchTerms"]
            tagname = tagname.title()
        else:
            return "Error: No such genre, check spelling."
    except TypeError:
        return "Error: No description found of this genre."

    responsesimilar = http.get_json(api_url, method="tag.getsimilar",
                                    api_key=api_key, tag=genretag)

    tagsimilar = responsesimilar["similartags"]["tag"]

    simgenstr = str(tagsimilar)
    
    if "name" in simgenstr:
        #First genre
        simgen1 = simgenstr.split("u'name': u'" ,1)[1]
        simgen2 = simgen1.split("'",1)[0]
        #Second genre
        simgen3 = simgen1.split("u'name': u'", 1)[1]
        simgen4 = simgen3.split("'",1)[0]
        #Third genre
        simgen5 = simgen3.split("u'name': u'", 1)[1]
        simgen6 = simgen5.split("'",1)[0]
        similartag = '{}, {}, {}'.format(simgen2, simgen4, simgen6)
    else: 
        return "Error: No such genre, check spelling."


    responsetop = http.get_json(api_url, method="tag.gettopartists",
                                api_key=api_key, tag=genretag)

    tagtopartist = responsetop["topartists"]["artist"]

    topartstr = str(tagtopartist)
    #First artist
    topart1 = topartstr.split("u'name': u'" ,1)[1]
    topart2 = topart1.split("'",1)[0]
    #Second artist
    topart3 = topart1.split("u'name': u'", 1)[1]
    topart4 = topart3.split("'",1)[0]
    #Third artist
    topart5 = topart3.split("u'name': u'", 1)[1]
    topart6 = topart5.split("'",1)[0]
    #Fourth artist
    topart7 = topart5.split("u'name': u'", 1)[1]
    topart8 = topart7.split("'",1)[0]
    #Fifth artist
    topart9 = topart7.split("u'name': u'", 1)[1]
    topart10 = topart9.split("'",1)[0]
    topartists = '{}, {}, {}, {}, {}'.format(topart2, topart4, topart6, topart8, topart10)


    responsedesc = http.get_json(api_url, method="tag.getInfo",
                                 api_key=api_key, tag=genretag)

    tagdesc = responsedesc["tag"]["wiki"]

    try:
        genredesc = tagdesc["summary"]
        genredesc = re.sub('<[^>]*>', '', genredesc)
        #genredesc = genredesc.split(".", 1)[0]
        genredesc = genredesc.replace("&quot;", "")
        genredesc = (genredesc[:225] + '...') if len(genredesc) > 225 else genredesc
    except TypeError:
        return "Error: No summary found for this genre, check spelling."

    out = ''

    if tagname:
        out += u'\x02{}\x0f: '.format(tagname)
    if genredesc:
        out += u'{}'.format(genredesc)
    if similartag:
        out += u' \x02Similar genres\x0f: ({})'.format(similartag)
    if topartists:
        out += u' \x02Top artists\x0f: ({})'.format(topartists)
    if linkshort:
        out += u' ({})'.format(linkshort)

    return out
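
The string-splitting over str(...) above is fragile (it depends on the repr of a Python 2 dict). A short sketch of reading the same "name" fields directly from the decoded JSON, assuming the response shapes implied by the code (similartags/tag and topartists/artist are lists of dicts, or a single dict when there is only one match):

def _tag_names(entries, limit):
    # Last.fm returns a bare dict instead of a list when only one item matches
    if isinstance(entries, dict):
        entries = [entries]
    return ', '.join(entry["name"] for entry in entries[:limit])

# e.g. inside genre():
#   similartag = _tag_names(responsesimilar["similartags"]["tag"], 3)
#   topartists = _tag_names(responsetop["topartists"]["artist"], 5)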
Example #32
def danbooru(inp, reply=None, input=None):
    "danbooru <tags> -- Gets a random image from danbooru.donmai.us"
    global db_lastsearch
    global danbooru_cache
    inp = inp.split(' ')
    filetype = inp[-1]
    filetypes = ['png', 'jpg', 'jpeg']
    if filetype not in filetypes:
        filetype = None
    try:
        inp.pop(inp.index(filetype))
    except ValueError:
        pass
    if len(inp) >= 2:
        inp = ' '.join(inp)
    else:
        inp = ''.join(inp)

    search = inp.lower().split()
    for i, n in enumerate(search):
        if n == u'gif':
            search[i] = 'animated_gif'
    if len(search) >= 2:
        search = ' '.join(search)
    else:
        search = ''.join(search)
    if not search in db_lastsearch or len(danbooru_cache) < 2: db_refresh_cache(search)
    db_lastsearch = search

    if len(danbooru_cache) == 0:
        reply('No results')
        return

    id, score, url, rating, tags = danbooru_cache.pop()
    if filetype:
        counter = 0
        while not url.endswith(filetype):
            try:
                if counter == 5:
                    reply('No results')
                    return
                id, score, url, rating, tags = danbooru_cache.pop()
            except IndexError:
                counter += 1
                db_refresh_cache(search)

    if rating == u'e': rating = "\x02\x034NSFW\x03\x02"
    elif rating == u'q': rating = "\x02\x037Questionable\x03\x02"
    elif rating == u's': rating = "\x02\x033Safe\x03\x02"
    url = 'http://danbooru.donmai.us' + url

    try:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(id, score, rating, web.isgd(url))
    except:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(id, score, rating, url)
Example #33
def gelbooru(inp, reply=None, input=None):
    "gelbooru <tags> -- Gets a random image from gelbooru.com"
    global gb_lastsearch
    global gelbooru_cache
    inp = inp.split(' ')
    filetype = inp[-1]
    filetypes = ['png', 'jpg', 'jpeg']
    if filetype not in filetypes:
        filetype = None
    try:
        inp.pop(inp.index(filetype))
    except ValueError:
        pass
    if len(inp) >= 2:
        inp = ' '.join(inp)
    else:
        inp = ''.join(inp)

    if input.trigger == u'loli':
        search = 'loli' + '+' + inp.lower()
    elif input.trigger == u'shota':
        search = 'shota' + '+' + inp.lower()
    elif input.trigger == u'futa' or input.trigger == u'futanari':
        search = 'futanari' + '+' + inp.lower()
    elif input.trigger == u'trap':
        search = 'trap' + '+' + inp.lower()
    else:
        search = inp.lower()
    search = search.split(' ')
    for i, n in enumerate(search):
        if n == u'gif':
            search[i] = 'animated_gif'
    if len(search) >= 2:
        search = ' '.join(search)
    else:
        search = ''.join(search)
    if not search in gb_lastsearch or len(gelbooru_cache) < 2: gb_refresh_cache(search)
    gb_lastsearch = search

    if len(gelbooru_cache) == 0:
        reply('No results')
        return

    id, score, url, rating, tags = gelbooru_cache.pop()
    if filetype:
        counter = 0
        while not url.endswith(filetype):
            try:
                if counter == 5:
                    reply('No results')
                    return
                id, score, url, rating, tags = gelbooru_cache.pop()
            except IndexError:
                counter += 1
                gb_refresh_cache(search)

    if rating == u'e': rating = "\x02\x034NSFW\x03\x02"
    elif rating == u'q': rating = "\x02\x037Questionable\x03\x02"
    elif rating == u's': rating = "\x02\x033Safe\x03\x02"

    try:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(id, score, rating, web.isgd(url))
    except:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(id, score, rating, url)
Example #34
def genre(inp, nick='', db=None, bot=None, notice=None):
    """genre -- Displays information for specified genre
    from last.fm db. """

    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    genretag = inp

    response = http.get_json(api_url,
                             method="tag.search",
                             api_key=api_key,
                             tag=genretag,
                             limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    tagdetails = response["results"]["tagmatches"]

    try:
        if "url" in tagdetails["tag"]:
            link = tagdetails["tag"]["url"]
            linkshort = web.isgd(link)
            tagname = response["results"]["opensearch:Query"]["searchTerms"]
            tagname = tagname.title()
        else:
            return "Error: No such genre, check spelling."
    except TypeError:
        return "Error: No description found of this genre."

    responsesimilar = http.get_json(api_url,
                                    method="tag.getsimilar",
                                    api_key=api_key,
                                    tag=genretag)

    tagsimilar = responsesimilar["similartags"]["tag"]

    simgenstr = str(tagsimilar)

    if "name" in simgenstr:
        #First genre
        simgen1 = simgenstr.split("u'name': u'", 1)[1]
        simgen2 = simgen1.split("'", 1)[0]
        #Second genre
        simgen3 = simgen1.split("u'name': u'", 1)[1]
        simgen4 = simgen3.split("'", 1)[0]
        #Third genre
        simgen5 = simgen3.split("u'name': u'", 1)[1]
        simgen6 = simgen5.split("'", 1)[0]
        similartag = '{}, {}, {}'.format(simgen2, simgen4, simgen6)
    else:
        return "Error: No such genre, check spelling."

    responsetop = http.get_json(api_url,
                                method="tag.gettopartists",
                                api_key=api_key,
                                tag=genretag)

    tagtopartist = responsetop["topartists"]["artist"]

    topartstr = str(tagtopartist)
    #First artist
    topart1 = topartstr.split("u'name': u'", 1)[1]
    topart2 = topart1.split("'", 1)[0]
    #Second artist
    topart3 = topart1.split("u'name': u'", 1)[1]
    topart4 = topart3.split("'", 1)[0]
    #Third artist
    topart5 = topart3.split("u'name': u'", 1)[1]
    topart6 = topart5.split("'", 1)[0]
    #Fourth artist
    topart7 = topart5.split("u'name': u'", 1)[1]
    topart8 = topart7.split("'", 1)[0]
    #Fifth artist
    topart9 = topart7.split("u'name': u'", 1)[1]
    topart10 = topart9.split("'", 1)[0]
    topartists = '{}, {}, {}, {}, {}'.format(topart2, topart4, topart6,
                                             topart8, topart10)

    responsedesc = http.get_json(api_url,
                                 method="tag.getInfo",
                                 api_key=api_key,
                                 tag=genretag)

    tagdesc = responsedesc["tag"]["wiki"]

    try:
        genredesc = tagdesc["summary"]
        genredesc = re.sub('<[^>]*>', '', genredesc)
        #genredesc = genredesc.split(".", 1)[0]
        genredesc = genredesc.replace("&quot;", "")
        genredesc = (genredesc[:225] +
                     '...') if len(genredesc) > 225 else genredesc
    except TypeError:
        return "Error: No summary found for this genre, check spelling."

    out = ''

    if tagname:
        out += u'\x02{}\x0f: '.format(tagname)
    if genredesc:
        out += u'{}'.format(genredesc)
    if similartag:
        out += u' \x02Similar genres\x0f: ({})'.format(similartag)
    if topartists:
        out += u' \x02Top artists\x0f: ({})'.format(topartists)
    if linkshort:
        out += u' ({})'.format(linkshort)

    return out
Example #35
def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
    """weather <location> [dontsave] -- Gets weather data
    for <location> from Wunderground."""

    api_key = bot.config.get("api_keys", {}).get("wunderground")

    if not api_key:
        return "Error: No wunderground API details."

    # initialise weather DB
    db.execute("create table if not exists weather(nick primary key, loc)")

    # if there is no input, try getting the users last location from the DB
    if not inp:
        location = db.execute("select loc from weather where nick=lower(:nick)",
                              {"nick": nick}).fetchone()
        print(location)
        if not location:
            # no location saved in the database, send the user help text
            notice(weather.__doc__)
            return
        loc = location[0]

        # no need to save a location, we already have it
        dontsave = True
    else:
        # see if the input ends with "dontsave"
        dontsave = inp.endswith(" dontsave")

        # remove "dontsave" from the input string after checking for it
        if dontsave:
            loc = inp[:-9].strip().lower()
        else:
            loc = inp

    location = http.quote_plus(loc)

    request_url = base_url.format(api_key, "geolookup/forecast/conditions", location)
    response = http.get_json(request_url)

    if 'location' not in response:
        try:
            location_id = response['response']['results'][0]['zmw']
        except KeyError:
            return "Could not get weather for that location."

        # get the weather again, using the closest match
        request_url = base_url.format(api_key, "geolookup/forecast/conditions", "zmw:" + location_id)
        response = http.get_json(request_url)

    if response['location']['state']:
        place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(response['location']['city'],
                                                                  response['location']['state'],
                                                                  response['location']['country'])
    else:
        place_name = "\x02{}\x02 (\x02{}\x02)".format(response['location']['city'],
                                                      response['location']['country'])

    forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0]
    forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][1]

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place": place_name,
        "conditions": response['current_observation']['weather'],
        "temp_f": response['current_observation']['temp_f'],
        "temp_c": response['current_observation']['temp_c'],
        "humidity": response['current_observation']['relative_humidity'],
        "wind_kph": response['current_observation']['wind_kph'],
        "wind_mph": response['current_observation']['wind_mph'],
        "wind_direction": response['current_observation']['wind_dir'],
        "today_conditions": forecast_today['conditions'],
        "today_high_f": forecast_today['high']['fahrenheit'],
        "today_high_c": forecast_today['high']['celsius'],
        "today_low_f": forecast_today['low']['fahrenheit'],
        "today_low_c": forecast_today['low']['celsius'],
        "tomorrow_conditions": forecast_tomorrow['conditions'],
        "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c": forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c": forecast_tomorrow['low']['celsius'],
        "url": web.isgd(response["current_observation"]['forecast_url'] + "?apiref=e535207ff4757b18")
    }

    reply("{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
          "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
          "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
          "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
          "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(**weather_data))

    if location and not dontsave:
        db.execute("insert or replace into weather(nick, loc) values (:nick, :loc)",
                   {"nick": nick.lower(), "loc": loc})
        db.commit()
Example #36
def steam(inp):
    """steam [search] - Search for specified game/trailer/DLC"""
    page = http.get("http://store.steampowered.com/search/?term=" + inp)
    soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")
    result = soup.find('a', {'class': 'search_result_row'})
    return get_steam_info(result['href']) + " - " + web.isgd(result['href'])
Example #37
def profile(inp, nick='', db=None, bot=None, notice=None):
    """profile -- Displays information for selected profile
    from last.fm db. """

    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"


    fetchprof = db.execute("select acc from lastfm where nick=lower(?)",
                       (inp,)).fetchone()

    fetchprof = fetchprof[0] if fetchprof else inp
    

    response = http.get_json(api_url, method="user.getinfo",
                             api_key=api_key, user=fetchprof)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    userprof = response["user"]


    username = userprof["name"]
    userreal = userprof["realname"]
    userreal = userreal.strip(' ')
    usercntry = userprof["country"]
    userage = userprof["age"]
    userplays = userprof["playcount"]
    usergendr = userprof["gender"]
    usertime = userprof["registered"]["#text"]
    userurl = userprof["url"]
    urlshort = web.isgd(userurl)


    #If name not given, outputs 'Unknown'
    if userreal == '':
        userreal = "Unknown"

    #Converts country code to word
    if str(usercntry) == "NZ":
        usercntry = "New Zealand"
    elif str(usercntry) == "CA":
        usercntry = "Canada"
    elif str(usercntry) == "US":
        usercntry = "the United States"
    elif str(usercntry) == "UK":
        usercntry = "the United Kingdom"
    elif str(usercntry) == "AU":
        usercntry = "Australia"

    #Converts gender to word
    if str(usergendr) == "m":
        usergendr = "man"
    elif str(usergendr) == "f":
        usergendr = "woman"
    else:
        usergendr = "unknown gender"

    #Account age
    date_method = '%Y-%m-%d %H:%M'
    regstrdate = datetime.strptime(usertime, date_method) 
    todaysdate = datetime.now()
    ##todaysdate = todaysdate.strftime(date_method)
    accage = todaysdate - regstrdate
    accage = accage.days
    accageyrs = int(accage / 365.25)
    accagemnths = int(accage / 30.33)
    if accagemnths > 12:
        accagemnths = int(accagemnths % 12)
    accage = int(accage % 30.33)

    out = ''

    if username:
        out += u'\x02Last.fm profile for {}:\x0f'.format(username)
    if userreal:
        out += u' {}'.format(userreal)
    if usergendr:
        out += u' is a {}'.format(usergendr)
    if usercntry:
        out += u' from {}'.format(usercntry)
    if userage:
        out += u' aged {}'.format(userage)
    if userplays:
        out += u' who has played {} songs'.format(userplays)
    if accageyrs > 1:
        out += u' in {} years and {} months.'.format(accageyrs, accagemnths)
    else:
        out += u' in {} months and {} days.'.format(accagemnths, accage)
    if urlshort:
        out += u' ({}).'.format(urlshort)

    return out
Example #38
def steam(inp):
    """steam [search] - Search for specified game/trailer/DLC"""
    page = http.get("http://store.steampowered.com/search/?term=" + inp)
    soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8")
    result = soup.find('a', {'class': 'search_result_row'})
    return get_steam_info(result['href']) + " - " + web.isgd(result['href'])
Example #39
def imgur(inp):
    """imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them.
     If [subreddit] is undefined, return any imgur images"""
    if inp:
        # see if the input ends with "nsfw"
        show_nsfw = inp.endswith(" nsfw")

        # remove "nsfw" from the input string after checking for it
        if show_nsfw:
            inp = inp[:-5].strip().lower()

        url = base_url.format(inp.strip())
    else:
        url = "http://www.reddit.com/domain/imgur.com/.json"
        show_nsfw = False

    try:
        data = http.get_json(url, user_agent=http.ua_chrome)
    except Exception as e:
        return "Error: " + str(e)

    data = data["data"]["children"]
    random.shuffle(data)

    # filter list to only have imgur links
    filtered_posts = [i["data"] for i in data if is_valid(i["data"])]

    if not filtered_posts:
        return "No images found."

    items = []

    headers = {
        "Authorization": "Client-ID b5d127e6941b07a"
    }

    # loop over the list of posts
    for post in filtered_posts:
        if post["over_18"] and not show_nsfw:
            continue

        match = imgur_re.search(post["url"])
        if match.group(1) == 'a/':
            # post is an album
            url = album_api.format(match.group(2))
            images = http.get_json(url, headers=headers)["data"]

            # loop over the images in the album and add to the list
            for image in images:
                items.append(image["id"])

        elif match.group(2) is not None:
            # post is an image
            items.append(match.group(2))

    if not items:
        return "No images found (use .imgur <subreddit> nsfw to show explicit content)"

    if show_nsfw:
        return "{} \x02NSFW\x02".format(web.isgd("http://imgur.com/" + ','.join(items)))
    else:
        return web.isgd("http://imgur.com/" + ','.join(items))
Example #40
def gelbooru(inp, reply=None, input=None):
    "gelbooru <tags> -- Gets a random image from gelbooru.com"
    global gb_lastsearch
    global gelbooru_cache
    inp = inp.split(' ')
    filetype = inp[-1]
    filetypes = ['png', 'jpg', 'jpeg']
    if filetype not in filetypes:
        filetype = None
    try:
        inp.pop(inp.index(filetype))
    except ValueError:
        pass
    if len(inp) >= 2:
        inp = ' '.join(inp)
    else:
        inp = ''.join(inp)

    if input.trigger == u'loli':
        search = 'loli' + '+' + inp.lower()
    elif input.trigger == u'shota':
        search = 'shota' + '+' + inp.lower()
    elif input.trigger == u'futa' or input.trigger == u'futanari':
        search = 'futanari' + '+' + inp.lower()
    elif input.trigger == u'trap':
        search = 'trap' + '+' + inp.lower()
    else:
        search = inp.lower()
    search = search.split(' ')
    for i, n in enumerate(search):
        if n == u'gif':
            search[i] = 'animated_gif'
    if len(search) >= 2:
        search = ' '.join(search)
    else:
        search = ''.join(search)
    if not search in gb_lastsearch or len(gelbooru_cache) < 2:
        gb_refresh_cache(search)
    gb_lastsearch = search

    if len(gelbooru_cache) == 0:
        reply('No results')
        return

    id, score, url, rating, tags = gelbooru_cache.pop()
    if filetype:
        counter = 0
        while not url.endswith(filetype):
            try:
                if counter == 5:
                    reply('No results')
                    return
                id, score, url, rating, tags = gelbooru_cache.pop()
            except IndexError:
                counter += 1
                gb_refresh_cache(search)

    if rating == 'e':
        rating = "\x02\x034NSFW\x03\x02"
    elif rating == 'q':
        rating = "\x02\x037Questionable\x03\x02"
    elif rating == 's':
        rating = "\x02\x033Safe\x03\x02"

    try:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(
            id, score, rating, web.isgd(url))
    except:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(
            id, score, rating, url)
Example #41
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [user] [dontsave] -- Displays the now playing (or last played)
     track of LastFM user [user]."""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # check if the user asked us not to save his details
    dontsave = inp.endswith(" dontsave")
    if dontsave:
        user = inp[:-9].strip().lower()
    else:
        user = inp

    db.execute("create table if not exists lastfm(nick primary key, acc)")

    if not user:
        user = db.execute("select acc from lastfm where nick=lower(?)",
                          (nick,)).fetchone()
        if not user:
            notice(lastfm.__doc__)
            return
        user = user[0]

    response = http.get_json(api_url, method="user.getrecenttracks",
                             api_key=api_key, user=user, limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    if not "track" in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = u'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = u'last listened to'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = u' ({} ago)'.format(time_since)

    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]
    link = track["url"]

    try:
        link = web.isgd(link)
    except:
        print "Error shortening link"

    title2 = unicode(title)
    artist2 = unicode(artist)

    response2 = http.get_json(api_url, method="track.getinfo",
                              api_key=api_key,track=title2, artist=artist2, username=user,  autocorrect=1)

    trackdetails = response2["track"]
    
    if type(trackdetails) == list:
        track2 = trackdetails[0]
    elif type(trackdetails) == dict:
        track2 = trackdetails

    if "userplaycount" in trackdetails:
        playcounts = trackdetails["userplaycount"]
    else:
        playcounts = 0

    toptags = http.get_json(api_url, method="artist.gettoptags",
                              api_key=api_key, artist=artist)
    genreList = []
    genres = "("

    if "tag" in toptags["toptags"]:
        for(i, tag) in enumerate(toptags["toptags"]["tag"]):
            genreList.append(tag["name"])
            if(i == 2):
                break
        for singleGenre in genreList:
            if(singleGenre == genreList[-1]):
                genres += u"{}".format(singleGenre)
                genres += ")"
            else:
                genres += u"{}, ".format(singleGenre)
    else:
        genres = "(No tags)"

    length1 = track2["duration"]
    lengthsec = float(length1) / 1000
    length = time.strftime('%M:%S', time.gmtime(lengthsec))
    length = length.lstrip("0")

    out = u'{} {} "{}"'.format(user, status, title)
    
    if artist:
        out += u' by \x02{}\x0f'.format(artist)
    if album:
        out += u' from the album \x02{}\x0f'.format(album)
    if length:
        out += u' [{}]'.format(length)
    if playcounts:
        out += u' [plays: {}]'.format(playcounts)
    if playcounts == 0:
        out += u' [plays: {}]'.format(playcounts)
    if genres:
        out += u' {}'.format(genres)

    # append ending based on what type it was
    out += ending

    if inp and not dontsave:
        db.execute("insert or replace into lastfm(nick, acc) values (?,?)",
                     (nick.lower(), user))
        db.commit()

    return out
Example #42
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [user] [dontsave] -- Displays the now playing (or last played) track of LastFM user [user]. Other commands are: .gi .genre .profile .top .b .compare"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # check if the user asked us not to save his details
    dontsave = inp.endswith(" dontsave")
    if dontsave:
        user = inp[:-9].strip().lower()
    else:
        user = inp

    db.execute("create table if not exists lastfm(nick primary key, acc)")

    if not user:
        user = db.execute("select acc from lastfm where nick=lower(?)",
                          (nick,)).fetchone()
        if not user:
            notice(lastfm.__doc__)
            return
        user = user[0]

    response = http.get_json(api_url, method="user.getrecenttracks",
                             api_key=api_key, user=user, limit=1)

    

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    if not "track" in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = u'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = u'last listened to'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = u' ({} ago)'.format(time_since)

    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]
    link = track["url"]
    linkshort = web.isgd(link)

    title2 = unicode(title)
    artist2 = unicode(artist)

    response2 = http.get_json(api_url, method="track.getinfo",
                              api_key=api_key,track=title2, artist=artist2, username=user,  autocorrect=1)

    trackdetails = response2["track"]
    
    if type(trackdetails) == list:
        track2 = trackdetails[0]
    elif type(trackdetails) == dict:
        track2 = trackdetails

    if "userplaycount" in trackdetails:
        playcounts = trackdetails["userplaycount"]
    else:
        playcounts = 0


    if "tag" in track2["toptags"]:
        genres1 = track2["toptags"]["tag"]
        genresstr = str(genres1)
        #First genre
        genres3 = genresstr.split("u'name': u'" ,1)[1]
        genres4 = genres3.split("'",1)[0]
        genres = genres4
        
    else:
        genres = "(No tags found)"
    try:
        #Second genre
        genres5 = genres3.split("u'name': u'", 1)[1]
        genres6 = genres5.split("'",1)[0]
        genres = genres4, genres6
        genres = str(genres)
        genres = genres.replace("'", "")
    except UnboundLocalError:
        genres = "(No tags found)"
    except IndexError:
        genres = '({})'.format(genres)
        
    try:
        #Third genre
        genres7 = genres5.split("u'name': u'", 1)[1]
        genres8 = genres7.split("'",1)[0]
        genres = genres4, genres6, genres8
        genres = str(genres)
        genres = genres.replace("'", "")
    except UnboundLocalError:
        genres = '{}'.format(genres)
    except IndexError:
        genres = '{}'.format(genres)

    length1 = track2["duration"]
    lengthsec = float(length1) / 1000
    length = time.strftime('%H:%M:%S', time.gmtime(lengthsec))
    length = length.lstrip("0:")
    ##length = length.split(":",1)[1]
    if len(length) == 2:
        length = '0:' + length
    elif len(length) == 1:
        length = '0:0' + length

    out = u'{} {} "{}"'.format(user, status, title)
    
    if artist:
        out += u' by \x02{}\x0f'.format(artist)
    if album:
        out += u' from the album \x02{}\x0f'.format(album)
    if length:
        out += u' [{}]'.format(length)
    if playcounts:
        out += u' [plays: {}]'.format(playcounts)
    if playcounts == 0:
        out += u' [plays: {}]'.format(playcounts)
    if genres:
        out += u' {}'.format(genres)
    if linkshort:
        out += u' ({})'.format(linkshort)

    # append ending based on what type it was
    out += ending

    if inp and not dontsave:
        db.execute("insert or replace into lastfm(nick, acc) values (?,?)",
                     (nick.lower(), user))
        db.commit()

    return out
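Note that both Last.fm examples above and below index response2["track"] as soon as the track.getinfo call returns, so an unknown track or a temporary API failure raises a KeyError instead of a readable reply. A minimal guard, sketched on the assumption that track.getinfo reports errors in the same {"error": ..., "message": ...} shape as user.getrecenttracks does earlier in these examples:

    # hypothetical guard, placed inside the function right after the
    # track.getinfo call; degrades to a basic reply without play count,
    # tags or length rather than crashing
    if 'error' in response2 or "track" not in response2:
        return u'{} {} "{}" by \x02{}\x0f ({}){}'.format(
            user, status, title, artist, linkshort, ending)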
Example #43
def lastfm(inp, nick='', db=None, bot=None, notice=None):
    """lastfm [user] [dontsave] -- Displays the now playing (or last played) track of LastFM user [user]. Other commands are: .gi .genre .profile .top .b .compare"""
    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    # check if the user asked us not to save his details
    dontsave = inp.endswith(" dontsave")
    if dontsave:
        user = inp[:-9].strip().lower()
    else:
        user = inp

    db.execute("create table if not exists lastfm(nick primary key, acc)")

    if not user:
        user = db.execute("select acc from lastfm where nick=lower(?)",
                          (nick, )).fetchone()
        if not user:
            notice(lastfm.__doc__)
            return
        user = user[0]

    response = http.get_json(api_url,
                             method="user.getrecenttracks",
                             api_key=api_key,
                             user=user,
                             limit=1)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    if not "track" in response["recenttracks"] or len(
            response["recenttracks"]["track"]) == 0:
        return 'No recent tracks for user "{}" found.'.format(user)

    tracks = response["recenttracks"]["track"]

    if type(tracks) == list:
        # if the user is listening to something, the tracks entry is a list
        # the first item is the current track
        track = tracks[0]
        status = u'is listening to'
        ending = '.'
    elif type(tracks) == dict:
        # otherwise, they aren't listening to anything right now, and
        # the tracks entry is a dict representing the most recent track
        track = tracks
        status = u'last listened to'
        # lets see how long ago they listened to it
        time_listened = datetime.fromtimestamp(int(track["date"]["uts"]))
        time_since = timesince.timesince(time_listened)
        ending = u' ({} ago)'.format(time_since)

    else:
        return "error: could not parse track listing"

    title = track["name"]
    album = track["album"]["#text"]
    artist = track["artist"]["#text"]
    link = track["url"]
    linkshort = web.isgd(link)

    title2 = unicode(title)
    artist2 = unicode(artist)

    response2 = http.get_json(api_url,
                              method="track.getinfo",
                              api_key=api_key,
                              track=title2,
                              artist=artist2,
                              username=user,
                              autocorrect=1)

    trackdetails = response2["track"]

    if type(trackdetails) == list:
        track2 = trackdetails[0]
    elif type(trackdetails) == dict:
        track2 = trackdetails

    if "userplaycount" in trackdetails:
        playcounts = trackdetails["userplaycount"]
    else:
        playcounts = 0

    if "tag" in track2["toptags"]:
        genres1 = track2["toptags"]["tag"]
        genresstr = str(genres1)
        #First genre
        genres3 = genresstr.split("u'name': u'", 1)[1]
        genres4 = genres3.split("'", 1)[0]
        genres = genres4

    else:
        genres = "(No tags found)"
    try:
        #Second genre
        genres5 = genres3.split("u'name': u'", 1)[1]
        genres6 = genres5.split("'", 1)[0]
        genres = genres4, genres6
        genres = str(genres)
        genres = genres.replace("'", "")
    except UnboundLocalError:
        genres = "(No tags found)"
    except IndexError:
        genres = '({})'.format(genres)

    try:
        #Third genre
        genres7 = genres5.split("u'name': u'", 1)[1]
        genres8 = genres7.split("'", 1)[0]
        genres = genres4, genres6, genres8
        genres = str(genres)
        genres = genres.replace("'", "")
    except UnboundLocalError:
        genres = '{}'.format(genres)
    except IndexError:
        genres = '{}'.format(genres)

    # the track duration comes back from the API in milliseconds
    total_seconds = int(float(track2["duration"])) // 1000
    if total_seconds:
        minutes, seconds = divmod(total_seconds, 60)
        hours, minutes = divmod(minutes, 60)
        if hours:
            length = u'{}:{:02d}:{:02d}'.format(hours, minutes, seconds)
        else:
            length = u'{}:{:02d}'.format(minutes, seconds)
    else:
        # Last.fm reports 0 for unknown durations; skip the field below
        length = ''

    out = u'{} {} "{}"'.format(user, status, title)

    if artist:
        out += u' by \x02{}\x0f'.format(artist)
    if album:
        out += u' from the album \x02{}\x0f'.format(album)
    if length:
        out += u' [{}]'.format(length)
    # always show the play count ("playcounts" defaults to 0 above)
    out += u' [plays: {}]'.format(playcounts)
    if genres:
        out += u' {}'.format(genres)
    if linkshort:
        out += u' ({})'.format(linkshort)

    # append ending based on what type it was
    out += ending

    if inp and not dontsave:
        db.execute("insert or replace into lastfm(nick, acc) values (?,?)",
                   (nick.lower(), user))
        db.commit()

    return out
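Every example in this collection calls web.isgd unconditionally, so an is.gd outage turns the whole command into a traceback. A small wrapper keeps the reply usable by falling back to the long URL; this is a sketch only, assuming web.ShortenError and http.HTTPError are the exceptions the shortener and HTTP helpers can raise in this codebase:

# hypothetical helper; relies on the same web/http utility modules the
# examples already import
def try_shorten(url):
    # shorten with is.gd, but never let a shortener failure break the
    # command; return the original URL instead
    try:
        return web.isgd(url)
    except (web.ShortenError, http.HTTPError):
        return url

With that in place, linkshort = try_shorten(link) and urlshort = try_shorten(userurl) behave the same on the happy path and simply skip shortening when is.gd is unreachable.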
Example #44
def profile(inp, nick='', db=None, bot=None, notice=None):
    """profile -- Displays information for selected profile
    from last.fm db. """

    api_key = bot.config.get("api_keys", {}).get("lastfm")
    if not api_key:
        return "error: no api key set"

    fetchprof = db.execute("select acc from lastfm where nick=lower(?)",
                           (inp, )).fetchone()

    fetchprof = fetchprof[0] if fetchprof else inp

    response = http.get_json(api_url,
                             method="user.getinfo",
                             api_key=api_key,
                             user=fetchprof)

    if 'error' in response:
        return "Error: {}.".format(response["message"])

    userprof = response["user"]

    username = userprof["name"]
    userreal = userprof["realname"]
    userreal = userreal.strip(' ')
    usercntry = userprof["country"]
    userage = userprof["age"]
    userplays = userprof["playcount"]
    usergendr = userprof["gender"]
    usertime = userprof["registered"]["#text"]
    userurl = userprof["url"]
    urlshort = web.isgd(userurl)

    #If name not given, outputs 'Unknown'
    if userreal == '':
        userreal = "Unknown"

    # Convert a handful of country codes to readable names;
    # anything else is shown as returned by the API
    countries = {
        "NZ": "New Zealand",
        "CA": "Canada",
        "US": "the United States",
        "UK": "the United Kingdom",
        "AU": "Australia",
    }
    usercntry = countries.get(str(usercntry), usercntry)

    # Convert the gender flag to a word (the output reads "is a ...")
    if str(usergendr) == "m":
        usergendr = "man"
    elif str(usergendr) == "f":
        usergendr = "woman"
    else:
        usergendr = "person of unknown gender"

    # Account age: how long ago the account was registered
    date_format = '%Y-%m-%d %H:%M'
    regstrdate = datetime.strptime(usertime, date_format)
    accage = (datetime.now() - regstrdate).days
    # rough conversion of total days into years, leftover months and days
    accageyrs = int(accage / 365.25)
    accagemnths = int(accage / 30.33)
    if accagemnths > 12:
        accagemnths = int(accagemnths % 12)
    accage = int(accage % 30.33)

    out = ''

    if username:
        out += u'\x02Last.fm profile for {}:\x0f'.format(username)
    if userreal:
        out += u' {}'.format(userreal)
    if usergendr:
        out += u' is a {}'.format(usergendr)
    if usercntry:
        out += u' from {}'.format(usercntry)
    if userage:
        out += u' aged {}'.format(userage)
    if userplays:
        out += u' who has played {} songs'.format(userplays)
    if accageyrs > 1:
        out += u' in {} years and {} months.'.format(accageyrs, accagemnths)
    else:
        out += u' in {} months and {} days.'.format(accagemnths, accage)
    if urlshort:
        out += u' ({}).'.format(urlshort)

    return out
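The year/month arithmetic above divides the same day count twice and then takes remainders, which is easy to misread. A rough equivalent using divmod, still based on whole 365/30-day years and months like the original; a sketch only, reusing regstrdate from the example above:

    # sketch: split the account age into years, months and days in one pass
    age_days = (datetime.now() - regstrdate).days
    accageyrs, remainder = divmod(age_days, 365)
    accagemnths, accage = divmod(remainder, 30)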
Example #45
def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
    """weather <location> [dontsave] -- Gets weather data
    for <location> from Wunderground."""

    api_key = bot.config.get("api_keys", {}).get("wunderground")

    if not api_key:
        return "Error: No wunderground API details."

    # initialise weather DB
    db.execute("create table if not exists weather(nick primary key, loc)")

    # if there is no input, try getting the users last location from the DB
    if not inp:
        location = db.execute(
            "select loc from weather where nick=lower(:nick)", {
                "nick": nick
            }).fetchone()
        if not location:
            # no location saved in the database, send the user help text
            notice(weather.__doc__)
            return
        loc = location[0]

        # no need to save a location, we already have it
        dontsave = True
    else:
        # see if the input ends with "dontsave"
        dontsave = inp.endswith(" dontsave")

        # remove "dontsave" from the input string after checking for it
        if dontsave:
            loc = inp[:-9].strip().lower()
        else:
            loc = inp

    location = http.quote_plus(loc)

    request_url = base_url.format(api_key, "geolookup/forecast/conditions",
                                  location)
    response = http.get_json(request_url)

    if 'location' not in response:
        try:
            location_id = response['response']['results'][0]['zmw']
        except KeyError:
            return "Could not get weather for that location."

        # get the weather again, using the closest match
        request_url = base_url.format(api_key, "geolookup/forecast/conditions",
                                      "zmw:" + location_id)
        response = http.get_json(request_url)

    if response['location']['state']:
        place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(
            response['location']['city'], response['location']['state'],
            response['location']['country'])
    else:
        place_name = "\x02{}\x02 (\x02{}\x02)".format(
            response['location']['city'], response['location']['country'])

    forecast_days = response["forecast"]["simpleforecast"]["forecastday"]
    forecast_today = forecast_days[0]
    forecast_tomorrow = forecast_days[1]

    # put all the stuff we want to use in a dictionary for easy formatting of the output
    weather_data = {
        "place": place_name,
        "conditions": response['current_observation']['weather'],
        "temp_f": response['current_observation']['temp_f'],
        "temp_c": response['current_observation']['temp_c'],
        "humidity": response['current_observation']['relative_humidity'],
        "wind_kph": response['current_observation']['wind_kph'],
        "wind_mph": response['current_observation']['wind_mph'],
        "wind_direction": response['current_observation']['wind_dir'],
        "today_conditions": forecast_today['conditions'],
        "today_high_f": forecast_today['high']['fahrenheit'],
        "today_high_c": forecast_today['high']['celsius'],
        "today_low_f": forecast_today['low']['fahrenheit'],
        "today_low_c": forecast_today['low']['celsius'],
        "tomorrow_conditions": forecast_tomorrow['conditions'],
        "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'],
        "tomorrow_high_c": forecast_tomorrow['high']['celsius'],
        "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'],
        "tomorrow_low_c": forecast_tomorrow['low']['celsius'],
        "url": web.isgd(response["current_observation"]['forecast_url'] +
                        "?apiref=e535207ff4757b18")
    }

    reply(
        "{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, "
        "Wind: {wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, "
        "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. "
        "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, "
        "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(
            **weather_data))

    if location and not dontsave:
        db.execute(
            "insert or replace into weather(nick, loc) values (:nick, :loc)", {
                "nick": nick.lower(),
                "loc": loc
            })
        db.commit()
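The today/tomorrow halves of weather_data repeat the same five field lookups. A hypothetical helper that flattens one forecastday entry would keep the dictionary short and make extra days easy to add; a sketch, using only the keys already shown in the example above:

def forecast_fields(day, prefix):
    # flatten one Wunderground simpleforecast day into the keys the
    # reply format string above expects ("<prefix>_high_f" and friends)
    return {
        prefix + "_conditions": day['conditions'],
        prefix + "_high_f": day['high']['fahrenheit'],
        prefix + "_high_c": day['high']['celsius'],
        prefix + "_low_f": day['low']['fahrenheit'],
        prefix + "_low_c": day['low']['celsius'],
    }

With it, weather_data.update(forecast_fields(forecast_today, "today")) and the matching call for forecast_tomorrow would replace the ten hand-written forecast entries.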