Example #1
def animetake(inp):
    "anime [list|get] <anime name> - Searches Animetake for the latest updates"
    error = u'not so lucky today..'
    # str.split always yields at least one element, so only the query needs a guard
    inp_array = inp.split(' ')
    command = inp_array[0]
    query = inp_array[1] if len(inp_array) > 1 else ''

    url = "http://www.animetake.com/"  #% (urllib.quote_plus(query))
    anime_updates = []
    response = ""

    soup = http.get_soup(url)
    page = soup.find('div', id='mainContent').ul

    for li in page.findAll('li'):
        anime_link = li.find('div', 'updateinfo').h4.a
        anime_updates.append('%s : %s' %
                             (anime_link['title'], anime_link['href']))

    if command == 'list':
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += ("%s | " % (anime_title.split(' : ')[0]))
            count += 1
            if count == 11:
                break
    elif command == 'get':
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += ("%s " % (anime_updates[index]))
    return response or error  # fall back to the error message for unknown commands
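For quick testing outside the bot, the handler can be called directly. A minimal sketch, assuming the plugin's http helper module is importable (the query text is illustrative):

if __name__ == '__main__':
    print(animetake('list'))        # first ten titles from the front page
    print(animetake('get Naruto'))  # entries whose title or link contain "Naruto"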
Example #2
def animetake(inp):
    "animetake <list> | <get [query]> - searches animetake for the latest updates"
    error = u"not so lucky today.."
    try:
        inp_array = inp.split(" ")
        command = inp_array[0]
        query = inp_array[1]
    except:
        pass

    url = "http://www.animetake.com/"  #% (urllib.quote_plus(query))
    anime_updates = []
    response = ""

    soup = http.get_soup(url)
    page = soup.find("div", id="mainContent").ul

    for li in page.findAll("li"):
        anime_link = li.find("div", "updateinfo").h4.a
        anime_updates.append("%s : %s" % (anime_link["title"], anime_link["href"]))

    if command == "list":
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += "%s | " % (anime_title.split(" : ")[0])
            count += 1
            if count == 11:
                break
    elif command == "get":
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += "%s " % (anime_updates[index])
    return response or error  # fall back to the error message for unknown commands
Example #3
def fourchan_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'})
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    return http.process_text('\x02%s\x02 - posted by \x02%s\x02: %s' % (title, author, comment))
Example #4
def horoscope(inp, db=None, notice=None, nick=None):
    """horoscope <sign> [save] -- Get your horoscope."""
    save = False
    database.init(db)

    if '@' in inp:
        nick = inp.split('@')[1].strip()
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not sign:
            return "No horoscope sign stored for {}.".format(nick)
    else:
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not inp:
            if not sign:
                notice(horoscope.__doc__)
                return
        else:
            if not sign:
                save = True
            if " save" in inp:
                save = True
            sign = inp.split()[0]

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % sign
    try:
        result = http.get_soup(url)
        title = result.find_all('h1', {'class': 'h1b'})[1].text
        horoscopetxt = result.find('div', {'id': 'textline'}).text
    except Exception:
        return "Could not get the horoscope for {}.".format(sign.encode('utf8'))

    if sign and save:
        database.set(db, 'users', 'horoscope', sign, 'nick', nick)

    return u"\x02{}\x02 {}".format(title, horoscopetxt)
Example #5
def sptfy(inp, sptfy=False):
    if sptfy:
        shortenurl = "http://sptfy.com/index.php"
        data = urlencode({
            'longUrl': inp,
            'shortUrlDomain': 1,
            'submitted': 1,
            "shortUrlFolder": 6,
            "customUrl": "",
            "shortUrlPassword": "",
            "shortUrlExpiryDate": "",
            "shortUrlUses": 0,
            "shortUrlType": 0
        })
        try:
            soup = http.get_soup(shortenurl, post_data=data, cookies=True)
        except Exception:
            return inp
        try:
            link = soup.find('div', {'class': 'resultLink'}).text.strip()
            return link
        except Exception:
            message = "Unable to shorten URL: {}".format(
                soup.find('div', {
                    'class': 'messagebox_text'
                }).find('p').text.split("<br/>")[0])
            return message
    else:
        return web.try_isgd(inp)
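Usage is a single call; a minimal sketch (the long URL is illustrative):

short = sptfy("http://example.com/some/very/long/path", sptfy=True)  # via sptfy.com
fallback = sptfy("http://example.com/some/very/long/path")           # falls back to is.gd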
Example #6
def fact(inp, say=False, nick=False):
    """fact -- Gets a random fact from OMGFACTS."""

    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        fact = ''.join(response.find(text=True))

        if fact:
            fact = fact.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    url = web.try_isgd(link)

    return "{} - {}".format(fact, url)
Example #7
def animetake(inp):
    "animetake <list> | <get [query]> - searches animetake for the latest updates"
    error = u'not so lucky today..'
    # str.split always yields at least one element, so only the query needs a guard
    inp_array = inp.split(' ')
    command = inp_array[0]
    query = inp_array[1] if len(inp_array) > 1 else ''

    url = "http://www.animetake.com/"  #% (urllib.quote_plus(query))
    anime_updates = []
    response = ""

    soup = http.get_soup(url)
    page = soup.find('div', id='mainContent').ul

    for li in page.findAll('li'):
        anime_link = li.find('div', 'updateinfo').h4.a
        anime_updates.append('%s : %s' % (anime_link['title'], anime_link['href']))

    if command == 'list':
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += ("%s | " % (anime_title.split(' : ')[0]))
            count += 1
            if count == 11:
                break
    elif command == 'get':
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += ("%s " % (anime_updates[index]))
    return response or error  # fall back to the error message for unknown commands
Example #8
def fact():
    """fact -- Gets a random fact from OMGFACTS."""

    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup("http://www.omg-facts.com/random")
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find("a", {"class": "surprise"})
        link = response["href"]
        fact_data = "".join(response.find(text=True))

        if fact_data:
            fact_data = fact_data.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    url = web.try_isgd(link)

    return "{} - {}".format(fact_data, url)
Example #9
def fourchanthread_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'}).renderContents().strip()
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    # trimlength is a module-level constant defined elsewhere in the plugin
    return http.process_text("\x02{}\x02 - posted by \x02{}\x02: {}".format(title, author, comment[:trimlength]))
Example #10
def wikipedia_url(match):
    soup = http.get_soup(match)
    title = soup.find('h1', {'id': 'firstHeading'}).renderContents().strip()
    post = soup.find('p').renderContents().strip().replace('\n', '').replace(
        '\r', '')
    # trimlength is a module-level constant defined elsewhere in the plugin
    return http.process_text("\x02Wikipedia.org: {}\x02 - {}...".format(
        title, post[:trimlength]))
Example #11
def horoscope(inp, db=None, notice=None, nick=None):
    """horoscope <sign> [save] -- Get your horoscope."""
    save = False
    database.init(db)

    if '@' in inp:
        nick = inp.split('@')[1].strip()
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not sign: return "No horoscope sign stored for {}.".format(nick)
    else:
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not inp:
            if not sign:
                notice(horoscope.__doc__)
                return
        else:
            if not sign: save = True
            if " save" in inp: save = True
            sign = inp.split()[0]

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % sign
    try:
        result = http.get_soup(url)
        title = result.find_all('h1', {'class': 'h1b'})[1].text
        horoscopetxt = result.find('div', {'id': 'textline'}).text
    except Exception:
        return "Could not get the horoscope for {}.".format(
            sign.encode('utf8'))

    if sign and save:
        database.set(db, 'users', 'horoscope', sign, 'nick', nick)

    return u"\x02{}\x02 {}".format(title, horoscopetxt)
Example #12
def isup(inp):
    """isup -- uses isup.me to see if a site is up or not
    :type inp: str
    """

    # slightly overcomplicated, esoteric URL parsing
    scheme, auth, path, query, fragment = urllib.parse.urlsplit(inp.strip())

    domain = auth or path
    url = urlnorm.normalize(domain, assume_scheme="http")

    try:
        soup = http.get_soup('http://isup.me/' + domain)
    except http.HTTPError:
        return "Failed to get status."

    content = soup.find('div').text.strip()

    if "not just you" in content:
        return "It's not just you. {} looks \x02\x034down\x02\x0f from here!".format(
            url)
    elif "is up" in content:
        return "It's just you. {} is \x02\x033up\x02\x0f.".format(url)
    else:
        return "Huh? That doesn't look like a site on the interweb."
Example #13
File: woot.py Project: Cameri/Gary
def get_woots(inp):
    woots = {}
    for k, v in inp.items():
        try:
            w = {}
            soup = http.get_soup(api + v)

            w['product'] = soup.find('woot:product').text
            w['wootoff'] = soup.find('woot:wootoff').text
            w['price'] = soup.find('woot:price').text
            w['pricerange'] = soup.find('woot:pricerange').text
            w['shipping'] = soup.find('woot:shipping').text
            w['url'] = "http://{}".format(v)
            w['soldout'] = soup.find('woot:soldout').text
            w['soldoutpercent'] = soup.find('woot:soldoutpercentage').text

            category = text.capitalize_first(k if k == 'woot' else "%s woot" % k)
            if w['wootoff'] != "false":
                category += "off!"

            woots[category] = w
        except Exception:
            continue

    return woots
Example #14
def newgrounds_url(match):
    location = match.group(4).split("/")[-1]
    if not test(location):
        print(
            "Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993"
        )
        return None
    soup = http.get_soup("http://www.newgrounds.com/portal/view/" + location)

    title = "\x02{}\x02".format(soup.find('title').text)

    # get author
    try:
        author_info = soup.find('ul', {
            'class': 'authorlinks'
        }).find('img')['alt']
        author = " - \x02{}\x02".format(author_info)
    except Exception:
        author = ""

    # get rating
    try:
        rating_info = soup.find('dd',
                                {'class': 'star-variable'
                                 })['title'].split("Stars &ndash;")[0].strip()
        rating = " - rated \x02{}\x02/\x025.0\x02".format(rating_info)
    except Exception:
        rating = ""

    # get number of ratings
    try:
        ratings_info = soup.find('dd',
                                 {'class': 'star-variable'})['title'].split(
                                     "Stars &ndash;")[1].replace("Votes",
                                                                 "").strip()
        numofratings = " ({})".format(ratings_info)
    except Exception:
        numofratings = ""

    # get number of views
    try:
        views_info = soup.find('dl', {
            'class': 'contentdata'
        }).findAll('dd')[1].find('strong').text
        views = " - \x02{}\x02 views".format(views_info)
    except Exception:
        views = ""

    # get upload date
    try:
        date = "on \x02{}\x02".format(
            soup.find('dl', {
                'class': 'sidestats'
            }).find('dd').text)
    except Exception:
        date = ""

    return title + rating + numofratings + views + author + date
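Each field above gets its own try/except so one missing element doesn't discard the rest. The same idea as a tiny helper; a sketch, not part of the original plugin:

def safe(fmt, getter):
    """Format a scraped value, or return '' when the lookup fails."""
    try:
        return fmt.format(getter())
    except Exception:
        return ""

# e.g. author = safe(" - \x02{}\x02",
#                    lambda: soup.find('ul', {'class': 'authorlinks'}).find('img')['alt'])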
Example #15
def refresh_cache():
    """ gets a page of random FMLs and puts them into a dictionary """
    soup = http.get_soup('http://www.fmylife.com/random/')

    for e in soup.find_all('p', attrs={'class': 'block'}):
        fml_id = int(e.find_all('a', href=True)[0]['href'].split('_')[1].split('.')[0])
        text = e.find_all('a')[0].text.strip()
        fml_cache.append((fml_id, text))
Example #16
def get_bash_quote(inp):
    try:
        soup = http.get_soup('http://bash.org/?%s' % inp)
        quote_info = soup.find('p', {'class': 'quote'}).text
        quote = soup.find('p', {'class': 'qt'}).text
        return (u'\x02#{}\x02 ({}): {}'.format(
            quote_info.split()[0].replace('#', ''),
            quote_info.split()[1].split('(')[1].split(')')[0].strip(),
            quote.replace('\n', ' ').replace('\r', ' |')))
    except Exception:
        return "No quote found."
Example #17
def fourchanthread_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'}).renderContents().strip()
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    return http.process_text("\x02{}\x02 - posted by \x02{}\x02: {}".format(
        title, author, comment[:trimlength]))
Example #18
def lyrics(inp):
    """lyrics <search> - Search AZLyrics.com for song lyrics"""
    if "pastelyrics" in inp:
        dopaste = True
        inp = inp.replace("pastelyrics", "").strip()
    else:
        dopaste = False
    soup = http.get_soup(url + inp.replace(" ", "+"))  # 'url' is the module-level search-URL constant
    if "Try to compose less restrictive search query" in soup.find(
            'div', {
                'id': 'inn'
            }).text:
        return "No results. Check spelling."
    div = None
    for i in soup.findAll('div', {'class': 'sen'}):
        if "/lyrics/" in i.find('a')['href']:
            div = i
            break
    if div:
        title = div.find('a').text
        link = div.find('a')['href']
        if dopaste:
            newsoup = http.get_soup(link)
            try:
                lyrics = newsoup.find(
                    'div', {
                        'style': 'margin-left:10px;margin-right:10px;'
                    }).text.strip()
                pasteurl = " " + web.haste(lyrics)
            except Exception as e:
                pasteurl = " (\x02Unable to paste lyrics\x02 [{}])".format(
                    str(e))
        else:
            pasteurl = ""
        artist = div.find('b').text.title()
        lyricsum = div.find('div').text
        if "\r\n" in lyricsum.strip():
            lyricsum = " / ".join(
                lyricsum.strip().split("\r\n")[0:4])  # truncate, format
        else:
            lyricsum = " / ".join(
                lyricsum.strip().split("\n")[0:4])  # truncate, format
        return u"\x02{}\x02 by \x02{}\x02 {}{} - {}".format(
            title, artist, web.try_isgd(link), pasteurl, lyricsum[:-3])
    else:
        return "No song results. " + url + inp.replace(" ", "+")
Example #19
def refresh_cache():
    """ gets a page of random FMLs and puts them into a dictionary """
    soup = http.get_soup('http://www.fmylife.com/random/')

    for e in soup.find_all('div', {'class': 'post article'}):
        id = int(e['id'])
        text = ''.join(e.find('p').find_all(text=True))
        fml_cache.append((id, text))
Example #20
def refresh_cache():
    """ gets a page of random FMLs and puts them into a dictionary """
    soup = http.get_soup('http://www.fmylife.com/random/')

    for e in soup.find_all('div', {'class': 'post article'}):
        fml_id = int(e['id'])
        text = ''.join(e.find('p').find_all(text=True))
        fml_cache.append((fml_id, text))
Example #21
def refresh_cache():
    """gets a page of random MLIAs and puts them into a dictionary """
    url = 'http://mylifeisaverage.com/{}'.format(random.randint(1, 11000))
    soup = http.get_soup(url)

    for story in soup.find_all('div', {'class': 'story '}):
        mlia_id = story.find('span', {'class': 'left'}).a.text
        mlia_text = story.find('div', {'class': 'sc'}).text.strip()
        mlia_cache.append((mlia_id, mlia_text))
Example #22
def get_yandere_tags(inp):
    url = "https://yande.re/post?tags=%s" % inp.replace(" ", "_")
    soup = http.get_soup(url)
    imagelist = soup.find("ul", {"id": "post-list-posts"}).findAll("li")
    image = random.choice(imagelist)
    imageid = image["id"].replace("p", "")
    title = image.find("img")["title"]
    src = image.find("a", {"class": "directlink"})["href"]
    return "\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Example #23
def refresh_cache():
    "gets a page of random MLIAs and puts them into a dictionary "
    url = 'http://mylifeisaverage.com/%s' % random.randint(1,11000)
    soup = http.get_soup(url)
    
    for story in soup.find_all('div', {'class': 'story '}):
        mlia_id = story.find('span', {'class': 'left'}).a.text
        mlia_text = story.find('div', {'class': 'sc'}).text.strip()
        mlia_cache.append((mlia_id, mlia_text))
Example #24
def get_yandere_tags(inp):
    url = 'https://yande.re/post?tags=%s' % inp.replace(' ', '_')
    soup = http.get_soup(url)
    imagelist = soup.find('ul', {'id': 'post-list-posts'}).findAll('li')
    image = random.choice(imagelist)
    imageid = image['id'].replace('p', '')
    title = image.find('img')['title']
    src = image.find('a', {'class': 'directlink'})['href']
    return u"\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Example #25
def get_yandere_tags(inp):
    url = 'https://yande.re/post?tags=%s' % inp.replace(' ', '_')
    soup = http.get_soup(url)
    imagelist = soup.find('ul', {'id': 'post-list-posts'}).findAll('li')
    image = random.choice(imagelist)
    imageid = image['id'].replace('p', '')
    title = image.find('img')['title']
    src = image.find('a', {'class': 'directlink'})['href']
    return u"\x034NSFW\x03: \x02({})\x02 {}: {}".format(
        imageid, title, web.isgd(http.unquote(src)))
Example #26
def refresh_cache():
    "gets a page of random yande.re posts and puts them into a dictionary "
    url = "https://yande.re/post?page=%s" % random.randint(1, 11000)
    soup = http.get_soup(url)

    for result in soup.findAll("li"):
        title = result.find("img", {"class": re.compile(r"\bpreview\b")})  # ['title']
        img = result.find("a", {"class": re.compile(r"\bdirectlink\b")})  # ['href']
        if img and title:
            yandere_cache.append((result["id"].replace("p", ""), title["title"].split(" User")[0], img["href"]))
Example #27
File: steam.py Project: Cameri/Gary
def steam(inp):
    """.steam [search] - Search for specified game/trailer/DLC."""
    soup = http.get_soup("http://store.steampowered.com/search/?term={}".format(inp))
    result = soup.find('a', {'class': 'search_result_row'})
    try:
        return (get_steam_info(result['href']) +
                " - " + web.try_googl(result['href']))
    except Exception as e:
        print "Steam search error: {}".format(e)
        return "Steam API error, please try again later."
Example #28
def calc(inp):
    "calc <term> -- Calculate <term> with Google Calc."

    soup = http.get_soup('http://www.google.com/search', q=inp)

    result = soup.find('h2', {'class': 'r'})
    if not result:
        return "Could not calculate '%s'" % inp

    return result.contents[0]
Example #29
def refresh_cache():
    "gets a page of random yande.re posts and puts them into a dictionary "
    url = 'https://yande.re/post?page=%s' % random.randint(1,11000)
    soup = http.get_soup(url)

    for result in soup.findAll('li'):
        title = result.find('img', {'class': re.compile(r'\bpreview\b')})  # ['title']
        img = result.find('a', {'class': re.compile(r'\bdirectlink\b')})  # ['href']
        if img and title:
            yandere_cache.append((result['id'].replace('p', ''), title['title'].split(' User')[0], img['href']))
Example #30
def gcalc(inp):
    "gcalc <term> -- Calculate <term> with Google Calc."
    soup = http.get_soup('http://www.google.com/search', q=inp)

    result = soup.find('span', {'class': 'cwcot'})
    formula = soup.find('span', {'class': 'cwclet'})
    if not result:
        return "Could not calculate '{}'".format(inp)

    return u"{} {}".format(formula.contents[0].strip(),result.contents[0].strip())
Example #31
def calc(inp):
    "calc <term> -- Calculate <term> with Google Calc."

    soup = http.get_soup("http://www.google.com/search", q=inp)

    result = soup.find("h2", {"class": "r"})
    if not result:
        return "Could not calculate '%s'" % inp

    return result.contents[0]
Example #32
def get_title(url):
    soup = http.get_soup(url)

    if '#' in url:
        postid = url.split('#')[1]
        post = soup.find('div', {'id': postid})
    else:
        post = soup.find('div', {'class': 'opContainer'})
    
    comment = http.process_text(post.find('blockquote', {'class': 'postMessage'}).renderContents().strip())
    return u"{} - {}".format(url, comment)
Example #33
def get_bash_quote(inp):
    try:
        soup = http.get_soup('http://bash.org/?%s' % inp)
        quote_info = soup.find('p', {'class': 'quote'}).text
        quote = soup.find('p', {'class': 'qt'}).text
        return (u'\x02#{}\x02 ({}): {}'.format(
            quote_info.split()[0].replace('#', ''),
            quote_info.split()[1].split('(')[1].split(')')[0].strip(),
            quote.replace('\n', ' ').replace('\r', ' |')))
    except Exception:
        return "No quote found."
Example #34
def gelbooru_url(match):
    soup = http.get_soup('http://gelbooru.com/index.php?page=dapi&s=post&q=index&id={}'.format(match.group(1)))
    posts = soup.find_all('post')

    id, score, url, rating, tags = (posts[0].get('id'), posts[0].get('score'), posts[0].get('file_url'),
                                    posts[0].get('rating'), posts[0].get('tags'))

    if rating == 'e': rating = "\x02\x034NSFW\x03\x02"
    elif rating == 'q': rating = "\x02\x037Questionable\x03\x02"
    elif rating == 's': rating = "\x02\x033Safe\x03\x02"

    return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {} - {}'.format(id, score, rating, url, tags[:75].strip())
Example #35
def gcalc(inp):
    "gcalc <term> -- Calculate <term> with Google Calc."
    soup = http.get_soup('http://www.google.com/search', q=inp)

    result = soup.find('span', {'class': 'cwcot'})
    formula = soup.find('span', {'class': 'cwclet'})
    if not result:
        return "Could not calculate '{}'".format(inp)

    return u"{} {}".format(formula.contents[0].strip(),
                           result.contents[0].strip())
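These Google scrapers are brittle by nature: the cwclet/cwcot spans track Google's result markup and break whenever it changes. Illustrative usage, assuming the bot's http helper:

print(gcalc("2+2"))  # e.g. "2 + 2 = 4"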
Example #36
def ytplaylist_url(match):
    location = match.group(4).split("=")[-1]
    try:
        soup = http.get_soup("https://www.youtube.com/playlist?list=" + location)
    except Exception:
        return "\x034\x02Invalid response."
    title = soup.find('title').text.split('-')[0].strip()
    author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
    num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
    views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
    return "\x02{}\x02 - \x02{}\x02 views - \x02{}\x02 videos - \x02{}\x02".format(title, views, num_videos, author)
Example #37
def get_title(url):
    soup = http.get_soup(url)

    if "#" in url:
        postid = url.split("#")[1]
        post = soup.find("div", {"id": postid})
    else:
        post = soup.find("div", {"class": "opContainer"})

    comment = http.process_text(post.find("blockquote", {"class": "postMessage"}).renderContents().strip())
    return "{} - {}".format(url, comment)  #
Example #38
def hulu_search(text):
    """hulu <search> - Search Hulu"""
    result = http.get_soup(
        "http://m.hulu.com/search?dp_identifier=hulu&{}&items_per_page=1&page=1".format(urlencode({'query': text})))
    data = result.find('results').find('videos').find('video')
    showname = data.find('show').find('name').text
    title = data.find('title').text
    duration = timeformat.format_time(int(float(data.find('duration').text)))
    description = data.find('description').text
    rating = data.find('content-rating').text
    return "{}: {} - {} - {} ({}) {}".format(showname, title, description, duration, rating,
                                             "http://www.hulu.com/watch/" + str(data.find('id').text))
Example #39
def refresh_cache():
    "gets a page of random bash.org quotes and puts them into a dictionary "
    num = 0
    soup = http.get_soup('http://bash.org/?random')
    quote_infos = soup.find_all('p', {'class': 'quote'})
    quotes = soup.find_all('p', {'class': 'qt'})

    while num < len(quotes):
        quote_info = quote_infos[num].text
        quote = quotes[num].text.replace('\n', ' ').replace('\r', ' |')
        bash_cache.append(
            (quote_info.split()[0].replace('#', ''),
             quote_info.split()[1].split('(')[1].split(')')[0].strip(), quote))
        num += 1
Example #40
def xkcd_search(term):
    search_term = http.quote_plus(term)
    soup = http.get_soup("http://www.ohnorobot.com/index.pl?s={}&Search=Search&"
                         "comic=56&e=0&n=0&b=0&m=0&d=0&t=0".format(search_term))
    result = soup.find('li')
    if result:
        url = result.find('div', {'class': 'tinylink'}).text
        xkcd_id = url[:-1].split("/")[-1]
        print(xkcd_id)
        return xkcd_info(xkcd_id, url=True)
    else:
        return "No results found!"
Example #41
def refresh_cache(inp):
    global furry_cache
    furry_cache = []
    # normalise the query into e621 tag syntax
    search = inp.replace(' ', '+').replace('explicit', 'rating:explicit').replace('nsfw', 'rating:explicit').replace('safe', 'rating:safe').replace('sfw', 'rating:safe')
    if inp == '':
        soup = http.get_soup('http://e621.net/post/index.xml?limit=20&page=1')
    else:
        soup = http.get_soup('http://e621.net/post/index.xml?limit=20&page=1&tags={}'.format(search))
    posts = soup.find_all('post')

    for post in posts:
        id = post.find_all('id')[0].get_text()
        score = post.find_all('score')[0].get_text()
        url = post.find_all('file_url')[0].get_text()
        rating = post.find_all('rating')[0].get_text()
        tags = post.find_all('tags')[0].get_text()
        furry_cache.append((id, score, url, rating, tags))

    random.shuffle(furry_cache)
    return
Example #42
def fourchanquote_url(match):
    postid = match.split('#')[1]
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'id': postid})
    comment = post.find('blockquote', {'class': 'postMessage'}).renderContents().strip()
    #comment = re.sub('&gt;&gt;\d*[\s]','',comment) #remove quoted posts
    #comment = re.sub('(&gt;&gt;\d*)','',comment)
    #comment = re.sub('[\|\s]{2,50}','',comment) #remove multiple | | | |
    #comment = re.sub('[\s]{3,}','  ',comment) #remove multiple spaces
    author = post.find_all('span', {'class': 'nameBlock'})[1].renderContents().strip()
    return http.process_text('\x02%s\x02 - posted by \x02%s\x02: %s' % (title, author, comment))
Example #43
def xkcd_search(term):
    search_term = http.quote_plus(term)
    soup = http.get_soup(
        "http://www.ohnorobot.com/index.pl?s={}&Search=Search&"
        "comic=56&e=0&n=0&b=0&m=0&d=0&t=0".format(search_term))
    result = soup.find('li')
    if result:
        url = result.find('div', {'class': 'tinylink'}).text
        xkcd_id = url[:-1].split("/")[-1]
        print xkcd_id
        return xkcd_info(xkcd_id, url=True)
    else:
        return "No results found!"
Example #44
def isup(inp):
    """isup -- uses isup.me to see if a site is up or not"""

    # slightly overcomplicated, esoteric URL parsing
    scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip())

    domain = auth.encode('utf-8') or path.encode('utf-8')
    url = urlnorm.normalize(domain, assume_scheme="http")

    try:
        soup = http.get_soup('http://isup.me/' + domain)
    except (http.HTTPError, http.URLError):
        return "Could not get status."
Example #45
def twitch_lookup(location):
    locsplit = location.split("/")
    if len(locsplit) == 3:
        channel = locsplit[0]
        type = locsplit[1]  # should be b or c
        id = locsplit[2]
    else:
        channel = locsplit[0]
        type = None
        id = None
    h = HTMLParser()
    fmt = "{}: {} playing {} ({})"  # Title: nickname playing Game (x views)
    if type and id:
        if type == "b":  # I haven't found an API to retrieve broadcast info
            soup = http.get_soup("http://twitch.tv/" + location)
            title = soup.find('span', {'class': 'real_title js-title'}).text
            playing = soup.find('a', {'class': 'game js-game'}).text
            views = soup.find('span', {'id': 'views-count'}).text + " view"
            views = views + "s" if not views[0:2] == "1 " else views
            return h.unescape(fmt.format(title, channel, playing, views))
        elif type == "c":
            data = http.get_json("https://api.twitch.tv/kraken/videos/" +
                                 type + id)
            title = data['title']
            playing = data['game']
            views = str(data['views']) + " view"
            views = views + "s" if not views[0:2] == "1 " else views
            return h.unescape(fmt.format(title, channel, playing, views))
    else:
        data = http.get_json(
            "http://api.justin.tv/api/stream/list.json?channel=" + channel)
        if data and len(data) >= 1:
            data = data[0]
            title = data['title']
            playing = data['meta_game']
            viewers = "\x033\x02Online now!\x02\x0f " + str(
                data["channel_count"]) + " viewer"
            print(viewers)
            viewers = viewers + "s" if not " 1 view" in viewers else viewers
            print(viewers)
            return h.unescape(fmt.format(title, channel, playing, viewers))
        else:
            try:
                data = http.get_json("https://api.twitch.tv/kraken/channels/" +
                                     channel)
            except:
                return
            title = data['status']
            playing = data['game']
            viewers = "\x034\x02Offline\x02\x0f"
            return h.unescape(fmt.format(title, channel, playing, viewers))
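Illustrative calls; location is the path portion of a twitch.tv or justin.tv URL:

print(twitch_lookup("somechannel"))          # live/offline summary for a channel
print(twitch_lookup("somechannel/b/12345"))  # archived broadcast ("b") is scraped from the page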
Example #46
def refresh_cache(inp):
    global furry_cache
    furry_cache = []
    num = 0
    # normalise the query into e621 tag syntax
    search = inp.replace(' ', '+').replace('explicit', 'rating:explicit').replace('nsfw', 'rating:explicit').replace('safe', 'rating:safe').replace('sfw', 'rating:safe')
    soup = http.get_soup('http://e621.net/post/index.xml?limit=20&page=1&tags={}'.format(search))
    posts = soup.find_all('post')

    while num < len(posts):
        furry_cache.append((posts[num].get('id'), posts[num].get('score'), posts[num].get('file_url'),
                            posts[num].get('rating'), posts[num].get('tags')))
        num += 1

    random.shuffle(furry_cache)
    return
Example #47
def steam(text):
    """steam [search] - Search for specified game/trailer/DLC"""
    try:
        soup = http.get_soup("http://store.steampowered.com/search/",
                             term=text.strip().lower())
    except Exception as e:
        return "Could not get game info: {}".format(e)
    result = soup.find('a', {'class': 'search_result_row'})

    if not result:
        return "No game found."

    app_id = result['data-ds-appid']
    return format_data(app_id)
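Illustrative usage; format_data() is defined elsewhere in the same plugin:

print(steam("half-life 2"))  # summary of the first search result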
Example #48
def gelbooru_url(match):
    soup = http.get_soup('http://gelbooru.com/index.php?page=dapi&s=post&q=index&id={}'.format(match.group(1)))
    posts = soup.find_all('post')

    id, score, url, rating, tags = (posts[0].get('id'), posts[0].get('score'), posts[0].get('file_url'),
                                    posts[0].get('rating'), posts[0].get('tags'))

    if rating == 'e': rating = "\x02\x034NSFW\x03\x02"
    elif rating == 'q': rating = "\x02\x037Questionable\x03\x02"
    elif rating == 's': rating = "\x02\x033Safe\x03\x02"

    return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {} - {}'.format(id, score, rating, url, tags[:75].strip())


# http://gelbooru.com/index.php?page=post&s=list&tags=%3D_%3D
Example #49
def refresh_cache():
    "gets a page of random bash.org quotes and puts them into a dictionary "
    num = 0
    soup = http.get_soup('http://bash.org/?random')
    quote_infos = soup.find_all('p', {'class': 'quote'})
    quotes = soup.find_all('p', {'class': 'qt'})

    while num < len(quotes):
        quote_info = quote_infos[num].text
        quote = quotes[num].text.replace('\n', ' ').replace('\r', ' |')
        bash_cache.append(
            (quote_info.split()[0].replace('#', ''),
             quote_info.split()[1].split('(')[1].split(')')[0].strip(), quote))
        num += 1
Example #50
def hulu_search(inp):
    """hulu <search> - Search Hulu"""
    result = http.get_soup(
        "http://m.hulu.com/search?dp_identifier=hulu&{}&items_per_page=1&page=1"
        .format(urlencode({'query': inp})))
    data = result.find('results').find('videos').find('video')
    showname = data.find('show').find('name').text
    title = data.find('title').text
    duration = timeformat.timeformat(int(float(data.find('duration').text)))
    description = data.find('description').text
    rating = data.find('content-rating').text
    return "{}: {} - {} - {} ({}) {}".format(
        showname, title, description, duration, rating,
        "http://www.hulu.com/watch/" + str(data.find('id').text))
Example #51
def refresh_cache():
    "gets a page of random yande.re posts and puts them into a dictionary "
    url = 'https://yande.re/post?page=%s' % random.randint(1, 11000)
    soup = http.get_soup(url)

    for result in soup.findAll('li'):
        title = result.find('img',
                            {'class': re.compile(r'\bpreview\b')})  #['title']
        img = result.find('a',
                          {'class': re.compile(r'\bdirectlink\b')})  #['href']
        if img and title:
            yandere_cache.append(
                (result['id'].replace('p', ''),
                 title['title'].split(' User')[0], img['href']))
Example #52
def lyrics(inp):
    """lyrics <search> - Search AZLyrics.com for song lyrics"""
    if "pastelyrics" in inp:
        dopaste = True
        inp = inp.replace("pastelyrics", "").strip()
    else:
        dopaste = False
    soup = http.get_soup(url + inp.replace(" ", "+"))  # 'url' is the module-level search-URL constant
    if "Try to compose less restrictive search query" in soup.find('div', {'id': 'inn'}).text:
        return "No results. Check spelling."
    div = None
    for i in soup.findAll('div', {'class': 'sen'}):
        if "/lyrics/" in i.find('a')['href']:
            div = i
            break
    if div:
        title = div.find('a').text
        link = div.find('a')['href']
        if dopaste:
            newsoup = http.get_soup(link)
            try:
                lyrics = newsoup.find('div', {'style': 'margin-left:10px;margin-right:10px;'}).text.strip()
                pasteurl = " " + web.haste(lyrics)
            except Exception as e:
                pasteurl = " (\x02Unable to paste lyrics\x02 [{}])".format(str(e))
        else:
            pasteurl = ""
        artist = div.find('b').text.title()
        lyricsum = div.find('div').text
        if "\r\n" in lyricsum.strip():
            lyricsum = " / ".join(lyricsum.strip().split("\r\n")[0:4])  # truncate, format
        else:
            lyricsum = " / ".join(lyricsum.strip().split("\n")[0:4])  # truncate, format
        return u"\x02{}\x02 by \x02{}\x02 {}{} - {}".format(title, artist, web.try_isgd(link), pasteurl,
                                                             lyricsum[:-3])
    else:
        return "No song results. " + url + inp.replace(" ", "+")
Example #53
def horoscope(inp):
    "horoscope <sign> -- Get your horoscope."

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % inp
    soup = http.get_soup(url)

    title = soup.find_all('h1', {'class': 'h1b'})[1]
    horoscope = soup.find('div', {'class': 'fontdef1'})

    # check for missing results before formatting them
    if not title or not horoscope:
        return "Could not get the horoscope for %s." % inp

    result = "\x02%s\x02 %s" % (title, horoscope)
    result = http.strip_html(result)
    #result = unicode(result, "utf8").replace('flight ','')

    return result
Example #54
def refresh_cache(inp):
    global gelbooru_cache
    gelbooru_cache = []
    num = 0
    search = inp.replace(' ', '+').replace('explicit', 'rating:explicit').replace('nsfw', 'rating:explicit').replace('safe', 'rating:safe').replace('sfw', 'rating:safe')
    # score:>100
    #print 'http://gelbooru.com/index.php?page=dapi&s=post&q=index&limit=20&tags={}'.format(search)
    soup = http.get_soup(u'http://gelbooru.com/index.php?page=dapi&s=post&q=index&limit=20&tags={}'.format(search))
    posts = soup.find_all('post')

    while num < len(posts):
        gelbooru_cache.append((posts[num].get('id'), posts[num].get('score'), posts[num].get('file_url'),
                               posts[num].get('rating'), posts[num].get('tags')))
        num += 1

    random.shuffle(gelbooru_cache)
    return
Example #55
def horoscope(text, db, notice, nick):
    """horoscope <sign> -- Get your horoscope."""

    # check if the user asked us not to save his details
    dontsave = text.endswith(" dontsave")
    if dontsave:
        sign = text[:-9].strip().lower()
    else:
        sign = text

    db.execute("create table if not exists horoscope(nick primary key, sign)")

    if not sign:
        sign = db.execute(
            "select sign from horoscope where "
            "nick=lower(:nick)", {
                'nick': nick
            }).fetchone()
        if not sign:
            notice("horoscope <sign> -- Get your horoscope")
            return
        sign = sign[0]

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-{}.html".format(
        sign)
    soup = http.get_soup(url)

    title = soup.find_all('h1', {'class': 'h1b'})[1]
    horoscope_text = soup.find('div', {'class': 'fontdef1'})
    result = "\x02{}\x02 {}".format(title, horoscope_text)
    result = formatting.strip_html(result)
    #result = unicode(result, "utf8").replace('flight ','')

    if not title:
        return "Could not get the horoscope for {}.".format(text)

    if text and not dontsave:
        db.execute(
            "insert or replace into horoscope(nick, sign) values (:nick, :sign)",
            {
                'nick': nick.lower(),
                'sign': sign
            })
        db.commit()

    return result
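A minimal sketch of exercising this variant's persistence with an in-memory SQLite connection; the notice callable and network access are normally supplied by the bot environment:

import sqlite3

db = sqlite3.connect(":memory:")  # stands in for the bot's database connection

def notice(msg):
    print(msg)

print(horoscope("aries", db, notice, "somenick"))  # fetches and saves the sign
print(horoscope("", db, notice, "somenick"))       # reuses the saved sign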
Example #56
def horoscope(inp, db=None, notice=None, nick=None):
    """horoscope <sign> [save] -- Get your horoscope."""
    save = False
    database.init(db)

    if '@' in inp:
        nick = inp.split('@')[1].strip()
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not sign:
            return "No horoscope sign stored for {}.".format(nick)
    else:
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not inp:
            if not sign:
                notice(horoscope.__doc__)
                return
        else:
            if not sign:
                save = True
            if " save" in inp:
                save = True
            sign = inp.split()[0]

    url = "https://my.horoscope.com/astrology/free-daily-horoscope-{}.html".format(
        sign)
    try:
        result = http.get_soup(url)
        container = result.find('div', attrs={'class': 'main-horoscope'})
        if not container:
            return 'Could not parse the horoscope for {}.'.format(sign)

        paragraph = container.find('p')
    except Exception:
        return "Could not get the horoscope for {}.".format(sign)

    if sign and save:
        database.set(db, 'users', 'horoscope', sign, 'nick', nick)

    if paragraph:
        return nick + ': ' + paragraph.text
    else:
        return 'Could not read the horoscope for {}.'.format(sign)