Example #1
def get_tweets(url, sender_uid=False):
    try:
        data = urllib2.urlopen(url).read().replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except:
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['full'] = web.htmlescape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0]).strip()
            tweet_data = r_tweet.findall(tweet)[0].strip()
            urls = r_url.findall(tweet_data)
            for url in urls:
                url = list(url)
                tweet_data = tweet_data.replace(url[1], url[0])
            tmp['text'] = web.htmlescape(web.striptags(tweet_data).strip())
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()

            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip('@'):
                    tmp['text'] = tmp['text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip('@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except:
            continue
    if tweets:
        return tweets
    else:
        return False
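get_tweets() depends on several module-level compiled regexes (r_fullname, r_username, r_time, r_tweet, r_url, r_uid) and on urllib2, none of which appear in the snippet. A minimal sketch of those helpers is shown below; the patterns are guesses at the mobile-Twitter markup this scraper targets, not the project's actual definitions.

# Assumed helper definitions for get_tweets(); the real patterns live
# elsewhere in the bot and may differ from these guesses.
import re
import urllib2

r_fullname = re.compile(r'<strong class="fullname[^"]*">(.*?)</strong>')  # display name (assumed)
r_username = re.compile(r'<div class="username[^"]*">(.*?)</div>')        # @handle (assumed)
r_time = re.compile(r'<td class="timestamp[^"]*">(.*?)</td>')             # relative timestamp (assumed)
r_tweet = re.compile(r'<div class="tweet-text[^"]*">(.*?)</div>')         # tweet body (assumed)
r_url = re.compile(r'data-url="(.*?)"[^>]*>(.*?)</a>')                    # (expanded url, shown text) (assumed)
r_uid = re.compile(r'@\w+')                                               # @mentions inside the text (assumed)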
Example #2
def define(code, input):
    try:
        data = web.json(uri % web.quote(input.group(2)))[0]
    except:
        return code.reply('{red}Failed to get definition!')

    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word).strip()
    word = web.htmlescape(word)
    word = word.replace('\\n', '').replace('\n', '')
    while '  ' in word:
        word = word.replace('  ', ' ')

    word = word.encode('ascii', 'ignore')
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))

    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (
        data['query'], word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    if len(word) > 250:
        word = word[:245] + '{c}{b}[...]'
    code.say(word)
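define() reads a module-level uri template and expects the endpoint to return a JSON list whose first element carries 'html' and 'query' keys; neither the template nor the payload shape is shown here. The placeholder below only illustrates that contract and is not the bot's real configuration.

# Placeholder endpoint and payload shape assumed by define(); both are
# illustrative, not the project's actual values.
uri = 'http://dictionary-api.invalid/define?term=%s'   # hypothetical URL template

# web.json(uri % ...) is expected to decode to something like:
# [{'query': 'python', 'html': '<p>python: a large snake ...</p>'}]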
Example #3
def chuck(code, input):
    """Get random Chuck Norris facts. I bet he's better than you."""
    try:
        data = web.json('http://api.icndb.com/jokes/random')
    except:
        return code.say('Chuck seems to be in the way. I\'m not f*****g with him.')
    code.say('#{blue}%s{c} - %s' % (data['value']['id'], web.htmlescape(data['value']['joke'])))
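For context, here is a rough standard-library equivalent of the web.json() call above, assuming web.json simply fetches the URL and decodes the JSON body.

# Standalone sketch of the same ICNDb lookup without the bot's web helpers.
import json
import urllib2

def chuck_fact():
    raw = urllib2.urlopen('http://api.icndb.com/jokes/random').read()
    data = json.loads(raw)
    return '#%s - %s' % (data['value']['id'], data['value']['joke'])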
Example #4
def define(code, input):
    try:
        data = web.json(uri % web.quote(input.group(2)))[0]
    except:
        return code.reply('{red}Failed to get definition!')

    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word).strip()
    word = web.htmlescape(word)
    word = word.replace('\\n', '').replace('\n', '')
    while '  ' in word:
        word = word.replace('  ', ' ')

    word = word.encode('ascii', 'ignore')
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))

    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (data['query'],
                                        word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    if len(word) > 250:
        word = word[:245] + '{c}{b}[...]'
    code.say(word)
Example #5
def get_url_data(url):
    if len(url) < url_min_length:
        return False  # URL is really short. Don't need shortening.
    try:
        uri = web.get(url)
        if not uri.info().maintype == 'text':
            return False
        data = uri.read(1024)  # Only read so much of a large site.
        title = re.compile('<title>(.*?)</title>',
                           re.IGNORECASE | re.DOTALL).search(data).group(1)
        title = web.htmlescape(title)
        title = title.replace('\n', '').replace('\r', '')

        # Collapse repeated spaces
        while '  ' in title:
            title = title.replace('  ', ' ')

        # Truncate LONG titles
        if len(title) > 200:
            title = title[:200] + '[...]'

        if len(title) < title_min_length:  # Title output too short
            return False

        return title
    except:
        return False
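The two thresholds get_url_data() checks, url_min_length and title_min_length, are module-level settings that are not part of this snippet. The values below are illustrative guesses, followed by a hypothetical call.

# Illustrative settings assumed by get_url_data(); the project's real values
# may differ.
url_min_length = 25    # ignore URLs shorter than this
title_min_length = 5   # discard titles shorter than this

# Hypothetical use:
# title = get_url_data('http://example.com/some/long/article')
# if title:
#     print title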
Example #6
File: url.py Project: adiq/stah
def get_url_data(url):
    if len(url) < url_min_length:
        return False  # URL is really short. Don't need shortening.
    try:
        uri = web.get(url)
        if not uri.info().maintype == 'text':
            return False
        data = uri.read(1024)  # Only read so much of a large site.
        title = re.compile('<title>(.*?)</title>',
                           re.IGNORECASE | re.DOTALL).search(data).group(1)
        title = web.htmlescape(title)
        title = title.replace('\n', '').replace('\r', '')

        # Collapse repeated spaces
        while '  ' in title:
            title = title.replace('  ', ' ')

        # Truncate LONG titles
        if len(title) > 200:
            title = title[:200] + '[...]'

        if len(title) < title_min_length:  # Title output too short
            return False

        return title
    except:
        return False
Example #7
def user_lookup(code, id, showerror=True):
    try:
        data = web.get(
            'http://steamdb.info/calculator/?player=%s&currency=us' % id, timeout=10).read()
        if 'This profile is private, unable to retrieve owned games.' in data:
            if showerror:
                code.say(
                    '{b}Unable to retrieve info, that account is {red}private{c}!')
            return
        realname = re.search(
            r'<title>.*?</title>', data).group().split('>')[1].split(' \xc2\xb7')[0]
        status = re.search(
            r'<td class="span2">Status</td>.*?<td>.*?</td>', data).group()
        status = web.striptags(status).strip('Status')
        # Basic user information
        details = data.split('[list]')[1].split('[/list]')[0]
        details = re.sub(r'\<\/.*?\>', '', details)
        details = re.sub(r'\<.*?\>', ' {b}- ', details)
        details = re.sub(r'\[.*?\]', '', details)
        details = details.replace(': ', ': {b}')
        url = 'http://steamcommunity.com/id/' + id
        return code.say('{b}%s{b} - {green}%s{c} - %s - %s' % (web.htmlescape(realname), status, details, url))
    except:
        if showerror:
            code.say('{b}Unable to find user information on %s!' % id)
        return
Example #8
def lastfm(code, input):
    user = input.group(2).split()[0].strip().lower()
    # Charset fuckery
    data = getdata(user).decode('utf-8').encode('ascii', 'ignore')
    if not data:
        return code.say('Username %s does not exist in the last.fm database.' % (user))
    song = web.striptags(re.compile(r'<title>.*?</title>').findall(data)[1])
    code.reply('{purple}' + web.htmlescape(song).replace('  ', ' -- ', 1) + '{c} {red}(via Last.Fm)')
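getdata() is defined elsewhere in the module. A plausible sketch, assuming it pulls the user's recent-tracks feed from Last.fm (the URL below is an assumption) and yields an empty string for unknown accounts:

# Hypothetical getdata(); under this assumption the second <title> in the
# feed is the latest scrobble, which matches the [1] index in lastfm() above.
import urllib2

def getdata(user):
    feed = 'http://ws.audioscrobbler.com/2.0/user/%s/recenttracks.rss' % user  # assumed endpoint
    try:
        return urllib2.urlopen(feed).read()
    except urllib2.HTTPError:
        return ''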
Example #9
def chuck(code, input):
    """Get random Chuck Norris facts. I bet he's better than you."""
    try:
        data = web.json('http://api.icndb.com/jokes/random')
    except:
        return code.say(
            'Chuck seems to be in the way. I\'m not f*****g with him.')
    code.say('#{blue}%s{c} - %s' %
             (data['value']['id'], web.htmlescape(data['value']['joke'])))
Example #10
def get_tweets(url, sender_uid=False):
    try:
        data = urllib2.urlopen(url).read().replace('\r', '').replace('\n', ' ')
        data = re.compile(r'<table class="tweet.*?>.*?</table>').findall(data)
    except:
        return
    tweets = []
    for tweet in data:
        try:
            tmp = {}
            tmp['full'] = web.htmlescape(r_fullname.findall(tweet)[0].strip())
            tmp['user'] = r_username.findall(tweet)[0].strip()
            tmp['time'] = web.striptags(r_time.findall(tweet)[0]).strip()
            tweet_data = r_tweet.findall(tweet)[0].strip()
            urls = r_url.findall(tweet_data)
            for url in urls:
                url = list(url)
                tweet_data = tweet_data.replace(url[1], url[0])
            tmp['text'] = web.htmlescape(web.striptags(tweet_data).strip())
            uids = r_uid.findall(' ' + tmp['text'])
            for uid in uids:
                tmp['text'] = tmp['text'].replace(
                    uid, '{purple}{b}@{b}%s{c}' % uid.strip('@')).lstrip()

            # Check if it's a retweet
            if sender_uid:
                if sender_uid.lower().strip('@') != tmp['user'].lower().strip(
                        '@'):
                    tmp['text'] = tmp[
                        'text'] + ' ({purple}{b}@{b}%s{c})' % tmp['user']
                    tmp['user'] = sender_uid.strip(
                        '@') + ' {blue}{b}retweeted{c}{b}'
            tweets.append(tmp)
        except:
            continue
    if tweets:
        return tweets
    else:
        return False
Example #11
def fml(code, input):
    """fml - Retrieve random FML's, via FMyLife.com's dev API."""
    # Random/No input
    if not input.group(2):
        try:
            r = fml_random()
            code.say('#{blue}%s{c} %s +{b}%s{b}/-{b}%s{b}' % (str(r['fml-id']),
                                                              web.htmlescape(r['fml']).replace('FML', '{red}FML{c}'), r['+'], r['-']))
        except:
            return code.say('{red}Failed to retrieve random FML.')
    elif input.group(2).startswith('#') and input.group(2).lstrip('#').isdigit():
        try:
            r = fml_id_search(input.group(2).lstrip('#'))
            code.say('#{blue}%s{c} %s +{b}%s{b}/-{b}%s{b}' % (str(r['fml-id']),
                                                              web.htmlescape(r['fml']).replace('FML', '{red}FML{c}'), r['+'], r['-']))
        except:
            return code.say('Failed to retrieve FML via ID.')
    # Input/Assume search query, with (possible) number at end indicating FML
    # index
    else:
        msg = input.group(2).lower().strip()
        parts = msg.split()
        if parts[-1].replace('-', '').isdigit():
            if int(parts[-1]) <= 0:
                id = 1
            else:
                id = int(parts[-1].replace('-', ''))
            del parts[-1]
            query = '+'.join(parts)
        else:
            id = 1
            query = msg.replace(' ', '+')
        try:
            r = fml_search(query, id)
            code.say(
                '(%s/%s) #{blue}%s{c} %s +{b}%s{b}/-{b}%s{b}' % (r['id'], r['max'], str(r['fml-id']),
                                                                 web.htmlescape(r['fml']).replace('FML', '{red}FML{c}'), r['+'], r['-']))
        except:
            return code.say('Failed to search for FML.')
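fml_random() and fml_id_search() appear in later examples, but fml_search() does not. Below is a rough sketch under the assumption that it hits the same FMyLife dev API's search endpoint (the URL is a guess), scrapes the XML the same way, and treats id as a 1-based index into the matches.

# Hypothetical fml_search(); `web`, `re`, `language` and `key` come from the
# same module context as fml_random() further down. The endpoint is assumed.
def fml_search(query, id):
    r = web.get('http://api.fmylife.com/view/search?search=%s&language=%s&key=%s' % (
        query, language, key)).read()
    fml = re.compile(r'<text>.*?</text>').findall(r)
    if not fml:
        return None
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    i = min(id, len(fml)) - 1   # clamp the requested index to the result count
    return {
        'id': i + 1,
        'max': len(fml),
        'fml': web.htmlescape(web.striptags(fml[i]).strip()),
        'fml-id': fmlid[i].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[i]).strip(),
        '-': web.striptags(deserved[i]).strip(),
    }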
Example #12
def dinner(code, input):
    """fd -- WHAT DO YOU WANT FOR F*****G DINNER?"""
    err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
    try:
        data = web.get(uri).read()
        results = re_mark.findall(data)
        if not results:
            return code.say(err)
        url, food = results[0][0], web.htmlescape(results[0][1])
        code.say('WHY DON\'T YOU EAT SOME F*****G {b}%s{b}. HERE IS THE RECIPE: %s' % (
            food.upper(), url))
    except:
        return code.say(err)
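dinner() leans on a module-level uri and a compiled re_mark pattern that are not shown. The sketch below is a guess at both, based on how results[0] is unpacked into a (url, food) pair.

# Assumed configuration for dinner(); the real site URL and pattern may differ.
import re

uri = 'http://whatthefuckshouldimakefordinner.com'       # assumed source page
re_mark = re.compile(r'<a href="(.*?)"[^>]*>(.*?)</a>')  # group 1: recipe url, group 2: dish name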
Example #13
def dinner(code, input):
    """fd -- WHAT DO YOU WANT FOR F*****G DINNER?"""
    err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
    try:
        data = get(uri).read()
        results = re_mark.findall(data)
        if not results:
            return code.say(err)
        url, food = results[0][0], htmlescape(results[0][1])
        code.say(
            'WHY DON\'T YOU EAT SOME F*****G {b}%s{b}. HERE IS THE RECIPE: %s'
            % (food.upper(), url))
    except:
        return code.say(err)
Example #14
def fml(code, input):
    """fml - Retrieve random FML's, via FMyLife.com's dev API."""
    # Random/No input
    if not input.group(2):
        try:
            r = fml_random()
            code.say('#{blue}%s{c} %s +{b}%s{b}/-{b}%s{b}' % (str(r['fml-id']),
                     web.htmlescape(r['fml']).replace('FML', '{red}FML{c}'), r['+'], r['-']))
        except:
            return code.say('{red}Failed to retrieve random FML.')
    elif input.group(2).startswith('#') and input.group(2).lstrip('#').isdigit():
        try:
            r = fml_id_search(input.group(2).lstrip('#'))
            code.say('#{blue}%s{c} %s +{b}%s{b}/-{b}%s{b}' % (str(r['fml-id']),
                     web.htmlescape(r['fml']).replace('FML', '{red}FML{c}'), r['+'], r['-']))
        except:
            return code.say('Failed to retrieve FML via ID.')
    # Input/Assume search query, with (possible) number at end indicating FML index
    else:
        msg = input.group(2).lower().strip()
        parts = msg.split()
        if parts[-1].replace('-', '').isdigit():
            if int(parts[-1]) <= 0:
                id = 1
            else:
                id = int(parts[-1].replace('-', ''))
            del parts[-1]
            query = '+'.join(parts)
        else:
            id = 1
            query = msg.replace(' ', '+')
        try:
            r = fml_search(query, id)
            code.say('(%s/%s) #{blue}%s{c} %s +{b}%s{b}/-{b}%s{b}' % (r['id'], r['max'], str(r['fml-id']),
                     web.htmlescape(r['fml']).replace('FML', '{red}FML{c}'), r['+'], r['-']))
        except:
            return code.say('Failed to search for FML.')
Example #15
def wa(code, input):
    """Wolphram Alpha search"""
    query = input.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    answer = urllib2.urlopen(uri + urllib.quote(query)).read()
    if answer and 'json stringified precioussss' not in answer:
        answer = answer.split(';')
        if len(answer) > 3:
            answer = answer[1]
        answer = '{purple}{b}WolframAlpha: {c}{b}' + answer
        while '  ' in answer:
            answer = answer.replace('  ', ' ')
        return code.say(web.htmlescape(answer))
    else:
        return code.reply('{red}Sorry, no result.')
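This older variant calls urllib and urllib2 directly rather than the web.get()/web.quote() wrappers used in the later version, so it additionally assumes these standard-library imports at module level:

# Imports this snippet relies on (not shown above).
import urllib
import urllib2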
Example #16
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        text = quote(input.group(2))
        data = get('http://thefuckingweather.com/?where=%s' % text).read()
        temp = re.compile(r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>').findall(data)[0]
        temp = re.sub(r'\<.*?\>', '', temp).strip().replace(' ', '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say(web.htmlescape(temp) + ' ' + remark + '. ' + flavor)
    except:
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
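Here get() and quote() are called without the web. prefix used elsewhere, so the module presumably imports them directly. A standard-library stand-in is sketched below, in case the bot's helpers are unavailable (an assumption, not the original import).

# Hypothetical replacements for the bare get()/quote() helpers used above.
import urllib
import urllib2

def quote(text):
    return urllib.quote(text)

def get(url):
    return urllib2.urlopen(url)   # caller invokes .read() on the result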
Example #17
def fml_random():
    """fml - Retrieve random FML's, via FMyLife.com's dev API."""
    try:
        r = web.get('http://api.fmylife.com/view/random/1?language=%s&key=%s' % (
            language, key
        )).read()
    except:
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
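fml_random() (and the ID/search variants) read two module-level settings, language and key, that are configured elsewhere in the bot. The placeholders below only show the shape; the key in particular must be replaced with a real FMyLife dev API key.

# Placeholder configuration assumed by the fml_* helpers.
language = 'en'            # API language code (assumed default)
key = 'your-api-key-here'  # FMyLife dev API key -- placeholder, not a real key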
Example #18
def fml_random():
    """fml - Retrieve random FML's, via FMyLife.com's dev API."""
    try:
        r = web.get('http://api.fmylife.com/view/random/1?language=%s&key=%s' % (
            language, key
        )).read()
    except:
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
Example #19
def fml_id_search(query_id):
    """fml - Retrieve the FML in accordance with the assigned ID, via FMyLife.com's dev API."""
    try:
        r = web.get('http://api.fmylife.com/view/%s/nocomment?language=%s&key=%s' % (
            str(query_id),
            language, key
        )).read()
    except:
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
Example #20
def fml_id_search(query_id):
    """fml - Retrieve the FML in accordance with the assigned ID, via FMyLife.com's dev API."""
    try:
        r = web.get('http://api.fmylife.com/view/%s/nocomment?language=%s&key=%s' % (
            str(query_id),
            language, key
        )).read()
    except:
        return
    fml = re.compile(r'<text>.*?</text>').findall(r)
    fmlid = re.compile(r'<item id=".*?">').findall(r)
    agree = re.compile(r'<agree>.*?</agree>').findall(r)
    deserved = re.compile(r'<deserved>.*?</deserved>').findall(r)
    return {
        'fml': web.htmlescape(web.striptags(fml[0]).strip()),
        'fml-id': fmlid[0].replace('<item id="', '', 1).replace('">', '', 1).strip(),
        '+': web.striptags(agree[0]).strip(),
        '-': web.striptags(deserved[0]).strip()
    }
Example #21
def wa(code, input):
    """Wolfram Alpha search"""
    query = input.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query), timeout=10).read()
    except:
        return code.say('It seems WolframAlpha took too long to respond!')

    if answer and 'json stringified precioussss' not in answer:
        answer = answer.strip('\n').split(';')
        for i in range(len(answer)):
            answer[i] = answer[i].replace('|', '').strip()
        answer = '{purple}{b}WolframAlpha: {c}{b}' + ' - '.join(answer).replace('\\', '').replace('->', ': ')
        while '  ' in answer:
            answer = answer.replace('  ', ' ')
        return code.say(web.htmlescape(answer))
    else:
        return code.reply('{red}Sorry, no result.')
Example #22
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say(
            '{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.'
        )
    try:
        text = quote(input.group(2))
        data = get('http://thefuckingweather.com/?where=%s' % text).read()
        temp = re.compile(
            r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>'
        ).findall(data)[0]
        temp = re.sub(r'\<.*?\>', '',
                      temp).strip().replace(' ', '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say(web.htmlescape(temp) + ' ' + remark + '. ' + flavor)
    except:
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
Example #23
def search(code, input):
    """Queries Google for the specified input."""
    r = google_search(input.group(2))
    if not r:
        return code.reply("Problem getting data from Google.")
    if not r['responseData']['results']:
        return code.reply("No results found for '{purple}%s{c}'." % input.group(2))
    urls = r['responseData']['results']
    if len(urls) > 3:
        urls = urls[0:3]

    count = r['responseData']['cursor']['resultCount']
    time = r['responseData']['cursor']['searchResultTime'] + 's'
    # Prettify the result count (e.g. 1,234,567 -> 1m)
    count_commas = [m.start() for m in re.finditer(',', count)]
    if len(count_commas) == 1:
        count = count.split(',', 1)[0] + 'k'
    elif len(count_commas) == 2:
        count = count.split(',', 1)[0] + 'm'
    elif len(count_commas) == 3:
        count = count.split(',', 1)[0] + 'b'

    output = []
    r_type = code.format('{b}{title}{b}{c} - {link}')
    colors, color_count = ['{blue}', '{teal}', '{green}'], 0
    for url in urls:
        # Change colors based on priority
        color = colors[color_count]
        color_count += 1
        # Remove html formatting
        title = web.striptags(web.htmlescape(url['title']))
        # Restrict sizing of titles to no longer than 50 chars
        if len(title) > 50:
            title = title[0:44] + '[...]'
        # Grab the result URL (no shortening applied here)
        link = url['url']
        output.append(color + r_type.format(title=title, link=link))
    code.say('%s ({b}%s{b}, {b}%s{b} results)' %
             (' | '.join(output), time, count))
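google_search() is defined elsewhere in the bot. A minimal sketch is given below, assuming it wraps Google's old AJAX web-search endpoint (now retired), whose JSON response carries the responseData/results/cursor keys read above.

# Hypothetical google_search(); the endpoint and its JSON shape are
# assumptions based on how the result is consumed in search() above.
def google_search(query):
    try:
        return web.json(
            'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s'
            % web.quote(query))
    except Exception:
        return None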