Example 1
def daemon(code, tc):
    while True:
        time.sleep(auto_check)

        if code.debug:
            output.info('Running check for new tweets', 'TWITTER')
        # Here we do the work...
        for channel in tc:
            for tweet_item in tc[channel]:
                if tweet_item.startswith('#'):  # ID
                    data = get_tweets(uri_hash % web.quote(tweet_item))
                else:
                    data = get_tweets(uri_user % web.quote(tweet_item), tweet_item)
                if not data:
                    continue
                data = data[0]
                hash_str = hash(data['text'])
                db = database.get(code.default, 'twitter')
                if not db:  # New data on new database, don't output anywhere..
                    database.set(code.default, [hash_str], 'twitter')
                    continue
                if hash_str in db:
                    continue  # Same

                db.append(hash_str)
                database.set(code.default, db, 'twitter')
                msg = format(data)
                code.msg(channel, msg.decode('ascii', 'ignore'), shorten_urls=False)
            db = database.get(code.default, 'twitter')
            if db:
                if len(db) > 200:
                    db = db[-200:]
                    database.set(code.default, db, 'twitter')
Example 2
def twitter(code, input):
    """twitter <hashtag|username> - Return twitter results for search"""
    err = '{red}{b}Unable to find any tweets with that search!'
    args = input.group(2).strip()
    if args.startswith('#'):
        data = get_tweets(uri_hash % web.quote(args))
    else:
        data = get_tweets(uri_user % web.quote(args.strip('@')))
    if not data:
        return code.say(err)
    return code.msg(input.sender, format(data[0]), shorten_urls=False)
Example 3
def check(ip):
    ip = str(ip)
    data = web.get(base % web.quote(ip)).read().replace('\n', '').replace('\r', '')
    items = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(data)
    if not items:
        return
    item = web.striptags(items[0])

    if 'We don\'t have data on this IP currently.' in item:
        return
    elif 'none of its visits have resulted' in item:
        return
    else:
        item = item.split('Below', 1)[0]

    if 'The Project Honey Pot system has ' in item:
        item = item.split('The Project Honey Pot system has ')[1]
        item = item[0].upper() + item[1:]

    if 'This IP has not seen any suspicious activity' in data:
        if 'the IP address' in item:
            item = item.replace('the IP address', '%s' % ip)
        output.warning(str(item) + ' This is an old record, so it might be invalid.')
        return

    if 'the IP address' in item:
        item = item.replace('the IP address', '{red}%s{c}' % ip)

    return '{b}%s{b}' % item.strip()
Example 4
def google_search(query):
    """Search using Googles AjaxSearch functionality."""
    try:
        data = web.json(uri % web.quote(query))
        return data
    except:
        return False
Example 5
    def _search(cls, search_term, results=0):

        url = cls._search_url.format(
            API_KEY, web.quote(search_term.encode('UTF-8', 'ignore')))

        response = web.get(url=url, as_json=True, referer=REFERER)

        if len(response.errors) > 0:
            log.error(response.errors)
            return None
        else:
            if 'items' in response.json:
                tracks = []
                for i, item in enumerate(response.json['items']):
                    video_id = item['id']['videoId']
                    details = cls._details(video_id)

                    if details is not None:
                        tracks.append(details)

                        if results == 0 and len(tracks) == 1:
                            break

                        elif results > 0 and results == len(tracks):
                            break

                if results == 0 and len(tracks) > 0:
                    return tracks[0]

                return tracks

            return None
Example 6
File: steam.py Project: adiq/stah
def steam_app_auto(code, input):
    try:
        data = web.get('http://steamdb.info/app/%s/' %
                       web.quote(input.group(1)), timeout=10).read()
        output = []
        output.append(
            re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name
        # Metacritic Score
        score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
        if len(score) < 1:
            score = '{b}N/A{b}'
        else:
            score = score[0]
        output.append('Rating: %s/100' % score)

        # Released yet?
        if '<td class="span3">releasestate</td><td>prerelease</td>' in data:
            output.append('{blue}Prerelease{c}')

        # OS List
        if '<td class="span3">oslist</td>' in data:
            tmp = re.findall(
                r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
            tmp = re.findall(r'title="(.*?)"', tmp)
            output.append('OS: ' + ', '.join(tmp))
        else:
            output.append('OS: N/A')

        # With pricing, there are a few options...
        # 1. Free, 2. Cost, 3. Cost with discount
        # As well, 1. Not released (May cause issues with rendering the price
        # table) or 2. released

        if 'isfreeapp</td><td>Yes</td>' in data:
            # We know it's free!
            output.append('{green}Free{c}')
        elif '<table class="table table-prices">' in data:
            tmp = re.findall(
                r'<table class="table table-prices">.*?<tbody><tr>(.*?)</tr></tbody>', data)[0]
            tmp = tmp.replace('<td>', '').split('</td>', 1)[0]
            # We know it's paid... now check if discounted..
            if 'price-discount' in tmp:
                # We know it's discounted
                initial = tmp.split(
                    'class="price-initial">', 1)[1].split('</span>', 1)[0]
                new = tmp.split('</span>', 1)[1].split('<', 1)[0]
                discount = tmp.split(
                    '"price-discount">', 1)[1].split('<', 1)[0]
                output.append('{green}%s{c} (%s, was %s)' %
                              (new, discount, initial))
            else:
                output.append('{green}' + tmp)

        output.append('http://store.steampowered.com/app/%s/' %
                      re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])
        # If neither, the price is unknown, so ignore it. Likely an issue
        # with release pricing.
        return str(' - {b}'.join(output).replace(': ', ': {b}'))
    except:
        return
Example 7
def define(code, input):
    try:
        data = web.json(uri % web.quote(input.group(2)))[0]
    except:
        return code.reply('{red}Failed to get definition!')

    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word).strip()
    word = web.htmlescape(word)
    word = word.replace('\\n', '').replace('\n', '')
    while '  ' in word:
        word = word.replace('  ', ' ')

    word = word.encode('ascii', 'ignore')
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))

    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (data['query'],
                                        word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    if len(word) > 250:
        word = word[:245] + '{c}{b}[...]'
    code.say(word)
Example 8
def define(code, input):
    try:
        data = web.json(uri % web.quote(input.group(2)))[0]
    except:
        return code.reply('{red}Failed to get definition!')

    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word).strip()
    word = web.htmlescape(word)
    word = word.replace('\\n', '').replace('\n', '')
    while '  ' in word:
        word = word.replace('  ', ' ')

    word = word.encode('ascii', 'ignore')
    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))

    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (
        data['query'], word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    if len(word) > 250:
        word = word[:245] + '{c}{b}[...]'
    code.say(word)
Example 9
def check(ip):
    ip = str(ip)
    data = web.text(base % web.quote(ip)).replace('\n', '').replace('\r', '')
    items = re.compile(r'<div class="contain">.*?<p>(.*?)</p>').findall(data)
    if not items:
        return
    item = web.striptags(items[0])

    if 'We don\'t have data on this IP currently.' in item:
        return
    elif 'none of its visits have resulted' in item:
        return
    else:
        item = item.split('Below', 1)[0]

    if 'The Project Honey Pot system has ' in item:
        item = item.split('The Project Honey Pot system has ')[1]
        item = item[0].upper() + item[1:]

    if 'This IP has not seen any suspicious activity' in data:
        if 'the IP address' in item:
            item = item.replace('the IP address', '%s' % ip)
        output.warning(
            str(item) + ' This is an old record, so it might be invalid.')
        return

    if 'the IP address' in item:
        item = item.replace('the IP address', '{red}%s{c}' % ip)

    if 'Double check your URL to make sure this error' in item:
        return
    return '{b}%s{b}' % item.strip()
Example 10
def translate(text, input='auto', output='en'):
    raw = False
    if output.endswith('-raw'):
        output = output[:-4]
        raw = True

    uri = 'https://translate.google.com/translate_a/t'
    params = {
        'sl': web.quote(input),
        'tl': web.quote(output),
        'js': 'n',
        'prev': '_t',
        'hl': 'en',
        'ie': 'UTF-8',
        'text': web.quote(text),
        'client': 't',
        'multires': '1',
        'sc': '1',
        'uptl': 'en',
        'tsel': '0',
        'ssel': '0',
        'otf': '1',
    }

    result = web.text(uri, params=params)

    # this is hackish
    # this makes the returned data parsable by the json module
    result = result.replace(',,', ',').replace('[,', '["",')

    while ',,' in result:
        result = result.replace(',,', ',null,')
    data = json.loads(result)

    if raw:
        return str(data), 'en-raw'

    try:
        language = data[2]
    except:
        language = '?'

    if isinstance(language, list):
        language = data[-2][0][0]

    return ''.join(x[0] for x in data[0]), language
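
The comments above call the comma-fixing step hackish: the translate endpoint returns JavaScript-style arrays with empty slots that json.loads() will not accept. Below is a minimal standalone sketch of just that repair step, independent of the bot's web helper; the sample string is invented for illustration, not a captured API payload.

import json

def repair_google_json(raw):
    # Mirror the replacements done in translate() above so that the
    # standard json module can parse the JS-style array literal.
    raw = raw.replace(',,', ',').replace('[,', '["",')
    while ',,' in raw:
        raw = raw.replace(',,', ',null,')
    return json.loads(raw)

# Hypothetical response fragment:
sample = '[[["Hola","Hello",,,1]],,"en"]'
print(repair_google_json(sample))  # [[['Hola', 'Hello', None, 1]], 'en']

Note that the repair is lossy: runs of empty slots collapse into a single null, which is why the original comment describes the approach as hackish rather than a proper parser.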
Example 11
def tag_search(search_str, by_id=True, max_tunes=40):
    """
    Search last.fm for tunes matching the search term
    and turn them into a list of YouTube Track objects.

    :param search_str: Search term to search for.
    :type search_str: str
    :param by_id: If True, only tunes that have a YouTube id will be added (recommended).
    :type by_id: bool
    :param max_tunes: The max amount of tunes to return.
    :type max_tunes: int
    :return: A list of Track objects.
    :rtype: list | None
    """
    url = TAG_SEARCH_URL.format(max_tunes, web.quote(search_str))
    lastfm = web.get(url=url, as_json=True)

    log.debug('lastfm response %s' % lastfm.json)

    if len(lastfm.errors) > 0:
        log.error(lastfm.errors)
        return None
    else:
        if 'track' in lastfm.json['results']:
            if len(lastfm.json['results']['track']) != 0:
                yt_tracks = []

                for track in lastfm.json['results']['track']:
                    search_str = '%s-%s' % (track['artist'], track['name'])
                    if 'playlink' in track:

                        if 'data-youtube-id' in track['playlink']:
                            youtube_id = track['playlink']['data-youtube-id']
                            yt = Youtube.id_details(youtube_id)
                            log.debug(yt)
                            if yt is not None:
                                yt_tracks.append(yt)

                        else:
                            if not by_id:
                                yt = Youtube.search(search_str)
                                log.debug(
                                    'search by search string: %s result: %s' %
                                    (search_str, yt))
                                if yt is not None:
                                    yt_tracks.append(yt)

                    else:
                        if not by_id:
                            yt = Youtube.search(search_str)
                            log.debug(
                                'search by search string: %s result: %s' %
                                (search_str, yt))
                            if yt is not None:
                                yt_tracks.append(yt)

                return yt_tracks
Example 12
def calc(code, input):
    try:
        data = web.json(uri % web.quote(input.group(2).replace('^', '**')))
        if data['AnswerType'] != 'calc':
            return code.reply('Failed to calculate')
        answer = web.striptags(data['Answer'])
        return code.say(answer)
    except:
        return code.reply('Failed to calculate!')
Example 13
def location(name):
    name = web.quote(name)
    data = web.json(
        'http://ws.geonames.org/searchJSON?q=%s&maxRows=1&username=%s' % (name, user))
    try:
        name = data['geonames'][0]['name']
    except IndexError:
        return None, None, None, None
    country = data['geonames'][0]['countryName']
    lat = data['geonames'][0]['lat']
    lng = data['geonames'][0]['lng']
    return name, country, lat, lng
Example 14
def urban(code, input):
    # clean and split the input
    try:
        if input.group(2):
            msg = input.group(2).lower().strip()
            tmp = msg.replace('-', '').split()
            if tmp[-1].isdigit():
                if int(tmp[-1]) <= 0:
                    id = 0
                else:
                    id = int(tmp[-1].replace('-', '')) - 1
                del tmp[-1]
                msg = ' '.join(tmp)
            else:
                id = 0
            data = web.json(uri % web.quote(msg))['list']
            if not data:
                return code.reply(error)
            max = len(data)
            if id > max:
                id = max
                data = data[max - 1]
            else:
                data = data[id]
                id += 1
            msg = '({purple}{id}{c} of {purple}{max}{c}) "{purple}{word}{c}": {definition} +{red}{up}{c}/-{red}{down}{c}'
            if len(data['definition']) > 235:
                data['definition'] = data['definition'][0:235] + '[...]'
            return code.say(
                code.format(msg).format(id=str(id),
                                        max=str(max),
                                        definition=strp(data['definition']),
                                        word=data['word'],
                                        up=str(data['thumbs_up']),
                                        down=str(data['thumbs_down'])))
            # Begin trying to get the definition
        else:
            # Get a random definition...
            data = web.json(random_uri)['list'][0]
            if not data:
                return code.reply(error)
            msg = '(Definition for "{purple}{word}{c}"): {definition} +{red}{up}{c}/-{red}{down}{c}'
            if len(data['definition']) > 235:
                data['definition'] = data['definition'][0:235] + '[...]'
            return code.say(
                code.format(msg).format(definition=strp(data['definition']),
                                        word=data['word'],
                                        up=str(data['thumbs_up']),
                                        down=str(data['thumbs_down'])))
    except:
        return code.reply(
            '{red}{b}Failed to pull definition from urbandictionary.com!')
Example 15
def location(name):
    name = web.quote(name)
    data = web.json(
        'http://ws.geonames.org/searchJSON?q=%s&maxRows=1&username=%s' %
        (name, user))
    try:
        name = data['geonames'][0]['name']
    except IndexError:
        return None, None, None, None
    country = data['geonames'][0]['countryName']
    lat = data['geonames'][0]['lat']
    lng = data['geonames'][0]['lng']
    return name, country, lat, lng
Example 16
File: calc.py Project: HeyMan7/Code
def py(code, input):
    """python <commands> -- Execute Python inside of a sandbox"""
    query = input.group(2).encode('utf-8')
    try:
        answer = web.text(py_uri + web.quote(query))
        if answer:
            answer = answer.replace('\n', ' ').replace(
                '\t', ' ').replace('\r', '')
            return code.reply(answer)
        else:
            return code.reply('Sorry, no {b}%s{b}' % input.group(2))
    except:
        return code.reply('{red}The server did not return an answer.')
Example 17
def steam_app_auto(code, input):
    data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)),
                    timeout=10)
    output = []
    output.append(
        re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>',
                   data)[0])  # Name

    # Metacritic Score
    score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
    if len(score) < 1:
        output.append('Rating: N/A')
    else:
        output.append('Rating: %s/100' % score[0])

    # Released yet?
    if re.search(r'(?im)<td .*?>releasestate</td><td>prerelease</td>', data):
        output.append('{blue}Prerelease{c}')

    # OS List
    if '<td class="span3">oslist</td>' in data:
        tmp = re.findall(
            r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
        tmp = re.findall(r'title="(.*?)"', tmp)
        output.append('OS: ' + ', '.join(tmp))
    else:
        output.append('OS: N/A')

    # With pricing, there are a few options...
    # 1. Free, 2. Cost, 3. Cost with discount
    # As well, 1. Not released (May cause issues with rendering the price
    # table) or 2. released
    if re.search(r'(?im)<td .*?>isfreeapp</td>.*?<td>Yes</td>', data):
        output.append('{green}Free{c}')
    else:
        tmp = re.findall(  # e.g. $19.99 at -20%
            r'<img .*? alt="us".*?> U.S. Dollar</td><td .*?>(?P<price>.*?)</td>'
            + '<td .*?>Base Price</td><td .*?>(?P<lowest>.*?)</td></tr>',
            data)[0][0]
        tmp = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}',
                     tmp)
        tmp = re.sub(
            r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
            r'{green}\g<price>{c} ({red}\g<discount>{c})', web.striptags(tmp))
        output.append(tmp)

    output.append(
        'http://store.steampowered.com/app/%s/' %
        re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])

    return str(' - {b}'.join(output).replace(': ', ': {b}'))
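
The two re.sub calls at the end of the pricing branch above turn the scraped price cell into the bot's colour markup. Here is a quick standalone check of that behaviour, using the "$19.99 at -20%" form mentioned in the comment; the striptags step is skipped since the sample contains no HTML.

import re

def colour_price(cell):
    # Same two substitutions used in steam_app_auto() above.
    cell = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}', cell)
    cell = re.sub(
        r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
        r'{green}\g<price>{c} ({red}\g<discount>{c})', cell)
    return cell

print(colour_price('$19.99'))          # {green}$19.99{c}
print(colour_price('$19.99 at -20%'))  # {green}$19.99{c} ({red}-20%{c})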
Example 18
def py(code, input):
    """python <commands> -- Execute Python inside of a sandbox"""
    query = input.group(2).encode('utf-8')
    uri = 'http://tumbolia.appspot.com/py/'
    try:
        answer = web.get(uri + web.quote(query)).read()
        if answer:
            answer = answer.replace('\n', ' ').replace(
                '\t', ' ').replace('\r', '')
            return code.reply(answer)
        else:
            return code.reply('Sorry, no {b}%s{b}' % input.group(2))
    except:
        return code.reply('{red}The server did not return an answer.')
Example 19
def wikiSearch(query, url, results=5):
    """Use MediaWikis API to search for values from wiktionary and wikipedia"""
    # First, we need to grab the data, and serialize it in JSON
    url_query = web.quote(query)
    data = web.json(full_search % (lang, url, url_query))

    # Check whether we have at least `results` entries, then build the list
    if not data[1]:
        return False
    if len(data[1]) > results:
        return data[1][:results]
    else:
        # Assume it's smaller than or equal to `results`
        return data[1]
Example 20
def daemon(code, tc):
    while True:
        time.sleep(auto_check)

        if code.debug:
            output.info('Running check for new tweets', 'TWITTER')
        # Here we do the work...
        for channel in tc:
            for tweet_item in tc[channel]:
                if tweet_item.startswith('#'):  # ID
                    data = get_tweets(uri_hash % web.quote(tweet_item))
                else:
                    data = get_tweets(uri_user % web.quote(tweet_item),
                                      tweet_item)
                if not data:
                    continue
                data = data[0]
                hash_str = hash(data['text'])
                db = database.get(code.default, 'twitter')
                if not db:  # New data on new database, don't output anywhere..
                    database.set(code.default, [hash_str], 'twitter')
                    continue
                if hash_str in db:
                    continue  # Same

                db.append(hash_str)
                database.set(code.default, db, 'twitter')
                msg = format(data)
                code.msg(channel,
                         msg.decode('ascii', 'ignore'),
                         shorten_urls=False)
            db = database.get(code.default, 'twitter')
            if db:
                if len(db) > 200:
                    db = db[-200:]
                    database.set(code.default, db, 'twitter')
Example 21
def urban(code, input):
    # clean and split the input
    try:
        if input.group(2):
            msg = input.group(2).lower().strip()
            tmp = msg.replace('-', '').split()
            if tmp[-1].isdigit():
                if int(tmp[-1]) <= 0:
                    id = 0
                else:
                    id = int(tmp[-1].replace('-', '')) - 1
                del tmp[-1]
                msg = ' '.join(tmp)
            else:
                id = 0
            data = web.json(uri % web.quote(msg))['list']
            if not data:
                return code.reply(error)
            max = len(data)
            if id > max:
                id = max
                data = data[max - 1]
            else:
                data = data[id]
                id += 1
            msg = '({purple}{id}{c} of {purple}{max}{c}) "{purple}{word}{c}": {definition} +{red}{up}{c}/-{red}{down}{c}'
            if len(data['definition']) > 235:
                data['definition'] = data['definition'][0:235] + '[...]'
            return code.say(code.format(msg).format(
                id=str(id), max=str(max), definition=strp(data['definition']),
                word=data['word'], up=str(data['thumbs_up']), down=str(data['thumbs_down'])
            ))
            # Begin trying to get the definition
        else:
            # Get a random definition...
            data = web.json(random_uri)['list'][0]
            if not data:
                return code.reply(error)
            msg = '(Definition for "{purple}{word}{c}"): {definition} +{red}{up}{c}/-{red}{down}{c}'
            if len(data['definition']) > 235:
                data['definition'] = data['definition'][0:235] + '[...]'
            return code.say(code.format(msg).format(
                definition=strp(data['definition']), word=data['word'],
                up=str(data['thumbs_up']), down=str(data['thumbs_down'])
            ))
    except:
        return code.reply('{red}{b}Failed to pull definition from urbandictionary.com!')
Example 22
def steam_app_auto(code, input):
    data = web.text('http://steamdb.info/app/%s/' % web.quote(input.group(1)), timeout=10)
    output = []
    output.append(
        re.findall(r'<td>Name</td><td itemprop="name">(.*?)</td>', data)[0])  # Name

    # Metacritic Score
    score = re.findall(r'metacritic_score</td><td>(.*?)</td>', data)
    if len(score) < 1:
        output.append('Rating: N/A')
    else:
        output.append('Rating: %s/100' % score[0])

    # Released yet?
    if re.search(r'(?im)<td .*?>releasestate</td><td>prerelease</td>', data):
        output.append('{blue}Prerelease{c}')

    # OS List
    if '<td class="span3">oslist</td>' in data:
        tmp = re.findall(
            r'<tr><td class="span3">oslist</td><td>(.*?)</td></tr>', data)[0]
        tmp = re.findall(r'title="(.*?)"', tmp)
        output.append('OS: ' + ', '.join(tmp))
    else:
        output.append('OS: N/A')

    # With pricing, there are a few options...
    # 1. Free, 2. Cost, 3. Cost with discount
    # As well, 1. Not released (May cause issues with rendering the price
    # table) or 2. released
    if re.search(r'(?im)<td .*?>isfreeapp</td>.*?<td>Yes</td>', data):
        output.append('{green}Free{c}')
    else:
        tmp = re.findall(  # e.g. $19.99 at -20%
            r'<img .*? alt="us".*?> U.S. Dollar</td><td .*?>(?P<price>.*?)</td>' +
            '<td .*?>Base Price</td><td .*?>(?P<lowest>.*?)</td></tr>', data)[0][0]
        tmp = re.sub(r'^(?P<price>\$[0-9,.-]{2,6})$', r'{green}\g<price>{c}', tmp)
        tmp = re.sub(
            r'(?P<price>\$[0-9,.-]{2,6}) at (?P<discount>\-[0-9.]{1,3}\%)',
            r'{green}\g<price>{c} ({red}\g<discount>{c})', web.striptags(tmp))
        output.append(tmp)

    output.append('http://store.steampowered.com/app/%s/' %
                  re.findall(r'<td class="span3">App ID</td><td>(.*?)</td>', data)[0])

    return str(' - {b}'.join(output).replace(': ', ': {b}'))
Example 23
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        text = web.quote(input.group(2))
        data = web.get('http://thefuckingweather.com/?where=%s' % text).read()
        temp = re.compile(
            r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>').findall(data)[0]
        temp = re.sub(r'\<.*?\>', '', temp).strip().replace(' ',
                                                            '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say(web.htmlescape(temp) + ' ' + remark + '. ' + flavor)
    except:
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
Example 24
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        args = {
            "where": web.quote(input.group(2))
        }
        data = web.text('http://thefuckingweather.com/', params=args)
        temp = re.compile(
            r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>').findall(data)[0]
        temp = web.striptags(temp).replace(' ', '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say('%s {b}%s{b}. %s' % (web.escape(temp), remark, flavor))
    except:
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
Example 25
def wa(code, input):
    """Wolfram Alpha search - It's slow. """
    query = input.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.text(uri + web.quote(query), timeout=14)
    except:
        return code.say('It seems WolframAlpha took too long to respond!')

    if answer and 'json stringified precioussss' not in answer:
        answer = answer.strip('\n').split(';')
        for i in range(len(answer)):
            answer[i] = answer[i].replace('|', '').strip()
        answer = '{purple}{b}WolframAlpha: {c}{b}' + ' - '.join(answer).replace('\\', '').replace('->', ': ')
        while '  ' in answer:
            answer = answer.replace('  ', ' ')
        return code.say(web.escape(answer))
    else:
        return code.reply('{red}Sorry, no result.')
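
The loop above flattens the semicolon-separated answer into one coloured line. The sketch below reproduces the same cleanup on an invented answer string; the real service's output format may differ.

def clean_wa_answer(answer):
    # Equivalent cleanup to the loop in wa() above.
    parts = [part.replace('|', '').strip()
             for part in answer.strip('\n').split(';')]
    out = '{purple}{b}WolframAlpha: {c}{b}' + \
        ' - '.join(parts).replace('\\', '').replace('->', ': ')
    while '  ' in out:
        out = out.replace('  ', ' ')
    return out

# Invented example input, shaped like "query;result":
print(clean_wa_answer('2+2;| 4'))  # {purple}{b}WolframAlpha: {c}{b}2+2 - 4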
Example 26
def wa(code, input):
    """Wolfram Alpha search"""
    query = input.group(2)
    uri = 'http://tumbolia.appspot.com/wa/'
    try:
        answer = web.get(uri + web.quote(query), timeout=10).read()
    except:
        return code.say('It seems WolframAlpha took too long to respond!')

    if answer and 'json stringified precioussss' not in answer:
        answer = answer.strip('\n').split(';')
        for i in range(len(answer)):
            answer[i] = answer[i].replace('|', '').strip()
        answer = '{purple}{b}WolframAlpha: {c}{b}' + ' - '.join(answer).replace('\\', '').replace('->', ': ')
        while '  ' in answer:
            answer = answer.replace('  ', ' ')
        return code.say(web.htmlescape(answer))
    else:
        return code.reply('{red}Sorry, no result.')
Example 27
def movie_search(code, input):
    """imdb movie/show title -- displays information about a production"""
    try:
        # Url-ify
        search = web.quote(input.group(2).strip())

        # Pull response from API, and load into a JSON based dict()
        data = web.json(search_uri % search)

        # If we get an error from the API. (Other errors are caught from the
        # try:;except:)
        if data['Response'] == 'False':
            return code.reply(error)

        # Start creating a response
        response = build_response(data)
        output = []
        for section in response:
            output.append('{blue}%s{c}: %s' % (section[0], section[1]))
        return code.say(' | '.join(output))
    except:
        return code.reply(error)
Example 28
    def playlist_search(cls, search_term, results=5):
        """
        Search for a playlist matching the search term.

        :param search_term: The search term to search for.
        :type search_term: str
        :param results: The amount of playlist to return.
        :type results: int
        :return: A list containing `playlist_title` and `playlist_id`.
        :rtype: list | None
        """
        url = cls._playlist_search_url.format(
            API_KEY, web.quote(search_term.encode('UTF-8', 'ignore')))

        response = web.get(url=url, as_json=True, referer=REFERER)

        if len(response.errors) > 0:
            log.error(response.errors)
            return None
        else:
            if 'items' in response.json:
                play_lists = []

                for i, item in enumerate(response.json['items']):

                    playlist_id = item['id']['playlistId']
                    playlist_title = item['snippet']['title']

                    play_list_info = {
                        'playlist_title': playlist_title,
                        'playlist_id': playlist_id
                    }
                    play_lists.append(play_list_info)

                    if i == results - 1:
                        break

                return play_lists
Example 29
def wikiDefine(term, url):
    """Use MediaWikis API to define a value from wiktionary and wikipedia"""
    # First, we need to grab the data, and serialize it in JSON
    url_query = web.quote(term)
    data = web.json(full_define % (lang, url, maxlen, url_query))[
        'query']['pages']

    # We need to see if it was found. If it wasn't it'll be a -1 page
    for pageNumber, pageData in data.iteritems():
        if pageNumber == '-1':
            # Assume failed to find
            return False
        else:
            # Assume found a result. Now, find and return the title/contents.
            if pageData['extract'].startswith('REDIRECT'):
                # This means it's a redirect page according to MediaWiki API
                return False
            title = pageData['title']
            content = pageData['extract'].encode(
                'ascii', 'ignore').replace('\n', ' ')
            while '  ' in content:
                content = content.replace('  ', ' ')
            return [title, content]
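
wikiDefine() above relies on the MediaWiki convention that a lookup for a missing page comes back under the key '-1' in query.pages. A minimal sketch of the two shapes the loop has to handle, using abridged hand-written sample data rather than live API output:

# Found: keyed by the numeric page id, with 'title' and 'extract'.
found = {
    '736': {'title': 'Albert Einstein',
            'extract': 'Albert Einstein was a theoretical physicist ...'},
}

# Missing: a single '-1' entry, which wikiDefine() treats as a failed lookup.
missing = {
    '-1': {'title': 'Nosuchterm', 'missing': ''},
}

def first_extract(pages):
    # Same found/not-found check as the loop in wikiDefine(),
    # minus the REDIRECT handling.
    for page_number, page_data in pages.items():
        if page_number == '-1':
            return None
        return page_data['title'], page_data['extract']

print(first_extract(found))    # ('Albert Einstein', 'Albert Einstein was ...')
print(first_extract(missing))  # None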
Example 30
def define(code, input):
    try:
        data = web.json(uri.format(word=web.quote(input.group(2))))[0]
    except:
        return code.reply('{red}Failed to get definition!')

    # Go through filters to remove extra stuff that's not needed.
    word = data['html']
    word = web.striptags(word)
    word = web.escape(word)
    word = word.replace('\\n', '').replace('\n', '')
    while '  ' in word:
        word = word.replace('  ', ' ')

    word = word.encode('ascii', 'ignore')
    if len(word) > 380:
        word = word[:375] + '{c}{b}[...]'

    # loop through and replace all possible type names

    for name in highlight:
        name = ' {} '.format(name)
        if data['query'].lower().strip() == name.lower():
            continue
        tmp = re.findall(name, word, flags=re.IGNORECASE)
        for item in tmp:
            word = word.replace(item, " [{blue}{b}%s{b}{c}] " % item.strip())

    if 'is not in the dictionary.' in word:
        return code.say('Definition for {b}%s{b} not found' % input.group(2))

    name = data['query'][0].upper() + data['query'][1::]
    # Everything below here is for colors only
    word = '{b}{purple}%s{c}{b}: %s' % (name, word[len(data['query']) + 1::])
    word = word.replace('(', '{purple}{b}(').replace(')', '){b}{c}')
    code.say(word)
Example 31
    def search(cls, search_term):
        """
        Search the Wikipedia API for a search term.

        :param search_term: The search term to search for.
        :type search_term: str
        :return: A WikiResponse.
        :rtype: WikiResponse
        """
        search = web.quote(search_term.encode('UTF-8', 'ignore'))
        url = cls.base_url.format('&action=opensearch&search=%s&redirects=resolve' % search)
        response = web.get(url=url, as_json=True)

        if len(response.errors) > 0:
            log.debug(response.errors)
        else:
            # response.json always has four elements, even if nothing matched

            # response.json[0] is the search term
            # response.json[1] is a list of possible response titles
            # response.json[2] is a list of summaries
            # response.json[3] is a list of links to the actual Wikipedia pages

            return WikiResponse(response.json)
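
The trailing comments spell out the four-element opensearch payload that WikiResponse wraps. A small hand-written sample (not captured API output) showing how those indices line up:

# Shape of a typical opensearch response, per the comments above:
sample = [
    "python",                                      # [0] the search term
    ["Python (programming language)", "Python"],   # [1] matching titles
    ["Python is a high-level programming ...",     # [2] summaries
     "The python is a large snake ..."],
    ["https://en.wikipedia.org/wiki/Python_(programming_language)",
     "https://en.wikipedia.org/wiki/Python"],      # [3] links to the pages
]

term, titles, summaries, links = sample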