Example No. 1
File: search.py Project: J3RN/jenni
def duck_api(query):
    '''Send 'query' to DDG's API and return results as a dictionary'''
    #query = web.urllib.quote(query)
    uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1&kp=-1' % query
    results = proxy.get(uri)
    results = json.loads(results)
    return results
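
The commented-out line suggests the query was meant to be URL-encoded before being interpolated into the URI; as written, spaces or an '&' in the query would corrupt the request. A minimal sketch of the encoded variant, assuming Python 2's urllib and that the listing's json and proxy objects are in scope:

# Sketch: URL-encode the query before building the URI, as the
# commented-out web.urllib.quote(query) line appears to intend.
import urllib

def duck_api_encoded(query):
    uri = ('https://api.duckduckgo.com/?q=%s&format=json'
           '&no_html=1&no_redirect=1&kp=-1') % urllib.quote(query)
    return json.loads(proxy.get(uri))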
Example No. 2
def isup(jenni, input):
    '''isup.me website status checker'''
    site = input.group(2)
    if not site:
        return jenni.reply('What site do you want to check?')
    if ' ' in site:
        idx = site.find(' ')
        site = site[:idx+1]
    site = site.strip()

    if site[:7] != 'http://' and site[:8] != 'https://':
        if '://' in site:
            protocol = site.split('://')[0] + '://'
            return jenni.reply('Try it again without the %s' % protocol)
        else:
            site = 'http://' + site
    try:
        response = proxy.get(site)
    except Exception as e:
        jenni.say(site + ' looks down from here.')
        return

    if response:
        jenni.say(site + ' looks fine to me.')
    else:
        jenni.say(site + ' is down from here.')
Example No. 3
def btc_coinbase_page():
    try:
        page = proxy.get(
            'https://coinbase.com/api/v1/currencies/exchange_rates')
    except Exception as e:
        print dt.datetime.now(), e
        return False, 'Failed to reach coinbase.com'
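Note that btc_coinbase_page, as listed, returns a value only on failure: the success branch is missing, presumably truncated by the listing (the btc_page listings further down have the same shape). By symmetry with the failure tuple it would plausibly end with something like return True, page, but that line is not part of the source shown.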
Example No. 4
def isup(jenni, input):
    '''isup.me website status checker'''
    site = input.group(2)
    if not site:
        return jenni.reply('What site do you want to check?')
    if ' ' in site:
        idx = site.find(' ')
        site = site[:idx + 1]
    site = site.strip()

    if site[:7] != 'http://' and site[:8] != 'https://':
        if '://' in site:
            protocol = site.split('://')[0] + '://'
            return jenni.reply('Try it again without the %s' % protocol)
        else:
            site = 'http://' + site
    try:
        response = proxy.get(site)
    except Exception as e:
        jenni.say(site + ' looks down from here.')
        return

    if response:
        jenni.say(site + ' looks fine to me.')
    else:
        jenni.say(site + ' is down from here.')
Example No. 5
def duck_api(query):
    '''Send 'query' to DDG's API and return results as a dictionary'''
    #query = web.urllib.quote(query)
    uri = 'https://api.duckduckgo.com/?q=%s&format=json&no_html=1&no_redirect=1&kp=-1' % query
    results = proxy.get(uri)
    results = json.loads(results)
    return results
Example No. 6
File: search.py Progetto: J3RN/jenni
def duck_search(query):
    '''Do a DuckDuckGo Search'''

    ## grab results from the API for the query
    duck_api_results = duck_api(query)

    ## output is a string of the URL result

    ## try to find the first result
    if 'Results' in duck_api_results and min_size('Results', duck_api_results):
        ## 'Results' is the most common place to look for the first result
        output = duck_api_results['Results'][0]['FirstURL']
    elif 'AbstractURL' in duck_api_results and min_size('AbstractURL', duck_api_results):
        ## if there is no 'result', let's try AbstractURL
        ## this is usually a wikipedia article
        output = duck_api_results['AbstractURL']
    elif 'RelatedTopics' in duck_api_results and min_size('RelatedTopics', duck_api_results):
        ## if we still can't find a search result, let's grab a topic URL
        ## this is usually vaguely related to the search query
        ## many times this is a wikipedia result
        for topic in duck_api_results['RelatedTopics']:
            output = '%s - %s' % (topic['Name'], topic['Topics'][0]['FirstURL'])
            if 'duckduckgo.com' in output:
                ## as a last resort, DuckDuckGo will provide links to the query on its site
                ## it doesn't appear to ever return a https URL
                output = output.replace('http://', 'https://')
            break
    else:
        ## if we still can't find a search result via the API
        ## let's try scraping the html page
        uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en&kp=-1' % query #web.urllib.quote(query)
        #page = web.get(uri)
        page = proxy.get(uri)

        r_duck = re.compile(r'nofollow" class="[^"]+" href="(.*?)">')

        bad_results = ['/y.js?', '//ad.ddg.gg/', '.msn.com/', 'r.search.yahoo.com/',]
        m = r_duck.findall(page)
        output = str()
        if m:
            for result in m:
                valid_result = True
                for each in bad_results:
                    if each in result:
                        valid_result = False
                if valid_result:
                    output = result
                    break
        else:
            ## if we absolutely can't find a URL, let's try scraping the HTML
            ## page for a zero_click info
            return((duck_zero_click_scrape(page), False))

    return((duck_sanitize(output), True))
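
duck_search leans on a min_size helper that is not included in these listings. A plausible sketch, assuming it simply guards against an empty value for the given key (the real definition lives in search.py and may differ):

# Hypothetical sketch of min_size (the actual helper is not shown here):
# treat a key as usable only when its value is non-empty.
def min_size(key, duck_api_results):
    return len(duck_api_results.get(key) or '') > 0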
Example No. 7
File: pun.py Project: MrRaph/jenni
def puns(jenni, input):
    url = 'http://www.punoftheday.com/cgi-bin/randompun.pl'
    exp = re.compile(r'<div class="dropshadow1">\n<p>(.*?)</p>\n</div>')
    page = proxy.get(url)

    result = exp.search(page)
    if result:
        pun = result.groups()[0]
        jenni.say(pun)
    else:
        jenni.say("I'm afraid I'm not feeling punny today!")
Example No. 8
def duck_search(query):
    '''Do a DuckDuckGo Search'''

    ## grab results from the API for the query
    duck_api_results = duck_api(query)

    ## output is a string of the URL result

    ## try to find the first result
    if 'Results' in duck_api_results and min_size('Results', duck_api_results):
        ## 'Results' is the most common place to look for the first result
        output = duck_api_results['Results'][0]['FirstURL']
    elif 'AbstractURL' in duck_api_results and min_size('AbstractURL', duck_api_results):
        ## if there is no 'result', let's try AbstractURL
        ## this is usually a wikipedia article
        output = duck_api_results['AbstractURL']
    elif 'RelatedTopics' in duck_api_results and min_size('RelatedTopics', duck_api_results):
        ## if we still can't find a search result, let's grab a topic URL
        ## this is usually vaguely related to the search query
        ## many times this is a wikipedia result
        for topic in duck_api_results['RelatedTopics']:
            output = '%s - %s' % (topic['Name'], topic['Topics'][0]['FirstURL'])
            if 'duckduckgo.com' in output:
                ## as a last resort, DuckDuckGo will provide links to the query on its site
                ## it doesn't appear to ever return a https URL
                output = output.replace('http://', 'https://')
            break
    else:
        ## if we still can't find a search result via the API
        ## let's try scraping the html page
        uri = 'https://duckduckgo.com/html/?q=%s&kl=us-en&kp=-1' % web.urllib.quote(query)
        page = proxy.get(uri)

        r_duck = re.compile(r'nofollow" class="[^"]+" href="(.*?)">')

        bad_results = ['/y.js?', '//ad.ddg.gg/', '.msn.com/', 'r.search.yahoo.com/',]
        m = r_duck.findall(page)
        output = str()
        if m:
            for result in m:
                valid_result = True
                for each in bad_results:
                    if each in result:
                        valid_result = False
                if valid_result:
                    output = result
                    break
        else:
            ## if we absolutely can't find a URL, let's try scraping the HTML
            ## page for a zero_click info
            return((duck_zero_click_scrape(page), False))

    return((duck_sanitize(output), True))
Example No. 9
def excuse(jenni, input):
    a = re.compile('<a [\s\S]+>(.*)</a>')

    try:
        page = proxy.get('http://programmingexcuses.com/')
    except:
        return jenni.say("I'm all out of excuses!")

    results = a.findall(page)

    if results:
        result = results[0]
        result = result.strip()
        if result[-1] not in ['.', '?', '!']:
            result += '.'
        jenni.say(result)
    else:
        jenni.say("I'm too lazy to find an excuse.")
Example No. 10
def ytsearch(jenni, trigger):
    """Search YouTube"""
    #modified from ytinfo: Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham.
    if not hasattr(jenni.config, 'google_dev_apikey'):
        return jenni.say(
            'Please sign up for a Google Developer API key to use this function.'
        )
    key = jenni.config.google_dev_apikey

    query = trigger.group(2).encode('utf-8').strip()
    uri = BASE_URL + "search?part=snippet&type=video&q=" + query + "&key=" + key
    result = json.loads(proxy.get(uri))

    num_results = result['pageInfo']['totalResults']
    return_text = "YouTube returned {0} results: ".format(num_results)

    entry_text = []
    for item in result['items']:
        try:
            title = item['snippet']['title']
            title = title.encode('utf8')
        except KeyError:
            title = "N/A"
        if len(title) > 50:
            title = title[:50] + ' ...'
        title = colorize(title)

        try:
            author = item['snippet']['channelTitle']
            author = author.encode('utf8')
        except KeyError:
            author = 'N/A'

        link = 'https://youtu.be/' + item['id']['videoId']
        link = link.encode('utf8')

        entry_text.append("{0} by {1} ({2})".format(title, author, link))

    all_entries = ""
    if int(num_results) > 0:
        all_entries = ', '.join(entry_text[1:])

    jenni.say(return_text + all_entries)
Example No. 11
def ytsearch(jenni, trigger):
    """Search YouTube"""
    #modified from ytinfo: Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham.
    if not hasattr(jenni.config, 'google_dev_apikey'):
        return jenni.say('Please sign up for a Google Developer API key to use this function.')
    key = jenni.config.google_dev_apikey

    query = trigger.group(2).encode('utf-8').strip()
    uri = BASE_URL + "search?part=snippet&type=video&q=" + query + "&key=" + key
    result = json.loads(proxy.get(uri))

    num_results = result['pageInfo']['totalResults']
    return_text = "YouTube returned {0} results: ".format(num_results)

    entry_text = []
    for item in result['items']:
        try:
            title = item['snippet']['title']
            title = title.encode('utf8')
        except KeyError:
            title = "N/A"
        if len(title) > 50:
            title = title[:50] + ' ...'
        title = colorize(title)

        try:
            author = item['snippet']['channelTitle']
            author = author.encode('utf8')
        except KeyError:
            author = 'N/A'

        link = 'https://youtu.be/' + item['id']['videoId']
        link = link.encode('utf8')

        entry_text.append("{0} by {1} ({2})".format(title, author, link))

    all_entries = ""
    if int(num_results) > 0:
        all_entries = ', '.join(entry_text[1:])

    jenni.say(return_text + all_entries)
Example No. 12
def ytsearch(jenni, trigger):
    """Search YouTube"""
    # modified from ytinfo: Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham.
    if not hasattr(jenni.config, "google_dev_apikey"):
        return jenni.say("Please sign up for a Google Developer API key to use this function.")
    key = jenni.config.google_dev_apikey

    query = trigger.group(2).encode("utf-8").strip()
    uri = BASE_URL + "search?part=snippet&type=video&q=" + query + "&key=" + key
    result = json.loads(proxy.get(uri))

    num_results = result["pageInfo"]["totalResults"]
    return_text = "YouTube returned {0} results: ".format(num_results)

    entry_text = []
    for item in result["items"]:
        try:
            title = item["snippet"]["title"]
            title = title.encode("utf8")
        except KeyError:
            title = "N/A"
        if len(title) > 50:
            title = title[:50] + " ..."
        title = colorize(title)

        try:
            author = item["snippet"]["channelTitle"]
            author = author.encode("utf8")
        except KeyError:
            author = "N/A"

        link = "https://youtu.be/" + item["id"]["videoId"]
        link = link.encode("utf8")

        entry_text.append("{0} by {1} ({2})".format(title, author, link))

    all_entries = ""
    if int(num_results) > 0:
        all_entries = ", ".join(entry_text[1:])

    jenni.say(return_text + all_entries)
Example No. 13
def image_me(term):
    global google_images_uri

    t = urllib.quote_plus(term)
    # URL encode the term given
    if '%' in term:
        t = urllib.quote_plus(term.replace('%', ''))

    content = proxy.get(google_images_uri % t)

    soup = Soup(content)
    img_links = [a['href'] for a in soup.findAll('a', 'rg_l', href=True)]

    if img_links:
        full_link = img_links[random.randint(0, len(img_links) - 1)]
        parsed_link = urlparse.urlparse(full_link)
        query = urlparse.parse_qs(parsed_link.query)
        img_url = query['imgurl']
        if type(img_url) == list:
            img_url = img_url[0]
        return urllib.unquote_plus(img_url)
Example No. 14
def image_me(term):
    global google_images_uri

    t = urllib.quote_plus(term)
    # URL encode the term given
    if '%' in term:
        t = urllib.quote_plus(term.replace('%', ''))

    content = proxy.get(google_images_uri % t)

    soup = Soup(content)
    img_links = [a['href'] for a in soup.findAll('a', 'rg_l', href=True)]

    if img_links:
        full_link = img_links[random.randint(0, len(img_links) - 1)]
        parsed_link = urlparse.urlparse(full_link)
        query = urlparse.parse_qs(parsed_link.query)
        img_url = query['imgurl']
        if type(img_url) == list:
            img_url = img_url[0]
        return urllib.unquote_plus(img_url)
Example No. 15
def fbtc(jenni, input):
    '''.fbtc - returns prices from "The F*****g Bitcoin"'''
    try:
        page = proxy.get('http://thefuckingbitcoin.com/')
    except:
        return jenni.say('Could not access thefuckingbitcoin.com')

    price = re.search('<p id="lastPrice">(\S+)</p>', page)
    remarks = re.search('<p id="remark">(.*?)</p><p id="remarkL2">(.*?)</p>', page)

    try:
        remarks = remarks.groups()
    except:
        return jenni.say('Could not find relevant information.')

    resp = str()
    resp += '1 BTC == %s USD. ' % price.groups()

    if remarks:
        resp += '%s %s' % (remarks[0], remarks[1])

    jenni.say(resp)
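
One fragility worth flagging: if the lastPrice pattern does not match, price is None and price.groups() raises an AttributeError outside any try block. A small guard that would sit right after the two re.search calls (a sketch):

    # Sketch: guard the price match the same way remarks is guarded,
    # since re.search returns None when nothing matches.
    if price is None:
        return jenni.say('Could not find relevant information.')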
Example No. 16
File: calc.py Project: myano/jenni
def math(jenni, input):
    if not input.group(2):
        return jenni.reply("No search term.")

    txt = input.group(2)
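    # For a unicode txt, the encode/decode/encode round-trip below reduces to
    # a single .encode('utf-8'). Note also that urllib.quote escapes '%', so
    # the '+' -> '%2B' replacement below ends up double-encoded as '%252B'.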
    txt = txt.encode('utf-8')
    txt = txt.decode('utf-8')
    txt = txt.encode('utf-8')
    txt = urllib.quote(txt.replace('+', '%2B'))

    url = 'http://gamma.sympy.org/input/?i='

    re_answer = re.compile(r'<script type="\S+; mode=display".*?>(.*?)</script>')

    page = proxy.get(url + txt)

    results = re_answer.findall(page)

    if results:
        jenni.say(results[0])
    else:
        jenni.say('No results found on gamma.sympy.org!')
Example No. 17
def cryptocoin(jenni, input):
    try:
        page = proxy.get("https://api.coinmarketcap.com/v1/ticker/")
    except:
        return jenni.say('[CryptoCoin] Connection to API did not succeed.')

    try:
        data = json.loads(page)
    except:
        return jenni.say(
            "[CryptoCoin] Couldn't make sense of information from API")
    currency = None
    text = input.group(2)
    for x in data:
        if x["name"].lower() == text.lower():
            currency = x
            break
        elif x["id"].lower() == text.lower():
            currency = x
            break
        elif x["symbol"].lower() == text.lower():
            currency = x
            break
    if currency is None:
        jenni.say("Currency not found")
    else:
        jenni.say(currency["name"] + " (" + currency["symbol"] +
                  ") - Price (USD): " + nicecurrency(currency['price_usd']) +
                  " - Price (BTC): " + nicedeci(currency['price_btc']) +
                  " - Market Cap (USD): " +
                  nicecurrency(currency['market_cap_usd']) +
                  " - In Circulation: " +
                  nicenum(currency["available_supply"]) +
                  " - Volume (24 hours - USD): " +
                  nicecurrency(currency['24h_volume_usd']) + " - 1 hour: " +
                  currency['percent_change_1h'] + "% - 24 hours: " +
                  currency['percent_change_24h'] + "% - 7 days: " +
                  currency['percent_change_7d'] + "%")
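
The three-way elif chain can be collapsed into a single pass; an equivalent lookup (a sketch with the same behaviour, assuming Python 2.6+ for next()):

# Sketch: match on name, id, or symbol in one expression.
text_l = text.lower()
currency = next((x for x in data
                 if text_l in (x['name'].lower(), x['id'].lower(),
                               x['symbol'].lower())), None)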
Example No. 18
def math(jenni, input):
    if not input.group(2):
        return jenni.reply("No search term.")

    txt = input.group(2)
    txt = txt.encode('utf-8')
    txt = txt.decode('utf-8')
    txt = txt.encode('utf-8')
    txt = urllib.quote(txt.replace('+', '%2B'))

    url = 'http://gamma.sympy.org/input/?i='

    re_answer = re.compile(
        r'<script type="\S+; mode=display".*?>(.*?)</script>')

    page = proxy.get(url + txt)

    results = re_answer.findall(page)

    if results:
        jenni.say(results[0])
    else:
        jenni.say('No results found on gamma.sympy.org!')
Example No. 19
def fbtc(jenni, input):
    '''.fbtc - returns prices from "The F*****g Bitcoin"'''
    try:
        page = proxy.get('http://thefuckingbitcoin.com/')
    except:
        return jenni.say('Could not access thefuckingbitcoin.com')

    price = re.search('<p id="lastPrice">(\S+)</p>', page)
    remarks = re.search('<p id="remark">(.*?)</p><p id="remarkL2">(.*?)</p>',
                        page)

    try:
        remarks = remarks.groups()
    except:
        return jenni.say('Could not find relevant information.')

    resp = str()
    resp += '1 BTC == %s USD. ' % price.groups()

    if remarks:
        resp += '%s %s' % (remarks[0], remarks[1])

    jenni.say(resp)
Example No. 20
def ytget(jenni, trigger):
    if not hasattr(jenni.config, 'google_dev_apikey'):
        return 'err'

    key = jenni.config.google_dev_apikey

    try:
        vid_id = trigger.group(2)
        uri = BASE_URL + "videos?part=snippet,contentDetails,statistics&id=" + vid_id + "&key=" + key
        bytes = proxy.get(uri)
        result = json.loads(bytes)
        video_entry = result['items'][0]
    except IndexError:
        jenni.say('Video not found through the YouTube API.')
        return 'err'
    except Exception:
        jenni.say('Something went wrong when accessing the YouTube API.')
        traceback.print_exc()
        return 'err'

    vid_info = {}
    vid_info['link'] = 'https://youtu.be/' + vid_id

    try:
        vid_info['title'] = video_entry['snippet']['title']
    except KeyError:
        vid_info['title'] = 'N/A'

    #get youtube channel
    try:
        vid_info['uploader'] = video_entry['snippet']['channelTitle']
    except KeyError:
        vid_info['uploader'] = 'N/A'

    #get upload time in format: yyyy-MM-ddThh:mm:ss.sssZ
    try:
        upraw = video_entry['snippet']['publishedAt']
        vid_info['uploaded'] = '%s/%s/%s, %s:%s' % (
            upraw[0:4], upraw[5:7], upraw[8:10], upraw[11:13], upraw[14:16])
    except KeyError:
        vid_info['uploaded'] = 'N/A'

    #get duration in seconds (contentDetails)
    try:
        if video_entry["snippet"]["liveBroadcastContent"] == "live":
            vid_info['length'] = 'LIVE'
        elif video_entry["snippet"]["liveBroadcastContent"] == "upcoming":
            vid_info['length'] = 'UPCOMING'
        else:
            duration = video_entry["contentDetails"]["duration"]
            # Now replace
            duration = duration.replace("P", "")
            duration = duration.replace("D", "days ")
            duration = duration.replace("T", "")
            duration = duration.replace("H", "hours ")
            duration = duration.replace("M", "mins ")
            duration = duration.replace("S", "secs")
            vid_info['length'] = duration
    except KeyError:
        vid_info['length'] = 'N/A'

    #get views (statistics)
    try:
        views = video_entry['statistics']['viewCount']
        vid_info['views'] = str('{0:20,d}'.format(int(views))).lstrip(' ')
    except KeyError:
        vid_info['views'] = 'N/A'

    #get comment count (statistics)
    try:
        comments = video_entry['statistics']['commentCount']
        vid_info['comments'] = str('{0:20,d}'.format(
            int(comments))).lstrip(' ')
    except KeyError:
        vid_info['comments'] = 'N/A'

    #get favourites (statistics)
    try:
        favourites = video_entry['statistics']['favoriteCount']
        vid_info['favourites'] = str('{0:20,d}'.format(
            int(favourites))).lstrip(' ')
    except KeyError:
        vid_info['favourites'] = 'N/A'

    #get likes & dislikes (statistics)
    try:
        likes = video_entry['statistics']['likeCount']
        vid_info['likes'] = str('{0:20,d}'.format(int(likes))).lstrip(' ')
    except KeyError:
        vid_info['likes'] = 'N/A'
    try:
        dislikes = video_entry['statistics']['dislikeCount']
        vid_info['dislikes'] = str('{0:20,d}'.format(
            int(dislikes))).lstrip(' ')
    except KeyError:
        vid_info['dislikes'] = 'N/A'

    #get video description (snippet)
    try:
        vid_info['description'] = video_entry['snippet']['description']
    except KeyError:
        vid_info['description'] = 'N/A'
    return vid_info
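
The chained replace calls assume durations shaped like PT#H#M#S; an ISO 8601 duration such as P1M (one month) would have its month marker rewritten as 'mins'. YouTube does not normally emit months here, but a regex-based sketch for the PnDTnHnMnS shapes is sturdier:

# Sketch: parse the PnDTnHnMnS duration shapes explicitly.
import re

r_duration = re.compile(r'P(?:(\d+)D)?(?:T(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?)?')

def pretty_duration(duration):
    days, hours, mins, secs = (int(g) if g else 0
                               for g in r_duration.match(duration).groups())
    parts = [(days, 'days'), (hours, 'hours'), (mins, 'mins'), (secs, 'secs')]
    return ' '.join('%d %s' % (n, u) for n, u in parts if n) or '0 secs'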
Example No. 21
File: calc.py Project: myano/jenni
def c(jenni, input):
    '''.c -- Google calculator.'''

    ## let's not bother if someone doesn't give us input
    if not input.group(2):
        return jenni.reply('Nothing to calculate.')

    ## handle some unicode conversions
    q = input.group(2).encode('utf-8')
    q = q.replace('\xcf\x95', 'phi')  # utf-8 U+03D5
    q = q.replace('\xcf\x80', 'pi')  # utf-8 U+03C0
    temp_q = q.replace(' ', '')

    ## Attempt #1 (Google)
    uri_base = 'https://www.google.com/search?gbv=1&q='
    uri = uri_base + web.urllib.quote(temp_q)

    ## To the webs!
    page = str()
    try:
        page = proxy.get(uri)
    except:
        ## if we can't access Google for calculating
        ## let us try with good ole' web.get
        page = web.get(uri)

    answer = False

    if page:
        ## if we get a response from Google
        ## let us parse out an equation from Google Search results
        answer = c_answer.findall(page)

    if answer:
        ## if the regex finding found a match we want the first result
        answer = answer[0]
        answer = clean_up_answer(answer)
        jenni.say(answer)
    else:
        #### Attempt #1a
        uri = uri_base + web.urllib.quote(q)
        try:
            page = proxy.get(uri)
        except:
            page = web.get(uri)

        answer = False

        if page:
            answer = c_answer.findall(page)

        if answer:
            answer = answer[0]
            answer = clean_up_answer(answer)
            jenni.say(answer)
        else:
            #### Attempt #2 (DuckDuckGo's API)
            ddg_uri = 'https://api.duckduckgo.com/?format=json&q='
            ddg_uri += urllib.quote(q)

            ## Try to grab page (results)
            ## If page can't be accessed, we shall fail!
            try:
                page = proxy.get(ddg_uri)
            except:
                page = web.get(ddg_uri)

            ## Try to take page source and json-ify it!
            try:
                json_response = json.loads(page)
            except:
                ## if it can't be json-ified, then we shall fail!
                json_response = None

            ## Check for 'AnswerType' (stolen from search.py)
            ## Also 'fail' to None so we can move on to Attempt #3
            if (not json_response) or ('AnswerType' in json_response and json_response['AnswerType'] != 'calc'):
                answer = None
            else:
                ## If the json contains an Answer that is the result of 'calc'
                ## then continue
                answer = json_response['Answer']
                if isinstance(answer, dict) and 'result' in answer:
                    answer = answer['result']
                parts = answer.split('</style>')
                answer = ''.join(parts[1:])
                answer = re.sub(r'<.*?>', '', answer).strip()

            if answer:
                ## If we have found answer with Attempt #2
                ## go ahead and display it
                answer += ' [DDG API]'
                return jenni.say(answer)

            else:
                #### Attempt #3 (Wolfram Alpha)
                if not hasattr(jenni.config, 'wolframalpha_apikey'):
                    return jenni.say(WAKEY_NOTFOUND)

                answer = get_wa(q, jenni.config.wolframalpha_apikey)

                return jenni.say(answer + ' [WA]')
Example No. 22
def btc_page():
    try:
        page = proxy.get('https://api.bitcoincharts.com/v1/markets.json')
    except Exception as e:
        print dt.datetime.now(), e
        return False, 'Failed to reach bitcoincharts.com'
Example No. 23
def movie(jenni, input):
    '''.imdb movie/show title -- displays information about a production'''

    if not input.group(2):
        return jenni.say('Please enter a movie or TV show title. '
                         'Year is optional.')

    word = input.group(2).rstrip()
    matchObj = re.match(r'([\w\s]*)\s?,\s?(\d{4})', word, re.M | re.I)

    if matchObj:
        title = matchObj.group(1)
        year = matchObj.group(2)
        title = prep_title(title)
        uri = API_BASE_URL + '?t=%s&y=%s&plot=short&r=json' % (title, year)
    else:
        title = word
        title = prep_title(title)
        uri = API_BASE_URL + '?t=%s&plot=short&r=json' % (title)

    try:
        page = proxy.get(uri)
    except:
        return jenni.say('[IMDB] Connection to API did not succeed.')

    try:
        data = json.loads(page)
    except:
        return jenni.say("[IMDB] Couldn't make sense of information from API")

    message = '[IMDB] '

    if data['Response'] == 'False':
        if 'Error' in data:
            message += data['Error']
        else:
            message += 'Got an error from imdbapi'
    else:
        pre_plot_output = u'Title: {0} | Released: {1} | Rated: {2} '
        pre_plot_output += '| Rating: {3} | Metascore: {4} | Genre: {5} '
        pre_plot_output += '| Runtime: {6} | Plot: '
        genre = data['Genre']
        runtime = data['Runtime']
        pre_plot = pre_plot_output.format(data['Title'], data['Released'],
                                          data['Rated'], data['imdbRating'],
                                          data['Metascore'], genre, runtime)

        after_plot_output = ' | IMDB Link: http://imdb.com/title/{0}'
        after_plot = after_plot_output.format(data['imdbID'])
        truncation = '[...]'

        ## 510 - (16 + 8 + 63)
        ## max_chars (minus \r\n) - (max_nick_length + max_ident_length
        ##     + max_vhost_length_on_freenode)
        max_len_of_plot = 423 - (len(pre_plot) + len(after_plot) +
                                 len(truncation))

        new_plot = data['Plot']
        if len(data['Plot']) > max_len_of_plot:
            new_plot = data['Plot'][:max_len_of_plot] + truncation

        message = pre_plot + new_plot + after_plot

    jenni.say(message)
Example No. 24
File: imdb.py Project: J3RN/jenni
def movie(jenni, input):
    '''.imdb movie/show title -- displays information about a production'''

    if not input.group(2):
        return jenni.say('Please enter a movie or TV show title. '
                         'Year is optional.')

    word = input.group(2).rstrip()
    matchObj = re.match(r'([\w\s]*)\s?,\s?(\d{4})', word, re.M | re.I)

    if matchObj:
        title = matchObj.group(1)
        year = matchObj.group(2)
        title = prep_title(title)
        uri = API_BASE_URL + '?t=%s&y=%s&plot=short&r=json' % (title, year)
    else:
        title = word
        title = prep_title(title)
        uri = API_BASE_URL + '?t=%s&plot=short&r=json' % (title)

    try:
        page = proxy.get(uri)
    except:
        return jenni.say('[IMDB] Connection to API did not succeed.')

    try:
        data = json.loads(page)
    except:
        return jenni.say("[IMDB] Couldn't make sense of information from API")

    message = '[IMDB] '

    if data['Response'] == 'False':
        if 'Error' in data:
            message += data['Error']
        else:
            message += 'Got an error from imdbapi'
    else:
        pre_plot_output = u'Title: {0} | Released: {1} | Rated: {2} '
        pre_plot_output += '| Rating: {3} | Metascore: {4} | Genre: {5} '
        pre_plot_output += '| Runtime: {6} | Plot: '
        genre = data['Genre']
        runtime = data['Runtime']
        pre_plot = pre_plot_output.format(data['Title'], data['Released'],
                                          data['Rated'], data['imdbRating'],
                                          data['Metascore'], genre,
                                          runtime)

        after_plot_output = ' | IMDB Link: http://imdb.com/title/{0}'
        after_plot = after_plot_output.format(data['imdbID'])
        truncation = '[...]'

        ## 510 - (16 + 8 + 63)
        ## max_chars (minus \r\n) - (max_nick_length + max_ident_length
        ##     + max_vhost_length_on_freenode)
        max_len_of_plot = 423 - (len(pre_plot) + len(after_plot) + len(truncation))

        new_plot = data['Plot']
        if len(data['Plot']) > max_len_of_plot:
            new_plot = data['Plot'][:max_len_of_plot] + truncation

        message = pre_plot + new_plot + after_plot

    jenni.say(message)
Example No. 25
def c(jenni, input):
    '''.c -- Google calculator.'''

    ## let's not bother if someone doesn't give us input
    if not input.group(2):
        return jenni.reply('Nothing to calculate.')

    ## handle some unicode conversions
    q = input.group(2).encode('utf-8')
    q = q.replace('\xcf\x95', 'phi')  # utf-8 U+03D5
    q = q.replace('\xcf\x80', 'pi')  # utf-8 U+03C0
    temp_q = q.replace(' ', '')

    ## Attempt #1 (Google)
    uri_base = 'http://www.google.com/search?gbv=1&q='
    uri = uri_base + web.urllib.quote(temp_q)

    ## To the webs!
    page = str()
    try:
        page = proxy.get(uri)
    except:
        ## if we can't access Google for calculating
        ## let us move on to Attempt #2
        page = web.get(uri)

    answer = False
    if page:
        ## if we get a response from Google
        ## let us parse out an equation from Google Search results
        answer = c_answer.findall(page)

    if answer:
        ## if the regex finding found a match we want the first result
        answer = answer[0]
        answer = clean_up_answer(answer)
        jenni.say(answer)
    else:
        #### Attempt #1a
        uri = uri_base + web.urllib.quote(q)
        try:
            page = proxy.get(uri)
        except:
            page = web.get(uri)

        answer = False

        if page:
            answer = c_answer.findall(page)
        if answer:
            answer = answer[0]
            answer = clean_up_answer(answer)
            jenni.say(answer)
        else:

            #### Attempt #2
            attempt_two = False
            try:
                from BeautifulSoup import BeautifulSoup
                attempt_two = True
            except:
                attempt_two = False

            output = str()
            """
            if attempt_two:
                new_url = 'https://duckduckgo.com/html/?q=%s&kl=us-en&kp=-1' % (web.urllib.quote(q))
                try:
                    ddg_html_page = proxy.get(new_url)
                except:
                    ddg_html_page = web.get(new_url)
                soup = BeautifulSoup(ddg_html_page)

                ## use BeautifulSoup to parse HTML for an answer
                zero_click = str()
                if soup('div', {'class': 'zero-click-result'}):
                    zero_click = str(soup('div', {'class': 'zero-click-result'})[0])

                ## remove some excess text
                output = r_tag.sub('', zero_click).strip()
                output = output.replace('\n', '').replace('\t', '')

                ## test to see if the search module has 'remove_spaces'
                ## otherwise, let us fail
                try:
                    output = search.remove_spaces(output)
                except:
                    output = str()
            """
            output = False

            if output:
                ## If Attempt #2 worked, display the answer
                jenni.say(output + ' [DDG HTML]')

            else:
                #### Attempt #3 (DuckDuckGo's API)
                ddg_uri = 'https://api.duckduckgo.com/?format=json&q='
                ddg_uri += urllib.quote(q)

                ## Try to grab page (results)
                ## If page can't be accessed, we shall fail!
                try:
                    page = proxy.get(ddg_uri)
                except:
                    page = web.get(ddg_uri)

                ## Try to take page source and json-ify it!
                try:
                    json_response = json.loads(page)
                except:
                    ## if it can't be json-ified, then we shall fail!
                    json_response = None

                ## Check for 'AnswerType' (stolen from search.py)
                ## Also 'fail' to None so we can move on to Attempt #4
                if (not json_response) or ('AnswerType' in json_response and json_response['AnswerType'] != 'calc'):
                    answer = None
                else:
                    ## If the json contains an Answer that is the result of 'calc'
                    ## then continue
                    answer = json_response['Answer']
                    if isinstance(answer, dict) and 'result' in answer:
                        answer = answer['result']
                    parts = answer.split('</style>')
                    answer = ''.join(parts[1:])
                    answer = re.sub(r'<.*?>', '', answer).strip()

                if answer:
                    ## If we have found an answer with Attempt #3
                    ## go ahead and display it
                    answer += ' [DDG API]'
                    jenni.say(answer)

                else:
                    #### Attempt #4 (Wolfram Alpha)
                    status, answer = get_wa(q)

                    if status:
                        jenni.say(answer + ' [WA]')
                    else:
                        ## If we made it this far, we have tried all available resources
                        jenni.say('Absolutely no results!')
Example No. 26
def btc_coinbase_page():
    try:
        page = proxy.get('https://coinbase.com/api/v1/currencies/exchange_rates')
    except Exception as e:
        print dt.datetime.now(), e
        return False, 'Failed to reach coinbase.com'
Example No. 27
def btc_page():
    try:
        page = proxy.get('https://api.bitcoincharts.com/v1/markets.json')
    except Exception as e:
        print dt.datetime.now(), e
        return False, 'Failed to reach bitcoincharts.com'
Example No. 28
def bing_search(query, lang='en-GB'):
    query = web.urllib.quote(query)
    base = 'https://www.bing.com/search?mkt=%s&q=' % lang
    page = proxy.get(base + query)
    m = r_bing.search(page)
    if m: return m.group(1)
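
bing_search depends on a module-level r_bing pattern that is not part of these listings. A purely hypothetical sketch of the kind of pattern involved (the real one is defined in search.py and may differ):

# Hypothetical: capture the first result link's href from Bing's markup.
r_bing = re.compile(r'<h3><a href="([^"]+)"')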
Example No. 29
def ytget(jenni, trigger):
    if not hasattr(jenni.config, "google_dev_apikey"):
        return "err"

    key = jenni.config.google_dev_apikey

    try:
        vid_id = trigger.group(2)
        uri = BASE_URL + "videos?part=snippet,contentDetails,statistics&id=" + vid_id + "&key=" + key
        bytes = proxy.get(uri)
        result = json.loads(bytes)
        video_entry = result["items"][0]
    except IndexError:
        jenni.say("Video not found through the YouTube API.")
        return "err"
    except Exception:
        jenni.say("Something went wrong when accessing the YouTube API.")
        traceback.print_exc()
        return "err"

    vid_info = {}
    vid_info["link"] = "https://youtu.be/" + vid_id

    try:
        vid_info["title"] = video_entry["snippet"]["title"]
    except KeyError:
        vid_info["title"] = "N/A"

    # get youtube channel
    try:
        vid_info["uploader"] = video_entry["snippet"]["channelTitle"]
    except KeyError:
        vid_info["uploader"] = "N/A"

    # get upload time in format: yyyy-MM-ddThh:mm:ss.sssZ
    try:
        upraw = video_entry["snippet"]["publishedAt"]
        vid_info["uploaded"] = "%s/%s/%s, %s:%s" % (upraw[0:4], upraw[5:7], upraw[8:10], upraw[11:13], upraw[14:16])
    except KeyError:
        vid_info["uploaded"] = "N/A"

    # get duration in seconds (contentDetails)
    try:
        if video_entry["snippet"]["liveBroadcastContent"] == "live":
            vid_info["length"] = "LIVE"
        elif video_entry["snippet"]["liveBroadcastContent"] == "upcoming":
            vid_info["length"] = "UPCOMING"
        else:
            duration = video_entry["contentDetails"]["duration"]
            # Now replace
            duration = duration.replace("P", "")
            duration = duration.replace("D", "days ")
            duration = duration.replace("T", "")
            duration = duration.replace("H", "hours ")
            duration = duration.replace("M", "mins ")
            duration = duration.replace("S", "secs")
            vid_info["length"] = duration
    except KeyError:
        vid_info["length"] = "N/A"

    # get views (statistics)
    try:
        views = video_entry["statistics"]["viewCount"]
        vid_info["views"] = str("{0:20,d}".format(int(views))).lstrip(" ")
    except KeyError:
        vid_info["views"] = "N/A"

    # get comment count (statistics)
    try:
        comments = video_entry["statistics"]["commentCount"]
        vid_info["comments"] = str("{0:20,d}".format(int(comments))).lstrip(" ")
    except KeyError:
        vid_info["comments"] = "N/A"

    # get favourites (statistics)
    try:
        favourites = video_entry["statistics"]["favoriteCount"]
        vid_info["favourites"] = str("{0:20,d}".format(int(favourites))).lstrip(" ")
    except KeyError:
        vid_info["favourites"] = "N/A"

    # get likes & dislikes (statistics)
    try:
        likes = video_entry["statistics"]["likeCount"]
        vid_info["likes"] = str("{0:20,d}".format(int(likes))).lstrip(" ")
    except KeyError:
        vid_info["likes"] = "N/A"
    try:
        dislikes = video_entry["statistics"]["dislikeCount"]
        vid_info["dislikes"] = str("{0:20,d}".format(int(dislikes))).lstrip(" ")
    except KeyError:
        vid_info["dislikes"] = "N/A"

    # get video description (snippet)
    try:
        vid_info["description"] = video_entry["snippet"]["description"]
    except KeyError:
        vid_info["description"] = "N/A"
    return vid_info
Example No. 30
File: search.py Project: J3RN/jenni
def bing_search(query, lang='en-GB'):
    query = web.urllib.quote(query)
    base = 'https://www.bing.com/search?mkt=%s&q=' % lang
    page = proxy.get(base + query)
    m = r_bing.search(page)
    if m: return m.group(1)
Example No. 31
def c(jenni, input):
    '''.c -- Google calculator.'''

    ## let's not bother if someone doesn't give us input
    if not input.group(2):
        return jenni.reply('Nothing to calculate.')

    ## handle some unicode conversions
    q = input.group(2).encode('utf-8')
    q = q.replace('\xcf\x95', 'phi')  # utf-8 U+03D5
    q = q.replace('\xcf\x80', 'pi')  # utf-8 U+03C0
    temp_q = q.replace(' ', '')

    ## Attempt #1 (Google)
    uri_base = 'https://www.google.com/search?gbv=1&q='
    uri = uri_base + web.urllib.quote(temp_q)

    ## To the webs!
    page = str()
    try:
        page = proxy.get(uri)
    except:
        ## if we can't access Google for calculating
        ## let us try with good ole' web.get
        page = web.get(uri)

    answer = False

    if page:
        ## if we get a response from Google
        ## let us parse out an equation from Google Search results
        answer = c_answer.findall(page)

    if answer:
        ## if the regex finding found a match we want the first result
        answer = answer[0]
        answer = clean_up_answer(answer)
        jenni.say(answer)
    else:
        #### Attempt #1a
        uri = uri_base + web.urllib.quote(q)
        try:
            page = proxy.get(uri)
        except:
            page = web.get(uri)

        answer = False

        if page:
            answer = c_answer.findall(page)

        if answer:
            answer = answer[0]
            answer = clean_up_answer(answer)
            jenni.say(answer)
        else:
            #### Attempt #2 (DuckDuckGo's API)
            ddg_uri = 'https://api.duckduckgo.com/?format=json&q='
            ddg_uri += urllib.quote(q)

            ## Try to grab page (results)
            ## If page can't be accessed, we shall fail!
            try:
                page = proxy.get(ddg_uri)
            except:
                page = web.get(ddg_uri)

            ## Try to take page source and json-ify it!
            try:
                json_response = json.loads(page)
            except:
                ## if it can't be json-ified, then we shall fail!
                json_response = None

            ## Check for 'AnswerType' (stolen from search.py)
            ## Also 'fail' to None so we can move on to Attempt #3
            if (not json_response) or ('AnswerType' in json_response and
                                       json_response['AnswerType'] != 'calc'):
                answer = None
            else:
                ## If the json contains an Answer that is the result of 'calc'
                ## then continue
                answer = json_response['Answer']
                if isinstance(answer, dict) and 'result' in answer:
                    answer = answer['result']
                parts = answer.split('</style>')
                answer = ''.join(parts[1:])
                answer = re.sub(r'<.*?>', '', answer).strip()

            if answer:
                ## If we have found answer with Attempt #2
                ## go ahead and display it
                answer += ' [DDG API]'
                return jenni.say(answer)

            else:
                #### Attempt #3 (Wolfram Alpha)
                if not hasattr(jenni.config, 'wolframalpha_apikey'):
                    return jenni.say(WAKEY_NOTFOUND)

                answer = get_wa(q, jenni.config.wolframalpha_apikey)

                return jenni.say(answer + ' [WA]')
Example No. 32
def ytget(jenni, trigger):
    if not hasattr(jenni.config, 'google_dev_apikey'):
        return 'err'

    key = jenni.config.google_dev_apikey

    try:
        vid_id = trigger.group(2)
        uri = BASE_URL + "videos?part=snippet,contentDetails,statistics&id=" + vid_id + "&key=" + key
        bytes = proxy.get(uri)
        result = json.loads(bytes)
        video_entry = result['items'][0]
    except IndexError:
        jenni.say('Video not found through the YouTube API.')
        return 'err'
    except Exception:
        jenni.say('Something went wrong when accessing the YouTube API.')
        traceback.print_exc()
        return 'err'

    vid_info = {}
    vid_info['link'] = 'https://youtu.be/' + vid_id

    try:
        vid_info['title'] = video_entry['snippet']['title']
    except KeyError:
        vid_info['title'] = 'N/A'

    #get youtube channel
    try:
        vid_info['uploader'] = video_entry['snippet']['channelTitle']
    except KeyError:
        vid_info['uploader'] = 'N/A'

    #get upload time in format: yyyy-MM-ddThh:mm:ss.sssZ
    try:
        upraw = video_entry['snippet']['publishedAt']
        vid_info['uploaded'] = '%s/%s/%s, %s:%s' % (upraw[0:4], upraw[5:7],
                                                  upraw[8:10], upraw[11:13],
                                                  upraw[14:16])
    except KeyError:
        vid_info['uploaded'] = 'N/A'

    #get duration in seconds (contentDetails)
    try:
        if video_entry["snippet"]["liveBroadcastContent"] == "live":
            vid_info['length'] = 'LIVE'
        elif video_entry["snippet"]["liveBroadcastContent"] == "upcoming":
            vid_info['length'] = 'UPCOMING'
        else:
            duration = video_entry["contentDetails"]["duration"]
            # Now replace
            duration = duration.replace("P", "")
            duration = duration.replace("D", "days ")
            duration = duration.replace("T", "")
            duration = duration.replace("H", "hours ")
            duration = duration.replace("M", "mins ")
            duration = duration.replace("S", "secs")
            vid_info['length'] = duration
    except KeyError:
        vid_info['length'] = 'N/A'

    #get views (statistics)
    try:
        views = video_entry['statistics']['viewCount']
        vid_info['views'] = str('{0:20,d}'.format(int(views))).lstrip(' ')
    except KeyError:
        vid_info['views'] = 'N/A'

    #get comment count (statistics)
    try:
        comments = video_entry['statistics']['commentCount']
        vid_info['comments'] = str('{0:20,d}'.format(int(comments))).lstrip(' ')
    except KeyError:
        vid_info['comments'] = 'N/A'

    #get favourites (statistics)
    try:
        favourites = video_entry['statistics']['favoriteCount']
        vid_info['favourites'] = str('{0:20,d}'.format(int(favourites))).lstrip(' ')
    except KeyError:
        vid_info['favourites'] = 'N/A'

    #get likes & dislikes (statistics)
    try:
        likes = video_entry['statistics']['likeCount']
        vid_info['likes'] = str('{0:20,d}'.format(int(likes))).lstrip(' ')
    except KeyError:
        vid_info['likes'] = 'N/A'
    try:
        dislikes = video_entry['statistics']['dislikeCount']
        vid_info['dislikes'] = str('{0:20,d}'.format(int(dislikes))).lstrip(' ')
    except KeyError:
        vid_info['dislikes'] = 'N/A'

    #get video description (snippet)
    try:
        vid_info['description'] = video_entry['snippet']['description']
    except KeyError:
        vid_info['description'] = 'N/A'
    return vid_info