Example 1
0
def timefunction2(inp, nick="", reply=None, db=None, notice=None):
    """time [location] [dontsave] | [@ nick] -- Gets time for <location>."""

    # NOTE(review): 'save' is computed but this variant never writes the
    # location back to the database -- confirm whether that is intentional.
    save = True

    if '@' in inp:
        # "@nick" form: use the other user's stored location only.
        nick = inp.split('@')[1].strip()
        location = database.get(db, 'users', 'location', 'nick', nick)
        if not location:
            return "No location stored for {}.".format(nick.encode('ascii', 'ignore'))
    else:
        location = database.get(db, 'users', 'location', 'nick', nick)
        if not inp:
            if not location:
                # BUG FIX: was notice(time.__doc__) -- 'time' is not this
                # command; show this function's own usage string.
                notice(timefunction2.__doc__)
                return
        else:
            if " dontsave" in inp:
                save = False
            location = inp

    # Strip the flag tokens BEFORE replacing spaces with underscores;
    # the original ran '.replace(' save','')' after ' '->'_' so it could
    # never match, and ' dontsave' leaked into the URL.
    location = location.replace(' dontsave', '').replace(' save', '')
    url = "https://time.is/%s" % location.replace(' ', '_')
    try:
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        # Narrow to the time section, then pull the clock, date details and
        # the "Time in X" prefix out of their respective divs.
        soup = soup.find('div', attrs={'id': re.compile('time_section')})
        time = filter(None, http.strip_html(soup.find('div', attrs={'id': re.compile('twd')}).renderContents().strip()))
        details = filter(None, http.strip_html(soup.find('div', attrs={'id': re.compile('dd')}).renderContents().strip()))
        prefix = filter(None, http.strip_html(soup.find('div', attrs={'id': re.compile('msgdiv')}).renderContents().strip()))
    except IndexError:
        return "Could not get time for that location."

    return formatting.output('Time', [u'{} {}, {}'.format(prefix.decode('ascii', 'ignore'), time, details)])
Example 2
0
def convert(inp, conn=None, chan=None):
    """gconvert <val1> <val2> -- converts a measurement or currency.

    Examples: "gconvert 1000 usd to yen", "gconvert 100 miles to km".
    """

    # Crypto conversions are delegated to dedicated helpers.
    if 'btc' in inp.lower() or 'bitcoin' in inp.lower():
        convert_btc(inp, conn, chan)
        return None
    elif 'ltc' in inp.lower() or 'litecoin' in inp.lower():
        convert_ltc(inp, conn, chan)
        return None

    # Scrape Google's "convert" answer card for everything else.
    url = "http://www.google.com/search?q=convert+{}".format(
        urllib.quote_plus(inp))

    request = urllib2.Request(url, None, headers)
    page = urllib2.urlopen(request).read()
    soup = BeautifulSoup(page, 'lxml')

    soup = soup.find('div', attrs={'id': re.compile('ires')})
    # Currency answers use a different markup than unit conversions.
    is_currency = soup.find('li', attrs={'class': re.compile('currency')})
    if is_currency:
        conv_inp = http.strip_html(
            soup.find('div', attrs={
                'class': re.compile('vk_sh')
            }).renderContents().strip())
        conv_out = http.strip_html(
            soup.find('div', attrs={
                'class': re.compile('vk_ans')
            }).renderContents().strip())
        return "{} {}".format(conv_inp.replace("equals", "="), conv_out)
    else:
        # Unit conversion widget: input side value + selected unit...
        conv_inp = soup.find('div', attrs={'id': re.compile('_Aif')})
        inp_value = conv_inp.find('input',
                                  attrs={'class':
                                         re.compile('_eif')})['value'].strip()
        inp_unit = http.strip_html(
            conv_inp.find('select', attrs={
                'class': re.compile('_dif')
            }).find_all('option', selected=True)[0])

        # ...and output side value + selected unit.
        conv_out = soup.find('div', attrs={'id': re.compile('_Cif')})
        out_value = conv_out.find('input',
                                  attrs={'class':
                                         re.compile('_eif')})['value'].strip()
        out_unit = http.strip_html(
            conv_out.find('select', attrs={
                'class': re.compile('_dif')
            }).find_all('option', selected=True)[0])

    return "{} {}s = {} {}s".format(inp_value, inp_unit, out_value, out_unit)
Example 3
0
def horoscope(inp, db=None, notice=None, nick=None):
    """horoscope <sign> [save] -- Get your horoscope."""
    save = False
    database.init(db)

    if '@' in inp:
        # "@nick" form: use the other user's stored sign; never save.
        nick = inp.split('@')[1].strip()
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not sign:
            return "No horoscope sign stored for {}.".format(nick)
    else:
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not inp:
            if not sign:
                notice(horoscope.__doc__)
                return
        else:
            # Save automatically on first use, or when explicitly asked.
            if not sign: save = True
            if " save" in inp: save = True
            sign = inp.split()[0]

    url = "http://www.astrology.com/horoscope/daily/%s.html" % sign
    try:
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        result = BeautifulSoup(page, 'lxml')
        horoscopetxt = http.strip_html(str(result.find('div', attrs={'class': ('page-horoscope-text')})))
    # BUG FIX: was a bare 'except:' which also swallowed
    # KeyboardInterrupt/SystemExit; same best-effort message kept.
    except Exception:
        return "Check your spelling, acronyms and short forms are not accepted."

    if sign and save:
        database.set(db, 'users', 'horoscope', sign, 'nick', nick)
    # Drop the trailing sentence fragments, then restore the final period.
    horoscopetxt = horoscopetxt.rsplit('.', 2)[0]
    horoscopetxt += '.'
    return u"{}".format(horoscopetxt)
Example 4
0
def amazonsearch(inp):
    """amazonsearch <query> -- Search Amazon and return the first result's
    price, title and a short https://amzn.com/<id> link.

    Falls back to the second result (result_1) when the first result's
    markup is missing any of the expected nodes.
    """
    url = "http://www.amazon.com/s/url=search-alias%3Daps&field-keywords={}".format(inp.replace(" ", "%20"))
    try:
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        soup = soup.find('li', attrs={'id': ('result_0')})
        title = soup.find('h2')
        title = title.renderContents()
        url = soup.find('a', attrs={'class': ('a-link-normal s-access-detail-page a-text-normal')})
        url = url.get('href')
        # Amazon renders the price in several different spots depending on
        # the product type; probe them in order of likelihood.
        try:
            price = soup.find('div', attrs={'class': ('a-column a-span7')})
            price = http.strip_html(price.find('span'))
            # BUG FIX: removed leftover debug 'print price'.
        except AttributeError:
            price = soup.find('span', attrs={'class': ('a-size-medium a-color-price')})
            try:
                price = http.strip_html(price)
            except TypeError:
                price = soup.find('span', attrs={'class': ('a-size-base a-color-price s-price a-text-bold')})
                price = http.strip_html(price)
        # The product id is the path segment after /dp/.
        azid = re.match(r'^.*\/dp\/([\w]+)\/.*', url).group(1)
    except AttributeError:
        # First result unusable: retry the whole scrape with result_1.
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        soup = soup.find('li', attrs={'id': ('result_1')})
        title = soup.find('h2')
        title = title.renderContents()
        url = soup.find('a', attrs={'class': ('a-link-normal s-access-detail-page a-text-normal')})
        url = url.get('href')
        try:
            price = soup.find('div', attrs={'class': ('a-column a-span7')})
            price = http.strip_html(price.find('span'))
        except AttributeError:
            price = soup.find('span', attrs={'class': ('a-size-medium a-color-price')})
            try:
                price = http.strip_html(price)
            except TypeError:
                price = "Not Available"
        azid = re.match(r'^.*\/dp\/([\w]+)\/.*', url).group(1)

    return u'(\x02{}\x02) {}, https://amzn.com/{}'.format(price, title.decode('ascii', 'ignore'), azid)
Example 5
0
def timefunction(inp, nick="", reply=None, db=None, notice=None):
    """time [location] [dontsave] | [@ nick] -- Gets time for <location>."""

    save = True

    if '@' in inp:
        # "@nick" form: use the other user's stored location.
        nick = inp.split('@')[1].strip()
        location = database.get(db, 'users', 'location', 'nick', nick)
        if not location:
            return "No location stored for {}.".format(
                nick.encode('ascii', 'ignore'))
    else:
        location = database.get(db, 'users', 'location', 'nick', nick)
        if not inp:
            if not location:
                # BUG FIX: was notice(time.__doc__) -- 'time' is not this
                # command (it is also shadowed as a local below); show this
                # function's own usage string.
                notice(timefunction.__doc__)
                return
        else:
            if " dontsave" in inp: save = False
            location = inp.split()[0]

    # now, to get the actual time
    try:
        url = "https://www.google.com/search?q=time+in+{}".format(
            location.replace(' ', '+').replace(' save', ''))
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        soup = soup.find('div', attrs={'id': re.compile('ires')})

        # The answer card is one whitespace-separated string:
        # clock, day, date..., then the "Time in X" prefix from word 6 on.
        time = filter(
            None,
            http.strip_html(
                soup.find('div', attrs={
                    'class': re.compile('vk_gy')
                }).renderContents().strip()).split(' '))
        prefix = ' '.join(time[6:])
        curtime = time[0]
        day = time[1]
        date = ' '.join(time[2:4])
    except IndexError:
        return "Could not get time for that location."

    if location and save:
        database.set(db, 'users', 'location', location, 'nick', nick)

    return formatting.output(
        'Time',
        [u'{} is \x02{}\x02 [{} {}]'.format(prefix, curtime, day, date)])
Example 6
0
def convert(inp,conn=None,chan=None):
    """gconvert <val1> <val2> -- converts a measurement or currency.

    Examples: "gconvert 1000 usd to yen", "gconvert 100 miles to km".
    """

    # Crypto conversions are delegated to dedicated helpers.
    if 'btc' in inp.lower() or 'bitcoin' in inp.lower():
        convert_btc(inp,conn,chan)
        return None
    elif 'ltc' in inp.lower() or 'litecoin' in inp.lower():
        convert_ltc(inp,conn,chan)
        return None

    # Scrape Google's "convert" answer card for everything else.
    url = "http://www.google.com/search?q=convert+{}".format(urllib.quote_plus(inp))

    request = urllib2.Request(url, None, headers)
    page = urllib2.urlopen(request).read()
    soup = BeautifulSoup(page,'lxml')


    soup = soup.find('div', attrs={'id': re.compile('ires')})
    # Currency answers use a different markup than unit conversions.
    is_currency = soup.find('li', attrs={'class': re.compile('currency')})
    if is_currency:
        conv_inp = http.strip_html(soup.find('div', attrs={'class': re.compile('vk_sh')}).renderContents().strip())
        conv_out = http.strip_html(soup.find('div', attrs={'class': re.compile('vk_ans')}).renderContents().strip())
        return "{} {}".format(conv_inp.replace("equals","="),conv_out)
    else:
        # Unit conversion widget: value + selected unit, on both sides.
        conv_inp = soup.find('div', attrs={'id': re.compile('_Aif')})
        inp_value = conv_inp.find('input', attrs={'class': re.compile('_eif')})['value'].strip()
        inp_unit = http.strip_html(conv_inp.find('select', attrs={'class': re.compile('_dif')}).find_all('option', selected=True)[0])

        conv_out = soup.find('div', attrs={'id': re.compile('_Cif')})
        out_value = conv_out.find('input', attrs={'class': re.compile('_eif')})['value'].strip()
        out_unit = http.strip_html(conv_out.find('select', attrs={'class': re.compile('_dif')}).find_all('option', selected=True)[0])


    return "{} {}s = {} {}s".format(inp_value, inp_unit, out_value, out_unit)
Example 7
0
def horoscope(inp):
    """horoscope <sign> -- Get your horoscope."""

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % inp
    soup = http.get_soup(url)

    # BUG FIX: the original indexed find_all(...)[1] first, so a missing
    # title raised IndexError before its 'if not title' check could ever
    # run; guard before indexing instead.
    titles = soup.find_all('h1', {'class': 'h1b'})
    if len(titles) < 2:
        return "Could not get the horoscope for %s." % inp

    title = titles[1]
    # Renamed local (was 'horoscope', shadowing this function).
    horoscope_text = soup.find('div', {'class': 'fontdef1'})
    result = "\x02%s\x02 %s" % (title, horoscope_text)
    result = http.strip_html(result)

    return result
Example 8
0
def horoscope(inp):
    """horoscope <sign> -- Get your horoscope."""

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % inp
    soup = http.get_soup(url)

    # BUG FIX: the original indexed find_all(...)[1] first, so a missing
    # title raised IndexError before its 'if not title' check could ever
    # run; guard before indexing instead.
    titles = soup.find_all('h1', {'class': 'h1b'})
    if len(titles) < 2:
        return "Could not get the horoscope for %s." % inp

    title = titles[1]
    # Renamed local (was 'horoscope', shadowing this function).
    horoscope_text = soup.find('div', {'class': 'fontdef1'})
    result = "\x02%s\x02 %s" % (title, horoscope_text)
    result = http.strip_html(result)

    return result
Example 9
0
def distance(inp):
    """distance <start> to <end> -- Calculate the distance between 2 places."""
    if 'from ' in inp: inp = inp.replace('from ','')
    inp = inp.replace(', ','+')
    # BUG FIX: guard the split -- the original raised IndexError on any
    # input without " to ".
    parts = inp.split(" to ")
    if len(parts) < 2:
        return "Usage: distance <start> to <end>"
    start = parts[0].strip().replace(' ','+')
    dest = parts[1].strip().replace(' ','+')
    url = "http://www.travelmath.com/flying-distance/from/%s/to/%s" % (start, dest)
    # (removed leftover debug 'print url')
    soup = http.get_soup(url)
    query = soup.find('h1', {'class': re.compile('flight-distance')})
    dist = soup.find('h3', {'class': 'space'})
    # BUG FIX: check for a missing distance BEFORE formatting the result
    # (the original used it first, then checked).
    if not dist:
        return "Could not calculate the distance from %s to %s." % (start, dest)
    result = "%s %s" % (query, dist)
    result = http.strip_html(result)
    result = unicode(result, "utf8").replace('flight ','')

    return result
Example 10
0
def distance(inp):
    """distance <start> to <end> -- Calculate the distance between 2 places."""
    if 'from ' in inp: inp = inp.replace('from ', '')
    inp = inp.replace(', ', '+')
    # BUG FIX: guard the split -- the original raised IndexError on any
    # input without " to ".
    parts = inp.split(" to ")
    if len(parts) < 2:
        return "Usage: distance <start> to <end>"
    start = parts[0].strip().replace(' ', '+')
    dest = parts[1].strip().replace(' ', '+')
    url = "http://www.travelmath.com/flying-distance/from/%s/to/%s" % (start,
                                                                       dest)
    # (removed leftover debug 'print url')
    soup = http.get_soup(url)
    query = soup.find('h1', {'class': re.compile('flight-distance')})
    dist = soup.find('h3', {'class': 'space'})
    # BUG FIX: check for a missing distance BEFORE formatting the result
    # (the original used it first, then checked).
    if not dist:
        return "Could not calculate the distance from %s to %s." % (start,
                                                                    dest)
    result = "%s %s" % (query, dist)
    result = http.strip_html(result)
    result = unicode(result, "utf8").replace('flight ', '')

    return result
Example 11
0
def timefunction(inp, nick="", reply=None, db=None, notice=None):
    """time [location] [dontsave] | [@ nick] -- Gets time for <location>."""

    save = True

    if '@' in inp:
        # "@nick" form: use the other user's stored location.
        nick = inp.split('@')[1].strip()
        location = database.get(db,'users','location','nick',nick)
        if not location: return "No location stored for {}.".format(nick.encode('ascii', 'ignore'))
    else:
        location = database.get(db,'users','location','nick',nick)
        if not inp:
            if not location:
                # BUG FIX: was notice(time.__doc__) -- 'time' is not this
                # command (it is also shadowed as a local below); show this
                # function's own usage string.
                notice(timefunction.__doc__)
                return
        else:
            if " dontsave" in inp: save = False
            location = inp.split()[0]

    # now, to get the actual time
    try:
        url = "https://www.google.com/search?q=time+in+{}".format(location.replace(' ','+').replace(' save',''))
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        soup = soup.find('div', attrs={'id': re.compile('ires')})

        # The answer card is one whitespace-separated string:
        # clock, day, date..., then the "Time in X" prefix from word 6 on.
        time = filter(None, http.strip_html(soup.find('div', attrs={'class': re.compile('vk_gy')}).renderContents().strip()).split(' '))
        prefix = ' '.join(time[6:])
        curtime = time[0]
        day = time[1]
        date = ' '.join(time[2:4])
    except IndexError:
        return "Could not get time for that location."

    if location and save: database.set(db,'users','location',location,'nick',nick)

    return formatting.output('Time', [u'{} is \x02{}\x02 [{} {}]'.format(prefix, curtime, day, date)])
Example 12
0
def amazonsearch(inp):
    """amazonsearch <query> -- Search Amazon and return the first result's
    price, title and a short https://amzn.com/<id> link.

    Falls back to the second result (result_1) when the first result's
    markup is missing any of the expected nodes.
    """
    url = "http://www.amazon.com/s/url=search-alias%3Daps&field-keywords={}".format(
        inp.replace(" ", "%20"))
    try:
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        soup = soup.find('li', attrs={'id': ('result_0')})
        title = soup.find('h2')
        title = title.renderContents()
        url = soup.find(
            'a',
            attrs={
                'class': ('a-link-normal s-access-detail-page a-text-normal')
            })
        url = url.get('href')
        # Amazon renders the price in several different spots depending on
        # the product type; probe them in order of likelihood.
        try:
            price = soup.find('div', attrs={'class': ('a-column a-span7')})
            price = http.strip_html(price.find('span'))
            # BUG FIX: removed leftover debug 'print price'.
        except AttributeError:
            price = soup.find('span',
                              attrs={'class': ('a-size-medium a-color-price')})
            try:
                price = http.strip_html(price)
            except TypeError:
                price = soup.find(
                    'span',
                    attrs={
                        'class':
                        ('a-size-base a-color-price s-price a-text-bold')
                    })
                price = http.strip_html(price)
        # The product id is the path segment after /dp/.
        azid = re.match(r'^.*\/dp\/([\w]+)\/.*', url).group(1)
    except AttributeError:
        # First result unusable: retry the whole scrape with result_1.
        request = urllib2.Request(url, None, headers)
        page = urllib2.urlopen(request).read()
        soup = BeautifulSoup(page, 'lxml')
        soup = soup.find('li', attrs={'id': ('result_1')})
        title = soup.find('h2')
        title = title.renderContents()
        url = soup.find(
            'a',
            attrs={
                'class': ('a-link-normal s-access-detail-page a-text-normal')
            })
        url = url.get('href')
        try:
            price = soup.find('div', attrs={'class': ('a-column a-span7')})
            price = http.strip_html(price.find('span'))
        except AttributeError:
            price = soup.find('span',
                              attrs={'class': ('a-size-medium a-color-price')})
            try:
                price = http.strip_html(price)
            except TypeError:
                price = "Not Available"
        azid = re.match(r'^.*\/dp\/([\w]+)\/.*', url).group(1)

    return u'(\x02{}\x02) {}, https://amzn.com/{}'.format(
        price, title.decode('ascii', 'ignore'), azid)