def googleimage(query, num=1, single=False):
    '''
    @param query: Query for searching
    @param num: Get the nth result
    @param single: Get only the title instead of the summary
    @summary: Performs a Google Custom Search image search and returns
              the nth result as "url : description"
    @return: UTF-8 encoded "url : description" string, or None on error
    '''
    try:
        # NOTE(review): googledefine() reads config['google']['app-id'];
        # confirm config['app-id'] is the intended key here.
        response = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=008715276770992001381:iyfgiiccnki&q=%s&alt=atom&num=%d&searchType=image'
            % (config['app-id'], urllib.quote(query), num))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('entry')[num - 1]
        url = ''.join(entry.find('id').find(text=True))
        # Google returns double-escaped text, hence the two unescapes;
        # the regex strips embedded markup first.
        field = 'title' if single else 'summary'
        desc = htmlx.unescape(htmlx.unescape(
            re.sub(r'<[^&]+>', '', entry.find(field).find(text=True))))
        return ("%s : %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def forecast(place, num=3):
    '''
    @param place: Location to fetch the forecast for
    @param num: Number of forecast days to request and return
    @summary: Fetches a multi-day weather forecast from worldweatheronline.com
    @return: UTF-8 encoded "<query>: <day> | <day> | ..." string, or None on error
    '''
    try:
        response = urllib2.urlopen(
            'http://free.worldweatheronline.com/feed/weather.ashx?format=xml&num_of_days=%d&key=%s&q=%s'
            % (num, config['app-id'], urllib.quote(place)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        query = soup.find('request')
        r = []
        # One <weather> element per forecast day.
        for f in soup.findAll('weather')[:num]:
            r.append(
                '%s on %s [%sC-%sC], %skmph winds'
                % (''.join(f.find('weatherdesc').findAll(text=True)).strip(),
                   ''.join(f.find('date').findAll(text=True)),
                   ''.join(f.find('tempminc').findAll(text=True)),
                   ''.join(f.find('tempmaxc').findAll(text=True)),
                   ''.join(f.find('windspeedkmph').findAll(text=True))))
        return ('%s: %s' % (''.join(query.find('query').findAll(text=True)),
                            ' | '.join(r))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def google_forecast(place, num=3):
    """
    @param place: Location to fetch the forecast for
    @param num: Maximum number of days to include
    @summary: Fetches a short weather forecast from the Google iGoogle weather API
    @return: UTF-8 encoded "<city>: <day> | ..." string, or None on error
    """
    try:
        # NOTE(review): the iGoogle weather API has been retired; this
        # will likely always take the except path now — confirm.
        response = urllib2.urlopen("http://www.google.com/ig/api?weather=%s" % urllib.quote(place))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        r = []
        for f in soup.findAll("forecast_conditions")[:num]:
            r.append(
                "%s on %s %dC-%dC"
                % (f.find("condition")["data"],
                   f.find("day_of_week")["data"],
                   to_celcius(f.find("low")["data"]),
                   to_celcius(f.find("high")["data"])))
        return ("%s: %s"
                % (soup.find("forecast_information").find("city")["data"],
                   " | ".join(r))).encode("utf-8")
    except Exception:
        Log.error()
        return None
def wiki(word, num=1, single=False):
    '''
    @param word: Word to search for
    @param num: Get the nth result
    @param single: Get only the title
    @summary: Searches for a word on Wikipedia and returns an abstract
    '''
    try:
        endpoint = ('http://en.wikipedia.org/w/api.php?'
                    'action=opensearch&search=%s&format=xml')
        response = urllib2.urlopen(endpoint % urllib.quote(word))
        page = response.read()
        response.close()
        doc = BeautifulSoup(page)
        entry = doc.findAll('item')[num - 1]
        # Title only when single is requested, full abstract otherwise.
        tag = 'text' if single else 'description'
        desc = ''.join(entry.find(tag).find(text=True))
        url = ''.join(entry.find('url').find(text=True))
        summary = htmlx.unescape(desc.replace('\n', ' '))
        return ("%s : %s" % (url, summary)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def forecast(place, num=3):
    """
    @param place: Location to fetch the forecast for
    @param num: Number of forecast days to request and return
    @summary: Fetches a multi-day weather forecast from worldweatheronline.com
    @return: UTF-8 encoded "<query>: <day> | <day> | ..." string, or None on error
    """
    try:
        response = urllib2.urlopen(
            "http://free.worldweatheronline.com/feed/weather.ashx?format=xml&num_of_days=%d&key=%s&q=%s"
            % (num, config["app-id"], urllib.quote(place))
        )
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        query = soup.find("request")
        r = []
        # One <weather> element per forecast day.
        for f in soup.findAll("weather")[:num]:
            r.append(
                "%s on %s [%sC-%sC], %skmph winds"
                % (
                    "".join(f.find("weatherdesc").findAll(text=True)).strip(),
                    "".join(f.find("date").findAll(text=True)),
                    "".join(f.find("tempminc").findAll(text=True)),
                    "".join(f.find("tempmaxc").findAll(text=True)),
                    "".join(f.find("windspeedkmph").findAll(text=True)),
                )
            )
        return ("%s: %s" % ("".join(query.find("query").findAll(text=True)),
                            " | ".join(r))).encode("utf-8")
    except Exception:
        Log.error()
        return None
def dictionary(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com dictionary search and returns the first result
    '''
    try:
        response = urllib2.urlopen(
            'http://www.stands4.com/services/v2/defs.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('result')[num - 1]

        def text_of(tag):
            # Collect and unescape all text inside the named child tag.
            return htmlx.unescape(''.join(entry.find(tag).findAll(text=True)))

        return ('%s (%s), %s. Eg: %s'
                % (text_of('term'), text_of('partofspeech'),
                   text_of('definition'), text_of('example'))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def quote(term, search=False, author=False, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Returns a quote from abbreviations.com
    '''
    try:
        # Pick the lookup mode; search wins over author, default is random.
        if search:
            mode = "SEARCH"
        elif author:
            mode = "AUTHOR"
        else:
            mode = "RANDOM"
        response = urllib2.urlopen(
            'http://www.stands4.com/services/v2/quotes.php?uid=%s&tokenid=%s&searchtype=%s&query=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               mode, urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        result = soup.findAll('result')[num - 1]
        text = htmlx.unescape(
            ''.join(result.find('quote').findAll(text=True))).strip('"')
        by = htmlx.unescape(''.join(result.find('author').findAll(text=True)))
        return ('"%s" -%s' % (text, by)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def quote(term, search=False, author=False, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Returns a quote from abbreviations.com
    '''
    try:
        # Lookup mode: search takes precedence, then author, else random.
        mode = "SEARCH" if search else ("AUTHOR" if author else "RANDOM")
        url = ('http://www.stands4.com/services/v2/quotes.php'
               '?uid=%s&tokenid=%s&searchtype=%s&query=%s')
        response = urllib2.urlopen(
            url % (config['stands4']['userid'], config['stands4']['token'],
                   mode, urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        result = soup.findAll('result')[num - 1]
        text = htmlx.unescape(
            ''.join(result.find('quote').findAll(text=True))).strip('"')
        by = htmlx.unescape(
            ''.join(result.find('author').findAll(text=True)))
        return ('"%s" -%s' % (text, by)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def google_forecast(place, num=3):
    '''
    @param place: Location to fetch the forecast for
    @param num: Maximum number of days to include
    @summary: Fetches a short weather forecast from the Google iGoogle weather API
    @return: UTF-8 encoded "<city>: <day> | ..." string, or None on error
    '''
    try:
        # NOTE(review): the iGoogle weather API has been retired; this
        # will likely always take the except path now — confirm.
        response = urllib2.urlopen('http://www.google.com/ig/api?weather=%s'
                                   % urllib.quote(place))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        r = []
        for f in soup.findAll('forecast_conditions')[:num]:
            r.append(
                '%s on %s %dC-%dC'
                % (f.find('condition')['data'],
                   f.find('day_of_week')['data'],
                   to_celcius(f.find('low')['data']),
                   to_celcius(f.find('high')['data'])))
        return ('%s: %s'
                % (soup.find('forecast_information').find('city')['data'],
                   ' | '.join(r))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def synonyms(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com synonym search and returns the results
    '''
    try:
        api = ('http://www.stands4.com/services/v2/syno.php'
               '?uid=%s&tokenid=%s&word=%s')
        response = urllib2.urlopen(
            api % (config['stands4']['userid'], config['stands4']['token'],
                   urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('result')[num - 1]
        # Each result carries a part-of-speech plus the synonym list.
        part = htmlx.unescape(
            ''.join(entry.find('partofspeech').findAll(text=True)))
        syno = htmlx.unescape(
            ''.join(entry.find('synonyms').findAll(text=True)))
        return ('(%s) %s' % (part, syno)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googledefine(query, num=1):
    '''
    @param query: Query for searching
    @param num: Return the (n)th result
    @summary: Performs a Google search and returns the first result
    @attention: Google's description requires unescaping twice
    '''
    try:
        api = ('https://www.googleapis.com/customsearch/v1'
               '?key=%s&cx=013036536707430787589:_pqjad5hr1a'
               '&q=define+%s&alt=atom&num=%d')
        response = urllib2.urlopen(
            api % (config['google']['app-id'], urllib.quote(query), num))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('entry')[num - 1]
        url = ''.join(entry.find('id').find(text=True))
        # Strip embedded markup, then unescape twice (see @attention).
        raw = re.sub(r'<[^&]+>', '', entry.find('summary').find(text=True))
        desc = htmlx.unescape(htmlx.unescape(raw))
        return ("%s, %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def geo(latitude, longitude):
    '''
    @param latitude: The latitude of location
    @param longitude: The longitude of location
    @summary: Performs a reverse geo lookup on Google Maps API
    @return: "[lat, lon] : address" string, or None when the lookup fails
             or the response has no formatted_address element
    '''
    try:
        response = urllib2.urlopen(
            'http://maps.googleapis.com/maps/api/geocode/xml?latlng=%s,%s&sensor=false'
            % (latitude, longitude))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        addresses = soup.findAll('formatted_address')
        if not addresses:
            # Previously this path raised an UnboundLocalError that the
            # broad except silently converted to None; be explicit.
            return None
        address = None
        for addr in addresses:
            text = addr.find(text=True)
            if text:
                address = str(text)
                break
        return '[%s, %s] : %s' % (latitude, longitude, address)
    except Exception:
        Log.error()
        return None
def dictionary(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com dictionary search and returns the first result
    '''
    try:
        api = ('http://www.stands4.com/services/v2/defs.php'
               '?uid=%s&tokenid=%s&word=%s')
        response = urllib2.urlopen(
            api % (config['stands4']['userid'], config['stands4']['token'],
                   urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('result')[num - 1]
        # Pull each field as unescaped plain text.
        fields = []
        for tag in ('term', 'partofspeech', 'definition', 'example'):
            fields.append(
                htmlx.unescape(''.join(entry.find(tag).findAll(text=True))))
        return ('%s (%s), %s. Eg: %s' % tuple(fields)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def wolfram(query):
    '''
    @param query: Query for calculation
    @summary: Performs calculation on Wolfram Alpha and returns the results
    @return: UTF-8 encoded comma-joined primary results, or None when there
             is no primary pod or on any error
    '''
    try:
        response = urllib2.urlopen(
            'http://api.wolframalpha.com/v2/query?appid=%s&input=%s&format=plaintext'
            % (config['app-id'], urllib.quote(query)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        # Only pods marked primary="true" carry the direct answer.
        out = [htmlx.unescape(''.join(pod.find('plaintext').findAll(text=True)))
               for pod in soup.findAll('pod', attrs={'primary': 'true'})]
        if not out:
            return None
        # Wolfram spells the currency symbol as the word "rupee"; normalise it.
        return re.sub(r'^rupee\s*', 'Rs. ', ', '.join(out)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googleimage(query, num=1, single=False):
    '''
    @param query: Query for searching
    @param num: Get the nth result
    @param single: Get only the title instead of the summary
    @summary: Performs a Google Custom Search image search and returns
              the nth result as "url : description"
    @return: UTF-8 encoded "url : description" string, or None on error
    '''
    try:
        # NOTE(review): googledefine() reads config['google']['app-id'];
        # confirm config['app-id'] is the intended key here.
        response = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=008715276770992001381:iyfgiiccnki&q=%s&alt=atom&num=%d&searchType=image'
            % (config['app-id'], urllib.quote(query), num))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('entry')[num - 1]
        url = ''.join(entry.find('id').find(text=True))
        # Google returns double-escaped text, hence the two unescapes;
        # the regex strips embedded markup first.
        if single:
            raw = entry.find('title').find(text=True)
        else:
            raw = entry.find('summary').find(text=True)
        desc = htmlx.unescape(htmlx.unescape(re.sub(r'<[^&]+>', '', raw)))
        return ("%s : %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def wiki(word, num=1, single=False):
    '''
    @param word: Word to search for
    @param num: Get the nth result
    @param single: Get only the title
    @summary: Searches for a word on Wikipedia and returns an abstract
    '''
    try:
        response = urllib2.urlopen(
            'http://en.wikipedia.org/w/api.php?action=opensearch&search=%s&format=xml'
            % urllib.quote(word))
        page = response.read()
        response.close()
        doc = BeautifulSoup(page)
        entry = doc.findAll('item')[num - 1]
        # Title only when single is requested, full abstract otherwise.
        source = entry.find('text') if single else entry.find('description')
        desc = ''.join(source.find(text=True))
        url = ''.join(entry.find('url').find(text=True))
        flattened = htmlx.unescape(desc.replace('\n', ' '))
        return ("%s : %s" % (url, flattened)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def synonyms(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com synonym search and returns the results
    '''
    try:
        response = urllib2.urlopen(
            'http://www.stands4.com/services/v2/syno.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('result')[num - 1]

        def text_of(tag):
            # Collect and unescape all text inside the named child tag.
            return htmlx.unescape(''.join(entry.find(tag).findAll(text=True)))

        return ('(%s) %s'
                % (text_of('partofspeech'),
                   text_of('synonyms'))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googledefine(query, num=1):
    '''
    @param query: Query for searching
    @param num: Return the (n)th result
    @summary: Performs a Google search and returns the first result
    @attention: Google's description requires unescaping twice
    '''
    try:
        response = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=013036536707430787589:_pqjad5hr1a&q=define+%s&alt=atom&num=%d'
            % (config['google']['app-id'], urllib.quote(query), num))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        entry = soup.findAll('entry')[num - 1]
        url = ''.join(entry.find('id').find(text=True))
        # Strip embedded markup, then unescape twice (see @attention).
        summary_text = entry.find('summary').find(text=True)
        stripped = re.sub(r'<[^&]+>', '', summary_text)
        desc = htmlx.unescape(htmlx.unescape(stripped))
        return ("%s, %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None