def urbandefine(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a urban dictionary search and returns the first result
    @return: "term: definition[, Eg: example]" as a UTF-8 bytestring, or
             None on any failure (network error, term not found, etc.)
    '''
    try:
        response = urllib2.urlopen(
            'http://www.urbandictionary.com/define.php?term=%s'
            % urllib.quote(term))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        # FIX: raw string for the regex so '\d' is a regex digit class and
        # not an (invalid) string escape sequence.
        items = soup.find('table', attrs={'id': 'entries'}).findAll(
            'td', attrs={'class': 'text', 'id': re.compile(r'entry_\d+')})
        item = items[num - 1]
        define = htmlx.unescape(''.join(
            item.find('div', attrs={'class': 'definition'}).findAll(text=True)))
        example = htmlx.unescape(''.join(
            item.find('div', attrs={'class': 'example'}).findAll(text=True)))
        if len(example):
            example = ", Eg: " + example
        return ("%s: %s%s" % (term, define, example)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def dictionary(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com dictionary search and returns the
              first result
    '''
    try:
        reply = urllib2.urlopen(
            'http://www.stands4.com/services/v2/defs.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        body = reply.read()
        reply.close()
        result = BeautifulSoup(body).findAll('result')[num - 1]

        def text_of(tag):
            # Concatenate all text nodes of a child tag and unescape entities
            return htmlx.unescape(''.join(result.find(tag).findAll(text=True)))

        term = text_of('term')
        part = text_of('partofspeech')
        definition = text_of('definition')
        example = text_of('example')
        return ('%s (%s), %s. Eg: %s'
                % (term, part, definition, example)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def description(url):
    '''
    @param url: The url to resolve
    @summary: Fetches the meta-description of an url
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    if status == 302:
        return 'Redirection loop detected for url %s' % url
    if status != 200:
        return 'Status Code %s : url %s' % (status, url)
    try:
        if not ctype.startswith('text/'):
            return 'Preview not available for content type %s : [%s]' % (
                ctype, min_url(url))
        resp = urllib2.urlopen(url)
        html = resp.read()
        resp.close()
        meta = BeautifulSoup(html).find(
            'meta', {'name': re.compile('description', re.I)})['content']
        return 'Description %s : [%s]' % (htmlx.unescape(meta), min_url(url))
    except Exception:
        Log.error()
        return None
def google_forecast(place, num=3):
    '''
    @param place: Location to fetch the forecast for
    @param num: Number of forecast days to include
    @summary: Fetches a multi-day forecast from the Google Weather API
    '''
    try:
        conn = urllib2.urlopen('http://www.google.com/ig/api?weather=%s'
                               % urllib.quote(place))
        xml = conn.read()
        conn.close()
        soup = BeautifulSoup(xml)
        days = []
        for fc in soup.findAll('forecast_conditions')[:num]:
            days.append('%s on %s %dC-%dC'
                        % (fc.find('condition')['data'],
                           fc.find('day_of_week')['data'],
                           to_celcius(fc.find('low')['data']),
                           to_celcius(fc.find('high')['data'])))
        city = soup.find('forecast_information').find('city')['data']
        return ('%s: %s' % (city, ' | '.join(days))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def translate(msg):
    '''
    @param msg: Message to translate
    @summary: Translates a query into destination language using Google
              Translate
    @attention: TODO
    NOTE(review): this scrapes the interactive translate page; the 'result'
    span is presumably filled by JavaScript — verify it actually appears in
    the raw HTML.
    '''
    try:
        request = urllib2.Request('http://translate.google.com/#auto|en|%s.'
                                  % urllib.quote(msg))
        request.add_header(
            'User-Agent',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.52 Safari/536.5'
        )
        conn = urllib2.urlopen(request)
        html = conn.read()
        Log.write(html)
        conn.close()
        result_span = BeautifulSoup(html).find('span', attrs={'id': 'result'})
        trans = ''.join(result_span.findAll(text=True))
        return ("%s -> %s" % (msg, trans)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googleimage(query, num=1, single=False):
    '''
    @param query: Query for searching
    @param num: Get the nth result
    @param single: Get only the title
    @summary: Performs a Google custom image search and returns the result
    '''
    try:
        conn = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=008715276770992001381:iyfgiiccnki&q=%s&alt=atom&num=%d&searchType=image'
            % (config['app-id'], urllib.quote(query), num))
        feed = conn.read()
        conn.close()
        entry = BeautifulSoup(feed).findAll('entry')[num - 1]
        url = ''.join(entry.find('id').find(text=True))
        # Google's description requires unescaping twice
        field = 'title' if single else 'summary'
        desc = htmlx.unescape(htmlx.unescape(
            re.sub(r'<[^&]+>', '', entry.find(field).find(text=True))))
        return ("%s : %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def description(url):
    '''
    @param url: The url to resolve
    @summary: Fetches the meta-description of an url
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    if status == 302:
        return 'Redirection loop detected for url %s' % url
    if status == 200:
        try:
            if ctype.startswith('text/'):
                handle = urllib2.urlopen(url)
                markup = handle.read()
                handle.close()
                content = BeautifulSoup(markup).find(
                    'meta',
                    {'name': re.compile('description', re.I)})['content']
                return 'Description %s : [%s]' % (htmlx.unescape(content),
                                                  min_url(url))
            return 'Preview not available for content type %s : [%s]' % (
                ctype, min_url(url))
        except Exception:
            Log.error()
            return None
    return 'Status Code %s : url %s' % (status, url)
def wiki(word, num=1, single=False):
    '''
    @param word: Word to search for
    @param num: Get the nth result
    @param single: Get only the title
    @summary: Searches for a word on Wikipedia and returns an abstract
    '''
    try:
        conn = urllib2.urlopen(
            'http://en.wikipedia.org/w/api.php?action=opensearch&search=%s&format=xml'
            % urllib.quote(word))
        xml = conn.read()
        conn.close()
        hit = BeautifulSoup(xml).findAll('item')[num - 1]
        field = 'text' if single else 'description'
        desc = ''.join(hit.find(field).find(text=True))
        url = ''.join(hit.find('url').find(text=True))
        return ("%s : %s"
                % (url,
                   htmlx.unescape(desc.replace('\n', ' ')))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def quote(term, search=False, author=False, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Returns a quote from abbreviations.com
    '''
    try:
        # Pick the search mode understood by the stands4 API
        if search:
            mode = "SEARCH"
        elif author:
            mode = "AUTHOR"
        else:
            mode = "RANDOM"
        conn = urllib2.urlopen(
            'http://www.stands4.com/services/v2/quotes.php?uid=%s&tokenid=%s&searchtype=%s&query=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               mode, urllib.quote(term)))
        xml = conn.read()
        conn.close()
        hit = BeautifulSoup(xml).findAll('result')[num - 1]
        quote = htmlx.unescape(
            ''.join(hit.find('quote').findAll(text=True))).strip('"')
        author = htmlx.unescape(''.join(hit.find('author').findAll(text=True)))
        return ('"%s" -%s' % (quote, author)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def forecast(place, num=3): """ @param term: Term for searching @summary: Performs a urban dictionary search and returns the first result """ try: response = urllib2.urlopen( "http://free.worldweatheronline.com/feed/weather.ashx?format=xml&num_of_days=%d&key=%s&q=%s" % (num, config["app-id"], urllib.quote(place)) ) page = response.read() response.close() soup = BeautifulSoup(page) forecasts = soup.findAll("weather") query = soup.find("request") r = [] for f in forecasts[:num]: r.append( "%s on %s [%sC-%sC], %skmph winds" % ( "".join(f.find("weatherdesc").findAll(text=True)).strip(), "".join(f.find("date").findAll(text=True)), "".join(f.find("tempminc").findAll(text=True)), "".join(f.find("tempmaxc").findAll(text=True)), "".join(f.find("windspeedkmph").findAll(text=True)), ) ) return ("%s: %s" % ("".join(query.find("query").findAll(text=True)), " | ".join(r))).encode("utf-8") except Exception: Log.error() return None
def weather(place): """ @param term: Term for searching @summary: Performs a urban dictionary search and returns the first result """ try: response = urllib2.urlopen( "http://free.worldweatheronline.com/feed/weather.ashx?format=xml&fx=no&extra=localObsTime&key=%s&q=%s" % (config["app-id"], urllib.quote(place)) ) page = response.read() response.close() soup = BeautifulSoup(page) current = soup.find("current_condition") query = soup.find("request") return ( "%s (%s): %s at %sC, %s%% humidity, %skmph winds" % ( "".join(query.find("query").findAll(text=True)), "".join(current.find("localobsdatetime").findAll(text=True)), "".join(current.find("weatherdesc").findAll(text=True)).strip(), "".join(current.find("temp_c").findAll(text=True)), "".join(current.find("humidity").findAll(text=True)), "".join(current.find("windspeedkmph").findAll(text=True)), ) ).encode("utf-8") except Exception: Log.error() return None
def weather(place):
    '''
    @param place: Location to look up
    @summary: Fetches the current weather conditions from
              worldweatheronline.com
    '''
    try:
        handle = urllib2.urlopen(
            'http://free.worldweatheronline.com/feed/weather.ashx?format=xml&fx=no&extra=localObsTime&key=%s&q=%s'
            % (config['app-id'], urllib.quote(place)))
        body = handle.read()
        handle.close()
        doc = BeautifulSoup(body)
        cond = doc.find('current_condition')
        req = doc.find('request')
        location = ''.join(req.find('query').findAll(text=True))
        observed = ''.join(cond.find('localobsdatetime').findAll(text=True))
        desc = ''.join(cond.find('weatherdesc').findAll(text=True)).strip()
        temp = ''.join(cond.find('temp_c').findAll(text=True))
        humidity = ''.join(cond.find('humidity').findAll(text=True))
        wind = ''.join(cond.find('windspeedkmph').findAll(text=True))
        return ('%s (%s): %s at %sC, %s%% humidity, %skmph winds'
                % (location, observed, desc, temp,
                   humidity, wind)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def google_forecast(place, num=3): """ @param term: Term for searching @summary: Performs a urban dictionary search and returns the first result """ try: response = urllib2.urlopen("http://www.google.com/ig/api?weather=%s" % urllib.quote(place)) page = response.read() response.close() soup = BeautifulSoup(page) forecasts = soup.findAll("forecast_conditions") r = [] for f in forecasts[:num]: r.append( "%s on %s %dC-%dC" % ( f.find("condition")["data"], f.find("day_of_week")["data"], to_celcius(f.find("low")["data"]), to_celcius(f.find("high")["data"]), ) ) return ("%s: %s" % (soup.find("forecast_information").find("city")["data"], " | ".join(r))).encode("utf-8") except Exception: Log.error() return None
def quote(term, search=False, author=False, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Returns a quote from abbreviations.com
    '''
    try:
        # Map flags to the stands4 search mode (search wins over author)
        mode = "SEARCH" if search else ("AUTHOR" if author else "RANDOM")
        handle = urllib2.urlopen(
            'http://www.stands4.com/services/v2/quotes.php?uid=%s&tokenid=%s&searchtype=%s&query=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               mode, urllib.quote(term)))
        body = handle.read()
        handle.close()
        entry = BeautifulSoup(body).findAll('result')[num - 1]
        quote = htmlx.unescape(
            ''.join(entry.find('quote').findAll(text=True))).strip('"')
        author = htmlx.unescape(
            ''.join(entry.find('author').findAll(text=True)))
        return ('"%s" -%s' % (quote, author)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def forecast(place, num=3):
    '''
    @param place: Location to fetch the forecast for
    @param num: Number of forecast days
    @summary: Fetches a multi-day forecast from worldweatheronline.com
    '''
    try:
        handle = urllib2.urlopen(
            'http://free.worldweatheronline.com/feed/weather.ashx?format=xml&num_of_days=%d&key=%s&q=%s'
            % (num, config['app-id'], urllib.quote(place)))
        body = handle.read()
        handle.close()
        doc = BeautifulSoup(body)
        req = doc.find('request')
        parts = []
        for day in doc.findAll('weather')[:num]:
            desc = ''.join(day.find('weatherdesc').findAll(text=True)).strip()
            date = ''.join(day.find('date').findAll(text=True))
            tmin = ''.join(day.find('tempminc').findAll(text=True))
            tmax = ''.join(day.find('tempmaxc').findAll(text=True))
            wind = ''.join(day.find('windspeedkmph').findAll(text=True))
            parts.append('%s on %s [%sC-%sC], %skmph winds'
                         % (desc, date, tmin, tmax, wind))
        location = ''.join(req.find('query').findAll(text=True))
        return ('%s: %s' % (location, ' | '.join(parts))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def output(self, channel, user, options):
    '''
    @summary: Shuts the bot down (or restarts it when options.restart is
              set), using any words in options.msg as the quit message;
              always stops the logger and returns False.
    '''
    try:
        farewell = ' '.join(options.msg) if options.msg else None
        if options.restart:
            self._bot.disconnect(farewell if farewell else 'Restarting')
        else:
            self._bot.close()
            self._bot.disconnect(farewell if farewell else "I'll be back")
    except Exception:
        Log.error('QircBot.parse_cmd: ')
    finally:
        Log.stop()
    return False
def call_listeners(self, key, channel, user, line):
    '''
    @summary: Calls the listener on all modules registered for it; stops at
              the first listener whose event() returns True.
    '''
    if self._listeners.has_key(key):
        for listener in self._listeners[key]:
            try:
                if listener.event(key, channel, user, line):
                    break
            except Exception as e:
                Log.error('Error in extension "%s" for key "%s": %s'
                          % (listener.key, key, e))
    return True
def ask(self, msg):
    '''
    @param msg: The message (stimulus) to send
    @summary: Posts msg to the remote service and returns the parsed reply,
              or None on any failure.
    '''
    try:
        self.data['stimulus'] = msg
        self.data['icognocheck'] = self.query_hash(self.data)
        request = urllib2.Request(self.params['url'],
                                  urllib.urlencode(self.data),
                                  self.headers)
        reply = self.opener.open(request)
        body = reply.read()
        reply.close()
        return self.parse_response(body)
    except Exception:
        Log.error()
        return None
def call_listeners(self, key, channel, user, line):
    '''
    @summary: Calls the listener on all modules registered for it; stops at
              the first listener whose event() returns True.
    '''
    #if (key != 'privmsg' or user.powers is None or key in user.powers) and self._listeners.has_key(key):
    if self._listeners.has_key(key):
        for o in self._listeners[key]:
            try:
                if o.event(key, channel, user, line):
                    break
            # FIX: use 'except ... as e' (Python 2.6+) instead of the
            # legacy 'except ..., e' comma syntax.
            except Exception as e:
                Log.error('Error in %s: %s' % (o.key, e))
    return True
def call_listener(self, key, mod_key, channel, user, line):
    '''
    @param mod_key: A string identifying the module
    @summary: Calls the listener for a particular module registered for the
              given event key.
    '''
    if self._listeners.has_key(key):
        for o in self._listeners[key]:
            try:
                if o.key == mod_key:
                    o.event(key, channel, user, line)
                    break
            # FIX: use 'except ... as e' (Python 2.6+) instead of the
            # legacy 'except ..., e' comma syntax.
            except Exception as e:
                Log.error('Error in extension "%s" for key "%s": %s'
                          % (o.key, key, e))
    return True
def googleshort(url):
    '''
    @param url: The url to shorten
    @summary: Shortens the url to its the goo.gl url
    '''
    try:
        request = urllib2.Request(
            'https://www.googleapis.com/urlshortener/v1/url?key=%s'
            % config['app-id'],
            data='{"longUrl": "%s"}' % url,
            headers={'Content-Type': 'application/json'})
        handle = urllib2.urlopen(request)
        body = handle.read()
        handle.close()
        reply = json.loads(body)
        return 'Short URL %s : %s' % (reply['id'], min_url(reply['longUrl']))
    except Exception:
        Log.error()
        return None
def title(url, only_title=False):
    '''
    @param url: The url to resolve
    @summary: Fetches the title of an url by reading the page in 512-byte
              ranged chunks until a complete <title> element is seen.
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    if status == 302:
        return 'Redirection loop detected for url %s' % url
    if status != 200:
        return 'Status Code %s : url %s' % (status, url)
    try:
        if not ctype.startswith('text/'):
            return 'Title not available for content type %s : url %s' % (
                ctype, min_url(url))
        # Fast title search: fetch successive byte ranges until the
        # <title> tag (with its closing tag) is present in the buffer.
        title_tag = None
        data = ''
        offset = 0
        chunk = 512  # Chunk Size
        while True:
            request = urllib2.Request(url)
            request.headers['Range'] = 'bytes=%s-%s' % (offset,
                                                        offset + chunk - 1)
            reply = urllib2.urlopen(request)
            data += reply.read()
            reply.close()
            title_tag = BeautifulSoup(data).find('title')
            offset += chunk
            # Stop on a full response (200), or a partial (206) once
            # <title> has an ending tag
            if reply.code == 200 or (reply.code == 206 and title_tag
                                     and title_tag.nextSibling):
                break
        text = htmlx.unescape(
            u''.join(title_tag.findAll(text=True))).encode('utf-8')
        if only_title:
            return 'Title: %s' % text
        return '%s : [%s]' % (text, min_url(url))
    except Exception:
        Log.error()
        return None
def googleexpand(url):
    '''
    @param url: The goo.gl url to expand
    @summary: Gets the full url of a goo.gl url
    '''
    try:
        handle = urllib2.urlopen(
            'https://www.googleapis.com/urlshortener/v1/url?key=%s&shortUrl=%s'
            % (config['app-id'], urllib.quote(url)))
        body = handle.read()
        handle.close()
        reply = json.loads(body)
        if reply['status'] != 'OK':
            return None
        return 'Expanded URL %s : %s' % (reply['id'], reply['longUrl'])
    except Exception:
        Log.error()
        return None
def parse(self, channel, user, key, args):
    '''
    @param key: A string identifying the module
    @summary: Parses the command for the specified module
    @return: (key, ModuleResult, success) tuple; (None, None, None) when the
             module does not exist. Returns None implicitly when the module
             exists but is disabled.
    '''
    if not self.exists(key):
        return (None, None, None)
    elif self.is_enabled(key):
        parser = self._modules[key].parser
        try:
            args = self.arg_split(args)
            return key, self._modules[key].output(
                channel, user, parser.parse_args(args)), True
        # FIX: use 'except ... as e' (Python 2.6+) instead of the legacy
        # 'except ..., e' comma syntax.
        except ValueError as e:
            Log.error("ParserError: %s" % e.message)
            return key, ModuleResult("Parser Error '%s': %s" % (key, e)), False
        except StopIteration:
            # help requested (the bound exception value was unused)
            return key, ModuleResult(parser.format_help()), False
def googlecalc(query):
    '''
    @param query: Query for calculation
    @summary: Performs calculation on Google Calc and returns the results
    '''
    try:
        handle = urllib2.urlopen(
            'http://www.google.com/ig/calculator?hl=en&q=%s'
            % urllib.quote(query))
        body = handle.read().replace('\xa0', ' ')  # Convert to actual space
        # Replace spaces between numbers by comma (thousands separators)
        body = re.sub(r'(\d+)(\s|\xa0)(\d+)', r'\1,\3', body)
        handle.close()
        reply = json.loads(htmlx.fixjson(body))
        if reply['error'] != '':
            return None
        return ('%s = %s' % (htmlx.unescape(reply['lhs']),
                             htmlx.unescape(reply['rhs']))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def translate(msg):
    '''
    @param msg: Message to translate
    @summary: Translates a query into destination language using Google
              Translate
    @attention: TODO
    '''
    try:
        req = urllib2.Request('http://translate.google.com/#auto|en|%s.'
                              % urllib.quote(msg))
        req.add_header(
            'User-Agent',
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.52 Safari/536.5')
        handle = urllib2.urlopen(req)
        body = handle.read()
        Log.write(body)
        handle.close()
        span = BeautifulSoup(body).find('span', attrs={'id': 'result'})
        translated = ''.join(span.findAll(text=True))
        return ("%s -> %s" % (msg, translated)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googleexpand(url):
    '''
    @param url: The goo.gl url to expand
    @summary: Gets the full url of a goo.gl url
    '''
    try:
        conn = urllib2.urlopen(
            'https://www.googleapis.com/urlshortener/v1/url?key=%s&shortUrl=%s'
            % (config['app-id'], urllib.quote(url)))
        payload = conn.read()
        conn.close()
        data = json.loads(payload)
        if data['status'] == 'OK':
            return 'Expanded URL %s : %s' % (data['id'], data['longUrl'])
        return None
    except Exception:
        Log.error()
        return None
def googleshort(url):
    '''
    @param url: The url to shorten
    @summary: Shortens the url to its the goo.gl url
    '''
    try:
        post = urllib2.Request(
            'https://www.googleapis.com/urlshortener/v1/url?key=%s'
            % config['app-id'],
            data='{"longUrl": "%s"}' % url,
            headers={'Content-Type': 'application/json'})
        conn = urllib2.urlopen(post)
        payload = conn.read()
        conn.close()
        data = json.loads(payload)
        return 'Short URL %s : %s' % (data['id'], min_url(data['longUrl']))
    except Exception:
        Log.error()
        return None
def add(self, key, module, aliases=None):
    '''
    @param key: An identifier for module
    @param module: An instance of type BaseModule
    @param aliases: List of aliases, if any
    @summary: Adds the module to the manager and restores any persisted
              state for it.
    '''
    self._modules[key] = module
    if aliases is not None:
        for a in aliases:
            self._aliases[a] = key
    # Persistence
    s = self.get_module_state(key)
    if s:
        try:
            module.set_state(s)  # Load the modules previous state
        # FIX: a bare 'except:' also swallows SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            Log.error('Error restoring state for %s. Have you changed the class structure? : ' % key)
def googledefine(query, num=1):
    '''
    @param query: Query for searching
    @param num: Return the (n)th result
    @summary: Performs a Google search and returns the first result
    @attention: Google's description requires unescaping twice
    '''
    try:
        handle = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=013036536707430787589:_pqjad5hr1a&q=define+%s&alt=atom&num=%d'
            % (config['google']['app-id'], urllib.quote(query), num))
        feed = handle.read()
        handle.close()
        entry = BeautifulSoup(feed).findAll('entry')[num - 1]
        link = ''.join(entry.find('id').find(text=True))
        summary = entry.find('summary').find(text=True)
        desc = htmlx.unescape(
            htmlx.unescape(re.sub(r'<[^&]+>', '', summary)))
        return ("%s, %s" % (link, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def synonyms(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com synonym search and returns the
              results
    '''
    try:
        handle = urllib2.urlopen(
            'http://www.stands4.com/services/v2/syno.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        body = handle.read()
        handle.close()
        entry = BeautifulSoup(body).findAll('result')[num - 1]
        part = htmlx.unescape(
            ''.join(entry.find('partofspeech').findAll(text=True)))
        words = htmlx.unescape(
            ''.join(entry.find('synonyms').findAll(text=True)))
        return ('(%s) %s' % (part, words)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def title(url, only_title=False):
    '''
    @param url: The url to resolve
    @summary: Fetches the title of an url, reading the page incrementally
              in 512-byte ranges until the <title> element is complete.
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    if status == 302:
        return 'Redirection loop detected for url %s' % url
    if status == 200:
        try:
            if ctype.startswith('text/'):
                # Fast Title Search
                tag = None
                page = ''
                start = 0
                size = 512  # Chunk Size
                while True:
                    req = urllib2.Request(url)
                    req.headers['Range'] = 'bytes=%s-%s' % (start,
                                                            start + size - 1)
                    resp = urllib2.urlopen(req)
                    page += resp.read()
                    resp.close()
                    tag = BeautifulSoup(page).find('title')
                    start += size
                    # If PARTIAL OK (206) and <title> has an ending tag
                    if resp.code == 200 or (resp.code == 206 and tag
                                            and tag.nextSibling):
                        break
                name = htmlx.unescape(
                    u''.join(tag.findAll(text=True))).encode('utf-8')
                return ('Title: %s' % name) if only_title \
                    else ('%s : [%s]' % (name, min_url(url)))
            return 'Title not available for content type %s : url %s' % (
                ctype, min_url(url))
        except Exception:
            Log.error()
            return None
    return 'Status Code %s : url %s' % (status, url)
def geo(latitude, longitude):
    '''
    @param latitude: The latitude of location
    @param longitude: The longitude of location
    @summary: Performs a reverse geo lookup on Google Maps API
    @return: '[lat, long] : address' string, or None on failure
    '''
    try:
        response = urllib2.urlopen(
            'http://maps.googleapis.com/maps/api/geocode/xml?latlng=%s,%s&sensor=false'
            % (latitude, longitude))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        # FIX: initialise 'address' so a reply with no <formatted_address>
        # tags returns None cleanly instead of raising NameError (which the
        # broad except silently swallowed) or formatting the string 'None'.
        address = None
        for addr in soup.findAll('formatted_address'):
            address = addr.find(text=True)
            if address:
                address = str(address)
                break
        if not address:
            return None
        return '[%s, %s] : %s' % (latitude, longitude, address)
    except Exception:
        Log.error()
        return None
def iplocate(ip):
    '''
    @param ip: The IP address
    @summary: Performs a IP lookup and obtains the location of the user
    '''
    try:
        handle = urllib2.urlopen(
            'http://api.ipinfodb.com/v3/ip-city/?key=%s&format=xml&ip=%s'
            % (config['app-id'], urllib.quote(ip)))
        body = handle.read()
        handle.close()
        reply = BeautifulSoup(body).find('response')
        if reply.find('statuscode').find(text=True) != "OK":
            return None
        lat = str((reply.find('latitude').find(text=True)))
        lng = str(reply.find('longitude').find(text=True))
        return '%s belongs to %s' % (ip, geo(lat, lng))
    except Exception:
        Log.error()
        return None
def google_weather(place):
    '''
    @param place: Location to look up
    @summary: Fetches the current conditions from the Google Weather API
    '''
    try:
        handle = urllib2.urlopen('http://www.google.com/ig/api?weather=%s'
                                 % urllib.quote(place))
        body = handle.read()
        handle.close()
        doc = BeautifulSoup(body)
        now = doc.find('current_conditions')
        city = doc.find('forecast_information').find('city')['data']
        return ('%s: %s at %sC, %s, %s'
                % (city,
                   now.find('condition')['data'],
                   now.find('temp_c')['data'],
                   now.find('humidity')['data'],
                   now.find('wind_condition')['data'])).encode('utf-8')
    except Exception:
        Log.error()
        return None
def dictionary(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com dictionary search and returns the
              first result
    '''
    try:
        conn = urllib2.urlopen(
            'http://www.stands4.com/services/v2/defs.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        payload = conn.read()
        conn.close()
        hit = BeautifulSoup(payload).findAll('result')[num - 1]
        term = htmlx.unescape(''.join(hit.find('term').findAll(text=True)))
        part = htmlx.unescape(
            ''.join(hit.find('partofspeech').findAll(text=True)))
        meaning = htmlx.unescape(
            ''.join(hit.find('definition').findAll(text=True)))
        usage = htmlx.unescape(
            ''.join(hit.find('example').findAll(text=True)))
        return ('%s (%s), %s. Eg: %s'
                % (term, part, meaning, usage)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def urbandefine(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a urban dictionary search and returns the first result
    @return: "term: definition[, Eg: example]" as a UTF-8 bytestring, or
             None on any failure
    '''
    try:
        response = urllib2.urlopen(
            'http://www.urbandictionary.com/define.php?term=%s'
            % urllib.quote(term))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        # FIX: raw string for the regex so '\d' is a regex digit class and
        # not an (invalid) string escape sequence.
        items = soup.find('table', attrs={'id': 'entries'}).findAll(
            'td', attrs={'class': 'text', 'id': re.compile(r'entry_\d+')})
        item = items[num - 1]
        define = htmlx.unescape(''.join(
            item.find('div', attrs={'class': 'definition'}).findAll(text=True)))
        example = htmlx.unescape(''.join(
            item.find('div', attrs={'class': 'example'}).findAll(text=True)))
        if len(example):
            example = ", Eg: " + example
        return ("%s: %s%s" % (term, define, example)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def wolfram(query):
    '''
    @param query: Query for calculation
    @summary: Performs calculation on Wolfram Alpha and returns the results
    '''
    try:
        conn = urllib2.urlopen(
            'http://api.wolframalpha.com/v2/query?appid=%s&input=%s&format=plaintext'
            % (config['app-id'], urllib.quote(query)))
        xml = conn.read()
        conn.close()
        doc = BeautifulSoup(xml)
        # Collect the plaintext of every primary result pod
        answers = [
            htmlx.unescape(''.join(pod.find('plaintext').findAll(text=True)))
            for pod in doc.findAll('pod', attrs={'primary': 'true'})
        ]
        if not answers:
            return None
        return re.sub(r'^rupee\s*', 'Rs. ',
                      (', '.join(answers))).encode('utf-8')
    except Exception:
        Log.error()
        return None
def wiki(word, num=1, single=False):
    '''
    @param word: Word to search for
    @param num: Get the nth result
    @param single: Get only the title
    @summary: Searches for a word on Wikipedia and returns an abstract
    '''
    try:
        conn = urllib2.urlopen(
            'http://en.wikipedia.org/w/api.php?action=opensearch&search=%s&format=xml'
            % urllib.quote(word))
        payload = conn.read()
        conn.close()
        entry = BeautifulSoup(payload).findAll('item')[num - 1]
        if single:
            abstract = ''.join(entry.find('text').find(text=True))
        else:
            abstract = ''.join(entry.find('description').find(text=True))
        link = ''.join(entry.find('url').find(text=True))
        abstract = htmlx.unescape(abstract.replace('\n', ' '))
        return ("%s : %s" % (link, abstract)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googleimage(query, num=1, single=False):
    '''
    @param query: Query for searching
    @param num: Get the nth result
    @param single: Get only the title
    @summary: Performs a Google custom image search and returns the result
    '''
    try:
        conn = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=008715276770992001381:iyfgiiccnki&q=%s&alt=atom&num=%d&searchType=image'
            % (config['app-id'], urllib.quote(query), num))
        payload = conn.read()
        conn.close()
        entry = BeautifulSoup(payload).findAll('entry')[num - 1]
        link = ''.join(entry.find('id').find(text=True))
        source = entry.find('title' if single else 'summary').find(text=True)
        # Strip markup, then unescape twice (Google double-escapes)
        caption = htmlx.unescape(
            htmlx.unescape(re.sub(r'<[^&]+>', '', source)))
        return ("%s : %s" % (link, caption)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def synonyms(term, num=1):
    '''
    @param term: Term for searching
    @param num: Return the (n)th result
    @summary: Performs a abbreviations.com synonym search and returns the
              results
    '''
    try:
        conn = urllib2.urlopen(
            'http://www.stands4.com/services/v2/syno.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        payload = conn.read()
        conn.close()
        hit = BeautifulSoup(payload).findAll('result')[num - 1]
        speech_part = htmlx.unescape(
            ''.join(hit.find('partofspeech').findAll(text=True)))
        syn_list = htmlx.unescape(
            ''.join(hit.find('synonyms').findAll(text=True)))
        return ('(%s) %s' % (speech_part, syn_list)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def googledefine(query, num=1):
    '''
    @param query: Query for searching
    @param num: Return the (n)th result
    @summary: Performs a Google search and returns the first result
    @attention: Google's description requires unescaping twice
    '''
    try:
        conn = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=013036536707430787589:_pqjad5hr1a&q=define+%s&alt=atom&num=%d'
            % (config['google']['app-id'], urllib.quote(query), num))
        payload = conn.read()
        conn.close()
        hit = BeautifulSoup(payload).findAll('entry')[num - 1]
        link = ''.join(hit.find('id').find(text=True))
        stripped = re.sub(r'<[^&]+>', '', hit.find('summary').find(text=True))
        meaning = htmlx.unescape(htmlx.unescape(stripped))
        return ("%s, %s" % (link, meaning)).encode('utf-8')
    except Exception:
        Log.error()
        return None
def google_weather(place): """ @param term: Term for searching @summary: Performs a urban dictionary search and returns the first result """ try: response = urllib2.urlopen("http://www.google.com/ig/api?weather=%s" % urllib.quote(place)) page = response.read() response.close() soup = BeautifulSoup(page) current = soup.find("current_conditions") return ( "%s: %s at %sC, %s, %s" % ( soup.find("forecast_information").find("city")["data"], current.find("condition")["data"], current.find("temp_c")["data"], current.find("humidity")["data"], current.find("wind_condition")["data"], ) ).encode("utf-8") except Exception: Log.error() return None
class DynamicExtensionManager(BaseManager, ListenerManager):
    '''
    Manages Dynamic Extensions
    '''

    def __init__(self):
        BaseManager.__init__(self)
        ListenerManager.__init__(self)
        self._modregex = None    # Regex to match module key in the command string
        self._modregex_str = ''

    @property
    def modregex(self):
        return self._modregex

    @property
    def modregex_str(self):
        return self._modregex_str

    def reload(self, key=None):
        '''
        @summary: Calls reload on each module (or just the one given by key)
        '''
        if key:
            self._modules[key].reload()
        else:
            for m in self._modules.values():
                m.reload()

    def build_regex(self, regex_str=None):
        '''
        @param regex_str: If specified the string is set as the regex instead
                          of building the string from modules
        '''
        if regex_str:
            r = regex_str
        else:
            r = ''
            for m in self._modules.values():
                rxs = m.get_regex_str()
                if rxs:
                    r += rxs + '|'
            r = r.rstrip('|')
        if len(r):
            # FIX: raw string so '\s' is not an (invalid) string escape
            r = r'^(?:%s)(?:\s|$)' % r
            self._modregex = re.compile(r)
            self._modregex_str = r
        else:
            self._modregex = None
            self._modregex_str = ''

    def add(self, module, rebuild=True):
        '''
        @param module: An instance of type BaseDynamicModule
        @param rebuild: True if the regex matcher should be built after
                        attaching, False if build_regex() will be called
                        explicitly
        @summary: Adds the module to the manager
        @return: True if a module with the same key was replaced
        '''
        if self._modules.has_key(module.key):
            removed = True
            self.remove(module.key)
        else:
            removed = False
        self._modules[module.key] = module
        if module.aliases is not None:
            for a in module.aliases:
                self._aliases[a] = module.key
        # Persistence
        s = self.get_module_state(module.key)
        if s:
            module.set_state(s)  # Load the modules previous state
        if rebuild:
            self.build_regex()
        # Attach Listeners
        self.attach_listeners(module.listeners, module)
        return removed

    def remove(self, key, rebuild=True):
        '''
        @param key: An identifier for module
        @param rebuild: True if the regex matcher should be re-built after
                        removing, False if build_regex() will be called
                        explicitly
        @summary: Removes the module from the manager
        '''
        # Purge Listeners
        if self._modules.has_key(key):
            self.purge_listeners(self._modules[key])
        super(DynamicExtensionManager, self).remove(key)
        if rebuild:
            self.build_regex()
        return True

    def enabled_modules(self):
        # Keys of all modules currently enabled
        l = []
        for k, m in self._modules.items():
            if m.is_enabled():
                l.append(k)
        return l

    def disabled_modules(self):
        # Keys of all modules currently disabled
        l = []
        for k, m in self._modules.items():
            if not m.is_enabled():
                l.append(k)
        return l

    def clear(self):
        '''
        Removes all modules from the manager
        '''
        for key in self._modules.keys():
            self.remove(key, False)
        self._modregex = None
        self._modregex_str = ''

    def check(self, user, line):
        '''
        @summary: Checks a line for a command and returns the result of the
                  parse as (module_key, remaining_args)
        '''
        # FIX: when no modules are registered the regex is None; return the
        # no-match tuple instead of raising AttributeError.
        if self._modregex is None:
            return (None, None)
        m = self._modregex.match(line)
        if m and m.lastgroup:
            return m.lastgroup, line[m.end(m.lastgroup) + 1:]
        else:
            return (None, None)

    def parse(self, channel, user, key, args):
        '''
        @param key: A string identifying the module
        @summary: Parses the command for the specified module
        '''
        if not self.exists(key):
            return (None, None, None)
        elif self.is_enabled(key):
            parser = self._modules[key].parser
            try:
                args = self.arg_split(args)
                return key, self._modules[key].output(
                    channel, user, parser.parse_args(args)), True
            # FIX: use 'except ... as e' (Python 2.6+) instead of the
            # legacy 'except ..., e' comma syntax.
            except ValueError as e:
                Log.error("ParserError: %s" % e.message)
                return key, ModuleResult(
                    "Parser Error '%s': %s" % (key, e)), False
            except StopIteration:
                # help requested
                return key, ModuleResult(parser.format_help()), False
            except Exception as e:
                Log.error("ArgumentError: %s" % e)
                return key, ModuleResult(
                    "Argument Error '%s': %s. Use -h/--help to get help on the parameters"
                    % (key, e)), False
status = None ctype = None request = urllib2.Request(url) request.get_method = lambda: 'HEAD' try: response = urllib2.urlopen(request) status = response.getcode() ctype = response.info().getheader('content-type') except urllib2.URLError, e: if e.code in (301, 302, 303, 307): # Redirection loop status = 302 else: status = e.code return status, ctype, url except Exception: Log.error('URL.visit: ') return None, None, None def title(url, only_title=False): ''' @param url: The url to resolve @summary: Fetches the title of an url ''' status, ctype, url = visit(url) if url is None: return None else: if status == 302: return 'Redirection loop detected for url %s' % url elif status == 200: try:
status = None ctype = None request = urllib2.Request(url) request.get_method = lambda: 'HEAD' try: response = urllib2.urlopen(request) status = response.getcode() ctype = response.info().getheader('content-type') except urllib2.URLError, e: if e.code in (301, 302, 303, 307): # Redirection loop status = 302 else: status = e.code return status, ctype, url except Exception: Log.error('URL.visit: ') return None, None, None def title(url, only_title=False): ''' @param url: The url to resolve @summary: Fetches the title of an url ''' status, ctype, url = visit(url) if url is None: return None else: if status == 302: return 'Redirection loop detected for url %s' % url elif status == 200: