Example #1
def quote(term, search=False, author=False, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Returns a quote from abbreviations.com
    '''
    try:

        if search:
            srch = "SEARCH"
        elif author:
            srch = "AUTHOR"
        else:
            srch = "RANDOM"
        response = urllib2.urlopen(
            'http://www.stands4.com/services/v2/quotes.php?uid=%s&tokenid=%s&searchtype=%s&query=%s'
            % (config['stands4']['userid'], config['stands4']['token'], srch,
               urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        items = soup.findAll('result')
        item = items[num - 1]
        quote = htmlx.unescape(''.join(
            item.find('quote').findAll(text=True))).strip('"')
        author = htmlx.unescape(''.join(
            item.find('author').findAll(text=True)))
        return ('"%s" -%s' % (quote, author)).encode('utf-8')
    except Exception:
        Log.error()
        return None
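A minimal usage sketch (assuming the module-level config holds valid stands4 credentials and urllib/urllib2 are imported as above); the search and author flags select the searchtype sent to the API:

print quote('')                                # random quote
print quote('courage', search=True)            # search quotes by term
print quote('Mark Twain', author=True, num=2)  # second quote by the author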
Example #2
def quote(term, search=False, author=False, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Returns a quote from abbreviations.com
    '''      
    try:
        
        if search:
            srch = "SEARCH"
        elif author:
            srch = "AUTHOR"
        else:
            srch = "RANDOM"                
        response = urllib2.urlopen('http://www.stands4.com/services/v2/quotes.php?uid=%s&tokenid=%s&searchtype=%s&query=%s' % (config['stands4']['userid'], config['stands4']['token'], srch, urllib.quote(term)))        
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        items = soup.findAll('result')
        item = items[num-1]
        quote = htmlx.unescape(''.join(item.find('quote').findAll(text=True))).strip('"')
        author = htmlx.unescape(''.join(item.find('author').findAll(text=True)))        
        return ('"%s" -%s' % (quote, author)).encode('utf-8')        
    except Exception:
        Log.error()
        return None        
Example #3
def googleimage(query, num=1, single=False):
    '''
        @param query: Query for searching
        @param num: Get the nth result
        @param single: Get only the title 
        @summary: Performs a Google image search on the thinkdigit forum and returns the result
    '''
    try:
        response = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=008715276770992001381:iyfgiiccnki&q=%s&alt=atom&num=%d&searchType=image'
            % (config['app-id'], urllib.quote(query), num))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        item_1 = soup.findAll('entry')[num - 1]
        url = ''.join(item_1.find('id').find(text=True))
        if single:
            desc = htmlx.unescape(
                htmlx.unescape(
                    re.sub(r'&lt;[^&]+&gt;', '',
                           item_1.find('title').find(text=True))))
        else:
            desc = htmlx.unescape(
                htmlx.unescape(
                    re.sub(r'&lt;[^&]+&gt;', '',
                           item_1.find('summary').find(text=True))))
        return ("%s : %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
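A hedged usage sketch, assuming config['app-id'] is a valid API key for the custom search engine hard-coded in the URL above:

print googleimage('crab nebula')                      # "url : summary" of the first hit
print googleimage('crab nebula', num=3, single=True)  # third hit, title only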
Example #4
def dictionary(term, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Performs an abbreviations.com dictionary search and returns the first result
    '''
    try:
        response = urllib2.urlopen(
            'http://www.stands4.com/services/v2/defs.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        items = soup.findAll('result')
        item = items[num - 1]
        term = htmlx.unescape(''.join(item.find('term').findAll(text=True)))
        part = htmlx.unescape(''.join(
            item.find('partofspeech').findAll(text=True)))
        definition = htmlx.unescape(''.join(
            item.find('definition').findAll(text=True)))
        example = htmlx.unescape(''.join(
            item.find('example').findAll(text=True)))
        return ('%s (%s), %s. Eg: %s' %
                (term, part, definition, example)).encode('utf-8')
    except Exception:
        Log.error()
        return None
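Illustrative call, under the same stands4 credential assumptions as the other helpers; the output follows the "term (partofspeech), definition. Eg: example" template built above:

print dictionary('serendipity')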
Example #5
def urbandefine(term, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Performs an Urban Dictionary search and returns the first result
    '''
    try:
        response = urllib2.urlopen(
            'http://www.urbandictionary.com/define.php?term=%s' %
            urllib.quote(term))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        items = soup.find('table', attrs={
            'id': 'entries'
        }).findAll('td',
                   attrs={
                       'class': 'text',
                       'id': re.compile(r'entry_\d+')
                   })
        item = items[num - 1]
        define = htmlx.unescape(''.join(
            item.find('div', attrs={
                'class': 'definition'
            }).findAll(text=True)))
        example = htmlx.unescape(''.join(
            item.find('div', attrs={
                'class': 'example'
            }).findAll(text=True)))
        if len(example):
            example = ", Eg: " + example
        return ("%s: %s%s" % (term, define, example)).encode('utf-8')
    except Exception:
        Log.error()
        return None
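A short usage sketch; note that this scrapes the Urban Dictionary HTML directly, so it only works while the site still serves the table#entries markup the selectors above expect:

print urbandefine('yak shaving')         # first definition
print urbandefine('yak shaving', num=2)  # second definition, if present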
Example #6
def title(url, only_title=False):
    '''
        @param url: The url to resolve
        @summary: Fetches the title of a URL
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    else:
        if status == 302:
            return 'Redirection loop detected for url %s' % url
        elif status == 200:
            try:
                if ctype.startswith('text/'):
                    # Fast Title Search
                    found = None
                    buff = ''
                    m = 0
                    n = 512  # Chunk Size
                    while True:
                        req = urllib2.Request(url)
                        req.headers['Range'] = 'bytes=%s-%s' % (m, m + n - 1)
                        response = urllib2.urlopen(req)
                        buff += response.read()
                        response.close()
                        soup = BeautifulSoup(buff)
                        found = soup.find('title')
                        m += n
                        # Stop on a full response (200), or on a partial one (206) once <title> has a closing tag
                        if response.code == 200 or (response.code == 206
                                                    and found
                                                    and found.nextSibling):
                            break
                    if only_title:
                        return 'Title: %s' % htmlx.unescape(u''.join(
                            found.findAll(text=True))).encode('utf-8')
                    else:
                        return '%s : [%s]' % (htmlx.unescape(u''.join(
                            found.findAll(text=True))).encode('utf-8'),
                                              min_url(url))
                else:
                    return 'Title not available for content type %s : url %s' % (
                        ctype, min_url(url))
            except Exception:
                Log.error()
                return None
        else:
            return 'Status Code %s : url %s' % (status, url)
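Hedged usage sketch, assuming visit() returns a (status, content_type, resolved_url) tuple as the unpacking above implies. The Range-header loop pulls the page 512 bytes at a time and stops as soon as a complete <title> element has been seen, so large pages are not downloaded in full:

print title('http://example.com')                   # title plus shortened url
print title('http://example.com', only_title=True)  # title only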
Example #7
def description(url):
    '''
        @param url: The url to resolve
        @summary: Fetches the meta-description of a URL
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    else:
        if status == 302:
            return 'Redirection loop detected for url %s' % url
        elif status == 200:
            try:
                if ctype.startswith('text/'):
                    response = urllib2.urlopen(url)
                    page = response.read()
                    response.close()
                    soup = BeautifulSoup(page)
                    desc = soup.find(
                        'meta',
                        {'name': re.compile('description', re.I)})['content']
                    return 'Description %s : [%s]' % (htmlx.unescape(desc),
                                                      min_url(url))
                else:
                    return 'Preview not available for content type %s : [%s]' % (
                        ctype, min_url(url))
            except Exception:
                Log.error()
                return None
        else:
            return 'Status Code %s : url %s' % (status, url)
Example #8
def wiki(word, num=1, single=False):
    '''
        @param word: Word to search for
        @param num: Get the nth result
        @param single: Get only the title        
        @summary: Searches for a word on Wikipedia and returns an abstract
    '''
    try:
        response = urllib2.urlopen(
            'http://en.wikipedia.org/w/api.php?action=opensearch&search=%s&format=xml'
            % urllib.quote(word))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        item_1 = soup.findAll('item')[num - 1]
        if single:
            desc = ''.join(item_1.find('text').find(text=True))
        else:
            desc = ''.join(item_1.find('description').find(text=True))
        url = ''.join(item_1.find('url').find(text=True))
        return ("%s : %s" %
                (url, htmlx.unescape(desc.replace('\n', ' ')))).encode('utf-8')
    except Exception:
        Log.error()
        return None
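Usage sketch for the opensearch helper; single=True returns the matched title text instead of the description abstract:

print wiki('Alan Turing')               # url : abstract
print wiki('Alan Turing', single=True)  # url : matched title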
Example #9
def description(url):
    '''
        @param url: The url to resolve
        @summary: Fetches the meta-description of a URL
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    else:
        if status == 302:
            return 'Redirection loop detected for url %s' % url
        elif status == 200:        
            try:
                if ctype.startswith('text/'):
                    response = urllib2.urlopen(url)        
                    page = response.read()                        
                    response.close()
                    soup = BeautifulSoup(page)
                    desc = soup.find('meta', {'name': re.compile('description', re.I)})['content']                    
                    return 'Description %s : [%s]' % (htmlx.unescape(desc), min_url(url))
                else:
                    return 'Preview not available for content type %s : [%s]' % (ctype, min_url(url))
            except Exception:
                Log.error()
                return None
        else:
            return 'Status Code %s : url %s' % (status, url)  
Example #10
def parse_cleverbot(self, channel, query):
    '''
        @param channel: Channel to reply on
        @param query: Query to send to Cleverbot
        @summary: Forwards a query to Cleverbot and relays the reply to the channel
    '''
    reply = self._cb.ask(query)
    if reply:
        reply = htmlx.unescape(reply)
        self._bot.say(channel, reply)
        return True
    else:
        return False
Example #11
def parse_response(self, resp):
    '''
        @param resp: Raw carriage-return-separated response string
        @summary: Maps response fields onto the known keys and returns the unescaped ttsText field
    '''
    for k, v in zip(self.response_keys, resp.split("\r")):
        try:
            self.response[k] = v
            if k in self.data:
                self.data[k] = v
        except Exception:
            pass
    return htmlx.unescape(self.response['ttsText'])
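A hypothetical illustration of what the method does: the key list and payload below are made up, only the carriage-return splitting and the zip against response_keys mirror the code above (htmlx imported as in the other examples):

response_keys = ['status', 'conversation_id', 'ttsText']  # hypothetical keys
resp = 'OK\r42\rHello &amp; welcome'                       # hypothetical payload
fields = dict(zip(response_keys, resp.split('\r')))
print htmlx.unescape(fields['ttsText'])                    # Hello & welcome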
Example #12
def googlecalc(query):
    '''
        @param query: Query for calculation
        @summary: Performs calculation on Google Calc and returns the results
    '''    
    try:        
        response = urllib2.urlopen('http://www.google.com/ig/calculator?hl=en&q=%s' % urllib.quote(query))
        page = response.read().replace('\xa0', ' ')                 # Convert &nbsp; to actual space
        page = re.sub(r'(\d+)(\s|\xa0)(\d+)', r'\1,\3', page)       # Insert commas between digit groups (thousands separator)
        response.close()        
        result = json.loads(htmlx.fixjson(page))    
        if result['error'] == '':
            return ('%s = %s' % (htmlx.unescape(result['lhs']), htmlx.unescape(result['rhs']))).encode('utf-8')
        else:
            return None
    except Exception:
        Log.error()
        return None        
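Illustrative call only: the legacy /ig/calculator endpoint has long since been retired, so today the request is expected to fail and fall into the except branch:

print googlecalc('2^10 seconds in minutes')  # returned "lhs = rhs" on success, None otherwise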
Example #13
File: AI.py Project: nbaztec/Qirc
def parse_response(self, resp):
    '''
        @param resp: Raw carriage-return-separated response string
        @summary: Maps response fields onto the known keys and returns the unescaped ttsText field
    '''
    for k, v in zip(self.response_keys, resp.split("\r")):
        try:
            self.response[k] = v
            if k in self.data:
                self.data[k] = v
        except Exception:
            pass
    return htmlx.unescape(self.response['ttsText'])
Example #14
def googledefine(query, num=1):    
    '''
        @param query: Query for searching
        @param num: Return the (n)th result
        @summary: Performs a Google search and returns the first result
        @attention: Google's description requires unescaping twice
    '''  
    try:        
        response = urllib2.urlopen('https://www.googleapis.com/customsearch/v1?key=%s&cx=013036536707430787589:_pqjad5hr1a&q=define+%s&alt=atom&num=%d' % (config['google']['app-id'], urllib.quote(query), num))
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        item_1 = soup.findAll('entry')[num-1]
        url =  ''.join(item_1.find('id').find(text=True))
        desc = htmlx.unescape(htmlx.unescape(re.sub(r'&lt;[^&]+&gt;','',item_1.find('summary').find(text=True)))) 
        return ("%s, %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None    
Example #15
def synonyms(term, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Performs an abbreviations.com synonym search and returns the results
    '''  
    try:              
        response = urllib2.urlopen('http://www.stands4.com/services/v2/syno.php?uid=%s&tokenid=%s&word=%s' % (config['stands4']['userid'], config['stands4']['token'], urllib.quote(term)))        
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        items = soup.findAll('result')
        item = items[num-1]        
        part = htmlx.unescape(''.join(item.find('partofspeech').findAll(text=True)))        
        syno = htmlx.unescape(''.join(item.find('synonyms').findAll(text=True)))
        return ('(%s) %s' % (part, syno)).encode('utf-8')        
    except Exception:
        Log.error()
        return None 
Example #16
def title(url, only_title=False):
    '''
        @param url: The url to resolve
        @summary: Fetches the title of a URL
    '''
    status, ctype, url = visit(url)
    if url is None:
        return None
    else:
        if status == 302:
            return 'Redirection loop detected for url %s' % url
        elif status == 200:        
            try:
                if ctype.startswith('text/'):
                    # Fast Title Search
                    found = None
                    buff = ''
                    m = 0
                    n = 512     # Chunk Size
                    while True:
                        req = urllib2.Request(url)
                        req.headers['Range'] =  'bytes=%s-%s' % (m, m+n-1)
                        response = urllib2.urlopen(req)    
                        buff += response.read()                        
                        response.close()
                        soup = BeautifulSoup(buff)
                        found = soup.find('title')
                        m += n
                        # Stop on a full response (200), or on a partial one (206) once <title> has a closing tag
                        if response.code == 200 or (response.code == 206 and found and found.nextSibling):
                            break
                    if only_title:      
                        return 'Title: %s' % htmlx.unescape(u''.join(found.findAll(text=True))).encode('utf-8')
                    else:
                        return '%s : [%s]' % (htmlx.unescape(u''.join(found.findAll(text=True))).encode('utf-8'), min_url(url))
                else:                    
                    return 'Title not available for content type %s : url %s' % (ctype, min_url(url))
            except Exception:
                Log.error()
                return None
        else:
            return 'Status Code %s : url %s' % (status, url)  
Example #17
def dictionary(term, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Performs an abbreviations.com dictionary search and returns the first result
    '''  
    try:              
        response = urllib2.urlopen('http://www.stands4.com/services/v2/defs.php?uid=%s&tokenid=%s&word=%s' % (config['stands4']['userid'], config['stands4']['token'], urllib.quote(term)))        
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        items = soup.findAll('result')
        item = items[num-1]
        term = htmlx.unescape(''.join(item.find('term').findAll(text=True)))
        part = htmlx.unescape(''.join(item.find('partofspeech').findAll(text=True)))
        definition = htmlx.unescape(''.join(item.find('definition').findAll(text=True)))
        example = htmlx.unescape(''.join(item.find('example').findAll(text=True)))
        return ('%s (%s), %s. Eg: %s' % (term, part, definition, example)).encode('utf-8')        
    except Exception:
        Log.error()
        return None 
Example #18
def urbandefine(term, num=1):    
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Performs an Urban Dictionary search and returns the first result
    '''  
    try:       
        response = urllib2.urlopen('http://www.urbandictionary.com/define.php?term=%s' % urllib.quote(term))        
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        items = soup.find('table', attrs={'id': 'entries'}).findAll('td', attrs={'class': 'text', 'id': re.compile(r'entry_\d+')})
        item = items[num-1]
        define = htmlx.unescape(''.join(item.find('div', attrs={'class': 'definition'}).findAll(text=True)))
        example = htmlx.unescape(''.join(item.find('div', attrs={'class': 'example'}).findAll(text=True)))
        if len(example):            
            example = ", Eg: " + example
        return ("%s: %s%s" % (term, define, example)).encode('utf-8')        
    except Exception:
        Log.error()
        return None
Example #19
def googleimage(query, num=1, single=False):    
    '''
        @param query: Query for searching
        @param num: Get the nth result
        @param single: Get only the title 
        @summary: Performs a Google image search on the thinkdigit forum and returns the result
    '''    
    try:        
        response = urllib2.urlopen('https://www.googleapis.com/customsearch/v1?key=%s&cx=008715276770992001381:iyfgiiccnki&q=%s&alt=atom&num=%d&searchType=image' % (config['app-id'], urllib.quote(query), num))
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        item_1 = soup.findAll('entry')[num-1]
        url =  ''.join(item_1.find('id').find(text=True))
        if single:
            desc = htmlx.unescape(htmlx.unescape(re.sub(r'&lt;[^&]+&gt;','',item_1.find('title').find(text=True))))
        else:
            desc = htmlx.unescape(htmlx.unescape(re.sub(r'&lt;[^&]+&gt;','',item_1.find('summary').find(text=True)))) 
        return ("%s : %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
Example #20
def synonyms(term, num=1):
    '''
        @param term: Term for searching
        @param num: Return the (n)th result
        @summary: Performs an abbreviations.com synonym search and returns the results
    '''
    try:
        response = urllib2.urlopen(
            'http://www.stands4.com/services/v2/syno.php?uid=%s&tokenid=%s&word=%s'
            % (config['stands4']['userid'], config['stands4']['token'],
               urllib.quote(term)))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        items = soup.findAll('result')
        item = items[num - 1]
        part = htmlx.unescape(''.join(
            item.find('partofspeech').findAll(text=True)))
        syno = htmlx.unescape(''.join(
            item.find('synonyms').findAll(text=True)))
        return ('(%s) %s' % (part, syno)).encode('utf-8')
    except Exception:
        Log.error()
        return None
Example #21
def googledefine(query, num=1):
    '''
        @param query: Query for searching
        @param num: Return the (n)th result
        @summary: Performs a Google search and returns the first result
        @attention: Google's description requires unescaping twice
    '''
    try:
        response = urllib2.urlopen(
            'https://www.googleapis.com/customsearch/v1?key=%s&cx=013036536707430787589:_pqjad5hr1a&q=define+%s&alt=atom&num=%d'
            % (config['google']['app-id'], urllib.quote(query), num))
        page = response.read()
        response.close()
        soup = BeautifulSoup(page)
        item_1 = soup.findAll('entry')[num - 1]
        url = ''.join(item_1.find('id').find(text=True))
        desc = htmlx.unescape(
            htmlx.unescape(
                re.sub(r'&lt;[^&]+&gt;', '',
                       item_1.find('summary').find(text=True))))
        return ("%s, %s" % (url, desc)).encode('utf-8')
    except Exception:
        Log.error()
        return None
Example #22
def wolfram(query):
    '''
        @param query: Query for calculation
        @summary: Performs calculation on Wolfram Alpha and returns the results
    '''    
    try:        
        response = urllib2.urlopen('http://api.wolframalpha.com/v2/query?appid=%s&input=%s&format=plaintext' % (config['app-id'], urllib.quote(query)))        
        page = response.read()                            
        response.close()
        soup = BeautifulSoup(page)            
        out = []
        primary_items = soup.findAll('pod', attrs={'primary': 'true'})        
        for primary in primary_items:
            out.append(htmlx.unescape(''.join(primary.find('plaintext').findAll(text=True))))
        if len(out):
            return re.sub(r'^rupee\s*', 'Rs. ', (', '.join(out))).encode('utf-8')
        else:
            return None
    except Exception:
        Log.error()
        return None        
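Usage sketch, assuming config['app-id'] holds a Wolfram Alpha AppID; only pods flagged primary='true' contribute, and their plaintext is comma-joined:

print wolfram('speed of light in km/s')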
Example #23
def wiki(word, num=1, single=False):
    '''
        @param word: Word to search for
        @param num: Get the nth result
        @param single: Get only the title        
        @summary: Searches for a word on Wikipedia and returns an abstract
    '''
    try:        
        response = urllib2.urlopen('http://en.wikipedia.org/w/api.php?action=opensearch&search=%s&format=xml' % urllib.quote(word))        
        page = response.read()                        
        response.close()
        soup = BeautifulSoup(page)            
        item_1 = soup.findAll('item')[num-1]
        if single:
            desc = ''.join(item_1.find('text').find(text=True))
        else:
            desc = ''.join(item_1.find('description').find(text=True))
        url = ''.join(item_1.find('url').find(text=True))        
        return ("%s : %s" % (url, htmlx.unescape(desc.replace('\n', ' ')))).encode('utf-8')
    except Exception:
        Log.error()
        return None