Example 1
    def get_description_document_info(self, url):
        xml_document = minidom.parse(urllib.urlopen(url, proxies=get_proxy()))

        self.info['name'] = xml_document.getElementsByTagName(
            "ShortName")[0].firstChild.data
        self.info['description'] = xml_document.getElementsByTagName(
            "Description")[0].firstChild.data

        url_elements = xml_document.getElementsByTagName("Url")
        url_element = None

        # try to find the Url element serving rss or atom results:
        for element in url_elements:
            mime_type = element.getAttribute("type").strip()
            if mime_type in ('application/rss+xml', 'application/atom+xml'):
                url_element = element
                break

        # if not found, pick the first Url as a default:
        if url_element is None:
            url_element = url_elements[0]
        url_template = url_element.getAttribute("template").strip()

        # cover for a common error in description documents: the template
        # given as element text instead of as a "template" attribute
        if url_template == '':
            url_template = url_element.firstChild.data.strip()
        if url_template != '':
            self.info['url-template'] = url_template
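
For reference, a minimal sketch of the kind of OpenSearch description document this method parses. The sample XML and URL below are illustrative, not taken from the project:

    import xml.dom.minidom as minidom

    SAMPLE = """<?xml version="1.0"?>
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/">
      <ShortName>Example</ShortName>
      <Description>Example search</Description>
      <Url type="application/rss+xml"
           template="http://example.com/?q={searchTerms}"/>
    </OpenSearchDescription>"""

    doc = minidom.parseString(SAMPLE)
    print(doc.getElementsByTagName("ShortName")[0].firstChild.data)      # Example
    print(doc.getElementsByTagName("Url")[0].getAttribute("template"))   # the URL template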
Example 2
    def query(self, qstring):
        args = {'lang': self._lang, 'search': qstring}
        url = WIKIPEDIA_SUGGEST_URL + '?' + urllib.urlencode(args)

        try:
            result = urllib.urlopen(url, proxies=get_proxy())
        except (IOError, EOFError), msg:
            # Log the error for debugging purposes and stop querying
            LOGGER.error("Could not open URL %s: %s, %s", url, msg[0], msg[1])
            return
Example 3
    def query(self, query):
        self.load_websites()
        matches = []
        for website in self.websites:
            LOGGER.debug("Website: %s", website[0])

            # website tuple: [0] name, [3] URL template, [4] enabled flag
            if not website[4]:
                continue

            url_template = website[3]
            if url_template is not None:
                LOGGER.debug("URL template: %s", url_template)

                url = url_template.replace("{searchTerms}",
                                           urllib.quote_plus(query))

                for key, val in DEFAULT_VALUES.items():
                    url = url.replace("{%s}" % key, val)  # required param
                    url = url.replace("{%s?}" % key, val)  # optional param

                # Remove unsupported parameters
                url = re.sub(r'&(\w+?)={[^}]*}', '', url)

                LOGGER.debug("URL: %s", url)

                try:
                    xml_document = minidom.parse(
                        urllib.urlopen(url, proxies=get_proxy()))
                    root_tag = xml_document.documentElement.tagName

                    # atom:
                    if root_tag == "feed":
                        items = self._parse_atom(xml_document)
                    # rss:
                    elif root_tag == "rss":
                        items = self._parse_rss(xml_document)
                    else:
                        items = []

                    num_results = len(items)
                    for i, (title, link) in enumerate(items):
                        # FIXME: Priority should depend on position of search engine, too
                        prio = self.get_priority() + num_results - i
                        match = OpenSearchMatch(name=website[0], priority=prio)
                        match.add_action(
                            OpenSearchViewResultAction(website[0], title,
                                                       link))
                        matches.append(match)

                except Exception, e:
                    # Fetching or parsing failed; fall back to a plain
                    # search action built from the raw URL.
                    match = OpenSearchMatch(name=website[0],
                                            priority=self.get_priority())
                    match.add_action(
                        OpenSearchSearchAction(website[0], query, url))
                    matches.append(match)
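
The template expansion in this method can be exercised on its own. A minimal sketch, with DEFAULT_VALUES and the template invented for illustration:

    import re

    DEFAULT_VALUES = {'count': '20', 'language': 'en'}   # illustrative defaults
    template = "http://example.com/search?q={searchTerms}&n={count}&hl={language?}&x={foo?}"

    url = template.replace("{searchTerms}", "hello+world")
    for key, val in DEFAULT_VALUES.items():
        url = url.replace("{%s}" % key, val)    # required parameter
        url = url.replace("{%s?}" % key, val)   # optional parameter
    url = re.sub(r'&(\w+?)={[^}]*}', '', url)   # drop parameters we cannot fill
    print(url)   # http://example.com/search?q=hello+world&n=20&hl=en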
Example 4
    def query(self, qstring):
        url = BASE_URL % urllib.urlencode(
                {'q': qstring,
                 'max-results': MAX_RESULTS})

        matches = []

        try:
            stream = urllib.urlopen(url, proxies=get_proxy())
        except (IOError, EOFError), msg:
            LOGGER.error("Could not open URL %s: %s, %s", url, msg[0], msg[1])
            return
Example 5
    def query(self, qstring):
        url = self.BASE_URL % urllib.urlencode(
                self._get_parameters_from_query(qstring))

        LOGGER.debug("Retrieving %s", url)

        matches = []

        try:
            stream = urllib.urlopen(url, proxies=get_proxy())
        except (IOError, EOFError), msg:
            LOGGER.error("Could not open URL %s: %s, %s", url, msg[0], msg[1])
            return
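
In these query methods, BASE_URL is evidently a format string with a single %s slot for the encoded parameters. A standalone sketch of the same composition; the endpoint is made up, and urllib.urlencode is the Python 2 spelling (Python 3 moved it to urllib.parse):

    import urllib

    BASE_URL = "http://example.com/api/search?%s"    # illustrative endpoint
    params = {'q': 'deskbar applet', 'max-results': 10}
    print(BASE_URL % urllib.urlencode(params))
    # e.g. http://example.com/api/search?q=deskbar+applet&max-results=10 (key order may vary)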
Example 6
    def query(self, qstring):
        url = self.BASE_URL % urllib.urlencode(
                {'appid': YAHOO_API_KEY,
                 'results': MAX_RESULTS,
                 'query': qstring})

        LOGGER.debug("Retrieving %s", url)

        matches = []

        try:
            stream = urllib.urlopen(url, proxies=get_proxy())
        except (IOError, EOFError), msg:
            LOGGER.error("Could not open URL %s: %s, %s", url, msg[0], msg[1])
            return
Example 7
    def query(self, qstring):
        params = {'v': self.PROTOCOL_VERSION,
                  'rsz': self.RESULT_SIZE,
                  'lr': self._lang,
                  'q': qstring}

        url = self.BASE_URL % urllib.urlencode(params)

        LOGGER.debug("Retrieving %s", url)

        matches = []
        results = []

        try:
            stream = urllib.urlopen(url, proxies=get_proxy())
        except (IOError, EOFError), msg:
            LOGGER.error("Could not open URL %s: %s, %s", url, msg[0], msg[1])
            return
Example 8
    def __init__(self, account, extra_widget_factory=None):
        """
        @param account: The L{Account} object to request credentials from
        @param extra_widget_factory: This optional parameter may point to a
                                     C{callable} returning a C{gtk.Widget} which
                                     will be packed into the L{AccountDialog}
                                     when it is spawned. The callable may also
                                     return C{None} if no widgets should be
                                     added.
        """
        proxies = get_proxy()
        urllib.FancyURLopener.__init__(self, proxies)

        self._account = account
        self._thread = None
        self._authentication_retries = 0
        self._success = True
        self._extra_widget_factory = extra_widget_factory

        LOGGER.debug("Using proxies: %s", proxies)
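
urllib's proxies argument, as used throughout these snippets, is a mapping from URL scheme to proxy URL (an empty mapping forces a direct connection). The project's get_proxy() is not shown here; a hypothetical stand-in based on the conventional http_proxy environment variable might look like:

    import os

    def get_proxy():
        # Hypothetical stand-in, not the project's implementation.
        http_proxy = os.environ.get('http_proxy')
        if http_proxy:
            return {'http': http_proxy}
        return {}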