def run(self):
    """Thread entry point: fetch self.URL with a POST request into self.data.

    Clears the progress cancel flag, performs the download via the
    instance's urlretrieve helper, and discards the result if the user
    cancelled mid-download.  On IOError the progress dialog is hidden
    and a connection-error dialog is shown to the user.
    """
    try:
        self.progress.status = False
        self.data = self.urlretrieve(self.URL, self.postdata, self.referer)
        if self.progress.status:
            # user cancelled while the download was in flight: drop the data
            self.data = None
    except IOError:
        self.progress.dialog.hide()
        gutils.urllib_error(_("Connection error"), self.parent_window)
        # BUGFIX: the original called self.join() here, but a thread must
        # never join itself -- threading.Thread.join() raises RuntimeError
        # for the current thread.  Returning from run() ends the thread.
def run(self):
    """Thread entry point: download self.URL to self.destination.

    Uses urllib's urlretrieve with self.hook as the progress callback;
    the (filename, headers) result is stored in self.html.  If the user
    cancelled mid-download the result is discarded.  On IOError the
    progress dialog is hidden and a connection-error dialog is shown.
    """
    try:
        self.html = urlretrieve(self.URL, self.destination, self.hook)
        if self.progress.status:
            # user cancelled while the download was in flight: drop the result
            self.html = []
    except IOError:
        self.progress.dialog.hide()
        gutils.urllib_error(_("Connection error"), self.parent_window)
        # BUGFIX: the original called self.join() here, but a thread must
        # never join itself -- threading.Thread.join() raises RuntimeError
        # for the current thread.  Returning from run() ends the thread.
def open_search(self, parent_window, destination=None):
    """Run the plugin's search query in a background Retriever thread.

    Builds the final URL from self.url/self.title (substituting '%s' if
    present, otherwise appending the title for GET requests), starts a
    Retriever thread, and pumps the GTK main loop while pulsing the
    progress bar until the download finishes.

    parent_window -- toplevel window used for progress/error dialogs
    destination   -- optional file path; when given, content is saved
                     there and not loaded into self.page

    Returns True on success (page loaded or saved), False on a failed
    or empty download.
    """
    self.titles = [""]
    self.ids = [""]
    # NOTE: find('%s') > 0 misses a placeholder at position 0 -- presumably
    # never happens for real search URLs, but worth confirming.
    if self.url.find('%s') > 0:
        self.url = self.url % self.title
        self.url = string.replace(self.url, ' ', '%20')
    else:
        if not self.usepostrequest:
            # GET request: the title is appended to the base URL
            self.url = string.replace(self.url + self.title, ' ', '%20')
    try:
        # encode with the plugin's declared charset, fall back to UTF-8
        url = self.url.encode(self.encode)
    except UnicodeEncodeError:
        url = self.url.encode('utf-8')
    self.progress.set_data(parent_window, _("Searching"), _("Wait a moment"), True)
    if self.usepostrequest:
        postdata = self.get_postdata()
        retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2, postdata=postdata)
    else:
        retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2)
    retriever.start()
    # busy-wait on the worker thread, keeping the GUI responsive
    while retriever.isAlive():
        self.progress.pulse()
        if self.progress.status:
            # user hit cancel: wait for the worker to finish
            retriever.join()
        while gtk.events_pending():
            gtk.main_iteration()
    try:
        if retriever.exception is None:
            if destination:
                # caller gave an explicit destination file
                # don't care about the content
                return True
            if retriever.html:
                ifile = file(retriever.html[0], 'rb')
                try:
                    self.page = ifile.read()
                finally:
                    ifile.close()
                # check for gzip compressed pages before decoding to unicode
                # ('\037\213' is the two-byte gzip magic number)
                if len(self.page) > 2 and self.page[0:2] == '\037\213':
                    self.page = gutils.decompress(self.page)
                self.page = self.page.decode(self.encode, 'replace')
            else:
                return False
        else:
            self.progress.hide()
            gutils.urllib_error(_("Connection error"), parent_window)
            return False
    except IOError:
        log.exception('')
    finally:
        # drop urllib's temporary download cache
        urlcleanup()
    return True
def open_search(self, parent_window, destination=None):
    """Run the plugin's search query in a background Retriever thread.

    GET-only variant: builds the final URL from self.url/self.title
    (substituting '%s' if present, otherwise appending the title),
    starts a Retriever thread, and pumps the GTK main loop while
    pulsing the progress bar until the download finishes.

    parent_window -- toplevel window used for progress/error dialogs
    destination   -- optional file path; when given, content is saved
                     there and not loaded into self.page

    Returns True on success (page loaded or saved), False on a failed
    or empty download.
    """
    self.titles = [""]
    self.ids = [""]
    # NOTE: find('%s') > 0 misses a placeholder at position 0 -- presumably
    # never happens for real search URLs, but worth confirming.
    if self.url.find('%s') > 0:
        self.url = self.url % self.title
        self.url = string.replace(self.url, ' ', '%20')
    else:
        self.url = string.replace(self.url + self.title, ' ', '%20')
    try:
        # encode with the plugin's declared charset, fall back to UTF-8
        url = self.url.encode(self.encode)
    except UnicodeEncodeError:
        url = self.url.encode('utf-8')
    self.progress.set_data(parent_window, _("Searching"), _("Wait a moment"), True)
    retriever = Retriever(url, parent_window, self.progress, destination, useurllib2=self.useurllib2)
    retriever.start()
    # busy-wait on the worker thread, keeping the GUI responsive
    while retriever.isAlive():
        self.progress.pulse()
        if self.progress.status:
            # user hit cancel: wait for the worker to finish
            retriever.join()
        while gtk.events_pending():
            gtk.main_iteration()
    try:
        if retriever.exception is None:
            if destination:
                # caller gave an explicit destination file
                # don't care about the content
                return True
            if retriever.html:
                ifile = file(retriever.html[0], 'rb')
                try:
                    self.page = ifile.read()
                finally:
                    ifile.close()
                # check for gzip compressed pages before decoding to unicode
                # ('\037\213' is the two-byte gzip magic number)
                if len(self.page) > 2 and self.page[0:2] == '\037\213':
                    self.page = gutils.decompress(self.page)
                self.page = self.page.decode(self.encode, 'replace')
            else:
                return False
        else:
            self.progress.hide()
            gutils.urllib_error(_("Connection error"), parent_window)
            return False
    except IOError:
        log.exception('')
    finally:
        # drop urllib's temporary download cache
        urlcleanup()
    return True
def run_get(self):
    """Look up a single Amazon item by ASIN into self.result.

    Sets the Amazon web-service license key, then queries by ASIN
    (self.title) with the 'Large' response type for self.locale.
    Amazon-level errors are shown via self.debug; an IOError (network
    failure) hides the progress dialog and shows a connection-error
    dialog to the user.
    """
    self.result = None
    try:
        amazon.setLicense("04GDDMMXX8X9CJ1B22G2")
        # get by ASIN
        try:
            self.result = amazon.searchByASIN(self.title, type="Large", locale=self.locale)
        except amazon.AmazonError, e:
            self.debug.show(e.Message)
    except IOError:
        self.progress.dialog.hide()
        gutils.urllib_error(_("Connection error"), self.parent_window)
        # NOTE(review): self.suspend() is not a standard threading.Thread
        # method -- presumably defined on a project base class; confirm it
        # actually stops this worker.
        self.suspend()
if len(data) > 2 and data[0:2] == '\037\213': data = gutils.decompress(data) try: # try to decode it strictly if self.encode: data = data.decode(self.encode) except UnicodeDecodeError, exc: # something is wrong, perhaps a wrong character set # or some pages are not as strict as they should be # (like OFDb, mixes utf8 with iso8859-1) # I want to log the error here so that I can find it # but the program should not terminate log.error(exc) data = data.decode(self.encode, 'ignore') else: gutils.urllib_error(_("Connection error"), self.parent_window) except IOError: log.exception('') if url is None: self.page = data urlcleanup() return data def fetch_picture(self): if self.image_url: tmp_dest = tempfile.mktemp(prefix='poster_', dir=self.locations['temp']) self.image = tmp_dest.split('poster_', 1)[1] dest = "%s.jpg" % tmp_dest try: self.progress.set_data(self.parent_window, _("Fetching poster"), _("Wait a moment"), False) retriever = Retriever(self.image_url, self.parent_window, self.progress, dest,
if len(data) > 2 and data[0:2] == '\037\213': data = gutils.decompress(data) try: # try to decode it strictly if self.encode: data = data.decode(self.encode) except UnicodeDecodeError, exc: # something is wrong, perhaps a wrong character set # or some pages are not as strict as they should be # (like OFDb, mixes utf8 with iso8859-1) # I want to log the error here so that I can find it # but the program should not terminate log.error(exc) data = data.decode(self.encode, 'ignore') else: gutils.urllib_error(_("Connection error"), self.parent_window) except IOError: log.exception('') if url is None: self.page = data urlcleanup() return data def fetch_picture(self): if self.image_url: tmp_dest = tempfile.mktemp(prefix='poster_', dir=self.locations['temp']) self.image = tmp_dest.split('poster_', 1)[1] dest = "%s.jpg" % tmp_dest try: self.progress.set_data(self.parent_window,