Example no. 1
0
    def urlgrab(self, url, filename=None, **kwargs):
        """urlgrab(url) copy the file to the local filesystem.

        Fetches *url* with urllib2 and writes the body to *filename*;
        when no filename is given, the URL's path component (request
        selector, minus its leading '/') is used, i.e. a path relative
        to the current directory.  HTTP errors are retried with an
        exponentially growing delay.

        NOTE(review): this block is truncated in the source -- the
        trailing ``finally:`` clause has no body.  It presumably
        decremented ``retries``, broke out of the loop on success, and
        closed ``out``/``response``; without that, the loop as shown
        would never terminate.
        """
        request = self._request(url)
        if filename is None:
            # Default to the URL's path, made relative by stripping '/'.
            filename = request.get_selector()
            if filename.startswith('/'):
                filename = filename[1:]

        response = None
        retries = self.retries
        delay = self.delay
        # NOTE(review): text mode 'w+' and no visible close() -- binary
        # payloads may be corrupted on Windows and the handle can leak.
        out = open(filename, 'w+')
        while retries > 0:
            try:
                response = urllib2.urlopen(request)
                # Stream the body in fixed-size chunks.
                buff = response.read(BUFFER_SIZE)
                while buff:
                    out.write(buff)
                    buff = response.read(BUFFER_SIZE)
            except urllib2.HTTPError, e:
                if retries > 0:
                    # Exponential backoff before the next attempt.
                    time.sleep(delay)
                    delay *= self.backoff
                else:
                    # NOTE(review): unreachable as shown -- the while
                    # condition guarantees retries > 0 here.
                    # Wrap exception as URLGrabError so that YumRepository catches it
                    from urlgrabber.grabber import URLGrabError
                    # NOTE(review): 'tried' is duplicated in the final
                    # message ("... tried tried N time(s)"), and nothing
                    # is raised when self.retries == 0.
                    msg = '%s on %s tried' % (e, url)
                    if self.retries > 0:
                        msg += ' tried %d time(s)' % (self.retries)
                        new_e = URLGrabError(14, msg)
                        new_e.code = e.code
                        new_e.exception = e
                        new_e.url = url
                        raise new_e
            finally:
Example no. 2
0
    def urlgrab(self, url, filename=None, **kwargs):
        """urlgrab(url) copy the file to the local filesystem.

        Fetches *url* with urllib2 and writes the body to *filename*;
        when no filename is given, the URL's path component (request
        selector, minus its leading '/') is used, i.e. a path relative
        to the current directory.  HTTP errors are retried with an
        exponentially growing delay.

        NOTE(review): this block is truncated in the source -- the
        trailing ``finally:`` clause has no body.  It presumably
        decremented ``retries``, broke out of the loop on success, and
        closed ``out``/``response``; without that, the loop as shown
        would never terminate.
        """
        request = self._request(url)
        if filename is None:
            # Default to the URL's path, made relative by stripping '/'.
            filename = request.get_selector()
            if filename.startswith('/'):
                filename = filename[1:]

        response = None
        retries = self.retries
        delay = self.delay
        # NOTE(review): text mode 'w+' and no visible close() -- binary
        # payloads may be corrupted on Windows and the handle can leak.
        out = open(filename, 'w+')
        while retries > 0:
            try:
                response = urllib2.urlopen(request)
                # Stream the body in fixed-size chunks.
                buff = response.read(BUFFER_SIZE)
                while buff:
                    out.write(buff)
                    buff = response.read(BUFFER_SIZE)
            except urllib2.HTTPError, e:
                if retries > 0:
                    # Exponential backoff before the next attempt.
                    time.sleep(delay)
                    delay *= self.backoff
                else:
                    # NOTE(review): unreachable as shown -- the while
                    # condition guarantees retries > 0 here.
                    # Wrap exception as URLGrabError so that YumRepository catches it
                    from urlgrabber.grabber import URLGrabError
                    # NOTE(review): 'tried' is duplicated in the final
                    # message ("... tried tried N time(s)"), and nothing
                    # is raised when self.retries == 0.
                    msg = '%s on %s tried' % (e, url)
                    if self.retries > 0:
                        msg += ' tried %d time(s)' % (self.retries)
                        new_e = URLGrabError(14, msg)
                        new_e.code = e.code
                        new_e.exception = e
                        new_e.url = url
                        raise new_e
            finally:
Example no. 3
0
    def urlgrab(self, url, filename=None, **kwargs):
        """urlgrab(url) copy the file to the local filesystem.

        Fetches *url* with urllib2 and streams the body to *filename*.
        When *filename* is None it is derived from the URL's path
        (request selector) with the leading '/' stripped, i.e. a path
        relative to the current directory.

        Raises:
            URLGrabError: code 14, wrapping any urllib2.HTTPError so
                that YumRepository catches it; the ``code``,
                ``exception`` and ``url`` attributes carry the
                original details.
        """
        request = self._request(url)
        if filename is None:
            filename = request.get_selector()
            if filename.startswith("/"):
                filename = filename[1:]

        response = None
        # 'wb' instead of 'w+': the payload is arbitrary bytes, and text
        # mode would corrupt it on platforms that translate newlines.
        # The try/finally guarantees both handles are closed on every
        # path (the original leaked them).
        out = open(filename, "wb")
        try:
            response = urllib2.urlopen(request)
            # Stream in 8 KiB chunks instead of slurping into memory.
            buff = response.read(8192)
            while buff:
                out.write(buff)
                buff = response.read(8192)
        except urllib2.HTTPError as e:
            # Wrap exception as URLGrabError so that YumRepository catches it
            from urlgrabber.grabber import URLGrabError

            new_e = URLGrabError(14, "%s on %s" % (e, url))
            new_e.code = e.code
            new_e.exception = e
            new_e.url = url
            raise new_e
        finally:
            out.close()
            if response is not None:
                response.close()
Example no. 4
0
    def urlgrab(self, url, filename=None, **kwargs):
        # Debug-instrumented variant of urlgrab(): downloads *url* into
        # *filename* with retry/backoff, printing request and error
        # details along the way.
        #
        # NOTE(review): this block is truncated in the source -- the
        # trailing ``finally:`` clause has no body.  It presumably
        # decremented ``retries``, broke on success, and closed
        # ``out``/``response``.
        print("urlgrab starting %s" % (url))
        # NOTE(review): the string below is dead code, not a docstring --
        # it is not the first statement of the function.
        """urlgrab(url) copy the file to the local filesystem."""
        request = self._request(url)
        self.pretty_print_POST(request)
        if filename is None:
            # Default to the URL's path, made relative by stripping '/'.
            filename = request.get_selector()
            if filename.startswith('/'):
                filename = filename[1:]

        response = None
        retries = self.retries
        delay = self.delay
        # NOTE(review): text mode 'w+' and no visible close() -- handle
        # leak and possible binary corruption on Windows.
        out = open(filename, 'w+')
        while retries > 0:
            try:
                print("request to open %s:" % (request.get_full_url()))
                print("request header %s:" % (request.header_items()))
                response = urllib2.urlopen(request)
                # Stream the body in fixed-size chunks.
                buff = response.read(BUFFER_SIZE)
                while buff:
                    out.write(buff)
                    buff = response.read(BUFFER_SIZE)
            except urllib2.HTTPError, e:
                print("error opening url %s %s %s" % (e, e.code, e.reason))
                print("error headers %s" % (e.headers))
                if retries > 0:
                    print("retry...")
                    # Exponential backoff before the next attempt.
                    time.sleep(delay)
                    delay *= self.backoff
                else:
                    # NOTE(review): unreachable as shown -- the while
                    # condition guarantees retries > 0 here.
                    print("no more retries.  just give up")
                    # Wrap exception as URLGrabError so that YumRepository catches it
                    from urlgrabber.grabber import URLGrabError
                    # NOTE(review): 'tried' is duplicated in the final
                    # message, and nothing is raised when
                    # self.retries == 0.
                    msg = '%s on %s tried' % (e, url)
                    if self.retries > 0:
                        msg += ' tried %d time(s)' % (self.retries)
                        new_e = URLGrabError(14, msg)
                        new_e.code = e.code
                        new_e.exception = e
                        new_e.url = url
                        raise new_e
            finally:
Example no. 5
0
 def urlgrab(self, url, filename, text = None, **kwargs):
     # Copy *url* into *filename*.  file:// URLs are resolved to the
     # local path and returned directly; anything else is streamed to
     # disk via streamfile().
     #
     # NOTE(review): truncated in the source -- the outer ``try:`` has
     # no matching except/finally (presumably ``finally: f.close()``)
     # and no ``return filename`` is visible.
     if url.startswith('file://'):
         # Strip the scheme and serve the local file as-is.
         # NOTE(review): 'file' shadows the py2 builtin of that name.
         file = url.replace('file://', '', 1)
         if os.path.isfile(file):
             return file
         else:
             raise URLGrabError(2, 'Local file \'%s\' does not exist' % file)
     f = open(filename, 'wb')
     try:
         try:
             # Stream the remote file chunk-by-chunk to disk.
             for i in streamfile(url, progress_obj=self.progress_obj, text=text):
                 f.write(i)
         except urllib2.HTTPError, e:
             # Re-wrap as URLGrabError (code 14) so callers see a
             # uniform grabber error with url/code/exception attached.
             exc = URLGrabError(14, str(e))
             exc.url = url
             exc.exception = e
             exc.code = e.code
             raise exc
         except IOError, e:
             raise URLGrabError(4, str(e))
Example no. 6
0
File: fetch.py — Project: JLahti/osc
 def urlgrab(self, url, filename, text=None, **kwargs):
     """Copy *url* to the local path *filename* and return that path.

     ``file://`` URLs are not downloaded: the referenced local path is
     returned directly, or URLGrabError(2) is raised if it does not
     exist.  Any other URL is streamed to *filename* via
     ``streamfile``.

     Raises:
         URLGrabError: code 2 for a missing local file, code 14 for an
             HTTP error (with ``url``/``code``/``exception`` attached),
             code 4 for an IOError while streaming.
     """
     if url.startswith("file://"):
         f = url.replace("file://", "", 1)
         if os.path.isfile(f):
             return f
         else:
             raise URLGrabError(2, "Local file '%s' does not exist" % f)
     # open() instead of the deprecated, py2-only file() builtin; the
     # context manager still guarantees the handle is closed.
     with open(filename, "wb") as f:
         try:
             for chunk in streamfile(url, progress_obj=self.progress_obj, text=text):
                 f.write(chunk)
         except HTTPError as e:
             # Re-wrap so callers get a uniform URLGrabError with the
             # original url/code/exception attached.
             exc = URLGrabError(14, str(e))
             exc.url = url
             exc.exception = e
             exc.code = e.code
             raise exc
         except IOError as e:
             raise URLGrabError(4, str(e))
     return filename
Example no. 7
0
File: fetch.py — Project: uaoh/osc
 def urlgrab(self, url, filename, text = None, **kwargs):
     # Copy *url* into *filename*.  file:// URLs are resolved to the
     # local path and returned directly; anything else is streamed to
     # disk via streamfile().
     #
     # NOTE(review): truncated in the source -- the outer ``try:`` has
     # no matching except/finally (presumably ``finally: f.close()``)
     # and no ``return filename`` is visible.
     if url.startswith('file://'):
         # Strip the scheme and serve the local file as-is.
         # NOTE(review): 'file' shadows the py2 builtin of that name.
         file = url.replace('file://', '', 1)
         if os.path.isfile(file):
             return file
         else:
             raise URLGrabError(2, 'Local file \'%s\' does not exist' % file)
     f = open(filename, 'wb')
     try:
         try:
             # Stream the remote file chunk-by-chunk to disk.
             for i in streamfile(url, progress_obj=self.progress_obj, text=text):
                 f.write(i)
         except urllib2.HTTPError, e:
             # Re-wrap as URLGrabError (code 14) so callers see a
             # uniform grabber error with url/code/exception attached.
             exc = URLGrabError(14, str(e))
             exc.url = url
             exc.exception = e
             exc.code = e.code
             raise exc
         except IOError, e:
             raise URLGrabError(4, str(e))
Example no. 8
0
    def urlgrab(self, url, filename=None, **kwargs):
        """urlgrab(url) copy the file to the local filesystem.

        Fetches *url* with urllib2 and streams the body to *filename*.
        When *filename* is None it is derived from the URL's path
        (request selector) with the leading '/' stripped, i.e. a path
        relative to the current directory.

        Raises:
            URLGrabError: code 14, wrapping any urllib2.HTTPError so
                that YumRepository catches it; the ``code``,
                ``exception`` and ``url`` attributes carry the
                original details.
        """
        request = self._request(url)
        if filename is None:
            filename = request.get_selector()
            if filename.startswith('/'):
                filename = filename[1:]

        response = None
        # 'wb' instead of 'w+': the payload is arbitrary bytes, and text
        # mode would corrupt it on platforms that translate newlines.
        # The try/finally guarantees both handles are closed on every
        # path (the original leaked them).
        out = open(filename, 'wb')
        try:
            response = urllib2.urlopen(request)
            # Stream in 8 KiB chunks instead of slurping into memory.
            buff = response.read(8192)
            while buff:
                out.write(buff)
                buff = response.read(8192)
        except urllib2.HTTPError as e:
            # Wrap exception as URLGrabError so that YumRepository catches it
            from urlgrabber.grabber import URLGrabError
            new_e = URLGrabError(14, '%s on %s' % (e, url))
            new_e.code = e.code
            new_e.exception = e
            new_e.url = url
            raise new_e
        finally:
            out.close()
            if response is not None:
                response.close()