Example #1
0
    def __fetch_url(self, params):
        """Download one file described by *params* and verify its checksum.

        params is a dict providing at least: 'target_file', 'checksum_type',
        'checksum', 'urls' (mirror base URLs), 'relative_path',
        'bytes_range', 'proxy', 'proxy_username', 'proxy_password',
        'proxies', 'ssl_ca_cert', 'ssl_client_cert', 'ssl_client_key'.

        Returns True when the target file exists and matches the checksum
        (including when it was already present), False when the download
        ultimately failed after exhausting retries/mirrors.
        """
        # Skip existing file if exists and matches checksum
        if not self.parent.force:
            if self.__is_file_done(local_path=params['target_file'],
                                   checksum_type=params['checksum_type'],
                                   checksum=params['checksum']):
                return True

        # Build urlgrabber options from the per-file download parameters.
        opts = URLGrabberOptions(ssl_ca_cert=params['ssl_ca_cert'],
                                 ssl_cert=params['ssl_client_cert'],
                                 ssl_key=params['ssl_client_key'],
                                 range=params['bytes_range'],
                                 proxy=params['proxy'],
                                 username=params['proxy_username'],
                                 password=params['proxy_password'],
                                 proxies=params['proxies'])
        mirrors = len(params['urls'])
        # Attempt at least once per mirror, or self.parent.retries times,
        # whichever is larger.
        for retry in range(max(self.parent.retries, mirrors)):
            fo = None
            # NOTE(review): self.mirror appears to be the index of the
            # current mirror in params['urls'], advanced by __next_mirror()
            # on failure -- confirm against the rest of the class.
            url = urlparse.urljoin(params['urls'][self.mirror],
                                   params['relative_path'])
            try:
                try:
                    fo = PyCurlFileObjectThread(url, params['target_file'],
                                                opts, self.curl)
                    # Check target file
                    if not self.__is_file_done(
                            file_obj=fo,
                            checksum_type=params['checksum_type'],
                            checksum=params['checksum']):
                        raise FailedDownloadError(
                            "Target file isn't valid. Checksum should be %s (%s)."
                            % (params['checksum'], params['checksum_type']))
                    break  # download succeeded and verified
                except (FailedDownloadError, URLGrabError):
                    # sys.exc_info() keeps the exception accessible in a way
                    # that is compatible with both Python 2 and 3 scoping.
                    e = sys.exc_info()[1]
                    if not self.__can_retry(retry, mirrors, opts, url, e):
                        return False
                    self.__next_mirror(mirrors)
                # RHEL 6 urlgrabber raises KeyboardInterrupt for example when there is no space left
                # but handle also other fatal exceptions
                except (KeyboardInterrupt, Exception):  # pylint: disable=W0703
                    e = sys.exc_info()[1]
                    self.parent.fail_download(e)
                    return False
            finally:
                # Close the transfer object on every path; when no transfer
                # object was created, remove any partial/stale target file.
                if fo:
                    fo.close()
                # Delete failed download file
                elif os.path.isfile(params['target_file']):
                    os.unlink(params['target_file'])

        return True
    def __fetch_url(self, params):
        """Download one file described by *params* and verify its checksum.

        params is a dict providing at least: 'target_file', 'checksum_type',
        'checksum', 'urls' (mirror base URLs), 'relative_path',
        'bytes_range', 'proxy', 'proxy_username', 'proxy_password',
        'proxies', 'ssl_ca_cert', 'ssl_client_cert', 'ssl_client_key',
        'http_headers', and optionally 'authtoken' for SUSE SCC channels.

        Returns True when the target file exists and matches the checksum
        (including when it was already present), False when the download
        ultimately failed after exhausting retries/mirrors.
        """
        # Skip existing file if exists and matches checksum
        if not self.parent.force:
            if self.__is_file_done(local_path=params['target_file'],
                                   checksum_type=params['checksum_type'],
                                   checksum=params['checksum']):
                return True

        # Build urlgrabber options from the per-file download parameters;
        # http_headers is converted to a tuple of (name, value) pairs.
        opts = URLGrabberOptions(ssl_ca_cert=params['ssl_ca_cert'],
                                 ssl_cert=params['ssl_client_cert'],
                                 ssl_key=params['ssl_client_key'],
                                 range=params['bytes_range'],
                                 proxy=params['proxy'],
                                 username=params['proxy_username'],
                                 password=params['proxy_password'],
                                 proxies=params['proxies'],
                                 http_headers=tuple(
                                     params['http_headers'].items()))
        mirrors = len(params['urls'])
        # Attempt at least once per mirror, or self.parent.retries times,
        # whichever is larger.
        for retry in range(max(self.parent.retries, mirrors)):
            fo = None
            # NOTE(review): self.mirror appears to be the index of the
            # current mirror in params['urls'], advanced by __next_mirror()
            # on failure -- confirm against the rest of the class.
            url = urlparse.urljoin(params['urls'][self.mirror],
                                   params['relative_path'])
            ## BEWARE: This hack is introduced in order to support SUSE SCC channels
            ## This also needs a patched urlgrabber AFAIK
            # The auth token travels in the mirror URL's query string, so the
            # URL is rebuilt by hand to keep the query after the joined path.
            if 'authtoken' in params and params['authtoken']:
                (scheme, netloc, path, query,
                 _) = urlparse.urlsplit(params['urls'][self.mirror])
                url = "%s://%s%s/%s?%s" % (scheme, netloc, path,
                                           params['relative_path'],
                                           query.rstrip('/'))
            try:
                try:
                    fo = PyCurlFileObjectThread(url, params['target_file'],
                                                opts, self.curl, self.parent)
                    # Check target file
                    if not self.__is_file_done(
                            file_obj=fo,
                            checksum_type=params['checksum_type'],
                            checksum=params['checksum']):
                        raise FailedDownloadError(
                            "Target file isn't valid. Checksum should be %s (%s)."
                            % (params['checksum'], params['checksum_type']))
                    break  # download succeeded and verified
                except (FailedDownloadError, URLGrabError):
                    # sys.exc_info() keeps the exception accessible in a way
                    # that is compatible with both Python 2 and 3 scoping.
                    e = sys.exc_info()[1]
                    # urlgrabber-3.10.1-9 throws URLGrabError for both
                    # 'HTTP Error 404 - Not Found' and 'No space left on device', so
                    # workaround for this is check error message:
                    if 'No space left on device' in str(e):
                        self.parent.fail_download(e)
                        return False

                    if not self.__can_retry(retry, mirrors, opts, url, e):
                        return False
                    self.__next_mirror(mirrors)
                # RHEL 6 urlgrabber raises KeyboardInterrupt for example when there is no space left
                # but handle also other fatal exceptions
                except (KeyboardInterrupt, Exception):  # pylint: disable=W0703
                    e = sys.exc_info()[1]
                    self.parent.fail_download(e)
                    return False
            finally:
                # Close the transfer object on every path; when no transfer
                # object was created, remove any partial/stale target file.
                if fo:
                    fo.close()
                # Delete failed download file
                elif os.path.isfile(params['target_file']):
                    os.unlink(params['target_file'])

        return True
Example #3
0
 def __init__(self, resultlist=None):
     """Initialize the stub with an optional queue of canned results.

     resultlist -- results to hand back in order; a falsy value (None or
     an empty list) is replaced by a fresh empty list.
     """
     self.index = 0
     self.calls = []
     self.opts = URLGrabberOptions()
     # Preserve the original truthiness semantics of `resultlist or []`.
     self.resultlist = resultlist if resultlist else []