def _download_package(self, package_id, repository_id, download,
                      download_path, checksum, resume=True):
    """
    Download a single package file from the mirrors of repository_id.

    Mirrors are tried in order. A per-mirror failure score held by
    StatusInterface() temporarily blacklists mirrors whose score reached
    the threshold (>= 30). For each mirror, an edelta (delta) fetch is
    attempted first, falling back to a plain file download.

    @return: 0 on success, 1 if the user discarded the download,
        3 if every mirror has been tried without success.
    """
    avail_data = self._settings['repositories']['available']
    excluded_data = self._settings['repositories']['excluded']
    repo = self._entropy.open_repository(repository_id)
    # grab original repo, if any and use it if available
    # this is done in order to support "equo repo merge" feature
    # allowing client-side repository package metadata moves.
    original_repo = repo.getInstalledPackageRepository(package_id)
    if (original_repo != repository_id) and (
        original_repo not in avail_data) and (original_repo is not None):
        # build up a new uris list, at least try, hoping that
        # repository is just shadowing original_repo
        # for example: original_repo got copied to repository, without
        # copying packages, which would be useless. like it happens
        # with sabayon-weekly
        uris = self._build_uris_list(original_repo, repository_id)
    else:
        # [::-1] below: reversed copies of the configured mirror lists
        if original_repo in avail_data:
            uris = avail_data[original_repo]['packages'][::-1]
            if repository_id in avail_data:
                uris += avail_data[repository_id]['packages'][::-1]
        elif original_repo in excluded_data:
            uris = excluded_data[original_repo]['packages'][::-1]
            if repository_id in avail_data:
                uris += avail_data[repository_id]['packages'][::-1]
        else:
            uris = avail_data[repository_id]['packages'][::-1]

    remaining = set(uris)
    mirror_status = StatusInterface()

    mirrorcount = 0
    for uri in uris:

        if not remaining:
            # tried all the mirrors, quitting for error
            mirror_status.set_working_mirror(None)
            return 3

        mirror_status.set_working_mirror(uri)
        mirrorcount += 1
        mirror_count_txt = "( mirror #%s ) " % (mirrorcount, )
        url = uri + "/" + download

        # check if uri is sane
        if mirror_status.get_failing_mirror_status(uri) >= 30:
            # ohohoh!
            # set to 30 for convenience
            mirror_status.set_failing_mirror_status(uri, 30)
            mytxt = mirror_count_txt
            mytxt += blue(" %s: ") % (_("Mirror"), )
            mytxt += red(self._get_url_name(uri))
            mytxt += " - %s." % (_("maximum failure threshold reached"), )
            self._entropy.output(mytxt, importance=1,
                level="warning", header=red(" ## "))

            if mirror_status.get_failing_mirror_status(uri) == 30:
                # put to 75 then decrement by 4 so we
                # won't reach 30 anytime soon ahahaha
                mirror_status.add_failing_mirror(uri, 45)
            elif mirror_status.get_failing_mirror_status(uri) > 31:
                # now decrement each time this point is reached,
                # if will be back < 30, then equo will try to use it again
                mirror_status.add_failing_mirror(uri, -4)
            else:
                # put to 0 - reenable mirror, welcome back uri!
                mirror_status.set_failing_mirror_status(uri, 0)

            remaining.discard(uri)
            continue

        do_resume = resume
        # budget of timeout retries on this one mirror
        timeout_try_count = 50
        while True:

            txt = mirror_count_txt
            txt += blue("%s: ") % (_("Downloading from"), )
            txt += red(self._get_url_name(uri))
            self._entropy.output(txt, importance=1, level="warning",
                header=red(" ## "))

            resumed = False
            # try the delta (edelta) fetch first, cheaper than a
            # full package download
            exit_st, data_transfer = self._try_edelta_fetch(
                url, download_path, checksum, do_resume)
            if exit_st > 0:
                # fallback to package file download
                exit_st, data_transfer, resumed = self._download_file(
                    url, download_path, package_id=package_id,
                    repository_id=repository_id, digest=checksum,
                    resume=do_resume)

            if exit_st == 0:
                txt = mirror_count_txt
                txt += "%s: " % (blue(_("Successfully downloaded from")), )
                txt += red(self._get_url_name(uri))
                human_bytes = entropy.tools.bytes_into_human(data_transfer)
                txt += " %s %s/%s" % (_("at"), human_bytes, _("second"), )
                self._entropy.output(txt, importance=1, level="info",
                    header=red(" ## "))

                mirror_status.set_working_mirror(None)
                return 0

            elif resumed and (exit_st not in (-3, -4, -100, )):
                # a resumed download failed for a generic reason:
                # retry once from scratch on the same mirror
                do_resume = False
                continue

            error_message = mirror_count_txt
            error_message += blue("%s: %s") % (
                _("Error downloading from"),
                red(self._get_url_name(uri)),
            )
            # something bad happened
            if exit_st == -1:
                error_message += " - %s." % (
                    _("file not available on this mirror"), )
            elif exit_st == -2:
                mirror_status.add_failing_mirror(uri, 1)
                error_message += " - %s." % (_("wrong checksum"), )
                # If file is fetched (with no resume) and its
                # complete better to enforce resume to False.
                if (data_transfer < 1) and do_resume:
                    error_message += " %s." % (_("Disabling resume"), )
                    do_resume = False
                    continue
            elif exit_st == -3:
                mirror_status.add_failing_mirror(uri, 3)
                error_message += " - %s." % (_("not found"), )
            elif exit_st == -4:
                # timeout!
                timeout_try_count -= 1
                if timeout_try_count > 0:
                    error_message += " - %s." % (
                        _("timeout, retrying on this mirror"), )
                else:
                    error_message += " - %s." % (_("timeout, giving up"), )
            elif exit_st == -100:
                error_message += " - %s." % (_("discarded download"), )
            else:
                mirror_status.add_failing_mirror(uri, 5)
                error_message += " - %s." % (_("unknown reason"), )

            self._entropy.output(error_message, importance=1,
                level="warning", header=red(" ## "))

            if exit_st == -4:
                # timeout
                if timeout_try_count > 0:
                    continue
            elif exit_st == -100:
                # user discarded fetch
                mirror_status.set_working_mirror(None)
                return 1

            remaining.discard(uri)
            # make sure we don't have nasty issues
            if not remaining:
                mirror_status.set_working_mirror(None)
                return 3

            # give up on this mirror, move to the next uri
            break

    mirror_status.set_working_mirror(None)
    return 0
def _download_packages(self, download_list):
    """
    Internal function. Download packages.

    Multi-package counterpart of _download_package(): fetches every
    (package_id, repository_id, file_name, checksum, signatures) tuple
    in download_list through one multifetch pass per mirror, keeping
    only the failed items for the next pass on the next-best mirror.

    @return: 2-tuple (exit status, list of tuples still to download);
        status is 0 on success, 1 if the user discarded the fetch,
        3 when no working mirror is left for at least one repository.
    """
    avail_data = self._settings['repositories']['available']
    excluded_data = self._settings['repositories']['excluded']

    # per-repository ordered list of candidate package mirrors
    repo_uris = {}
    for pkg_id, repository_id, fname, cksum, _signatures in download_list:
        repo = self._entropy.open_repository(repository_id)
        # grab original repo, if any and use it if available
        # this is done in order to support "equo repo merge" feature
        # allowing client-side repository package metadata moves.
        original_repo = repo.getInstalledPackageRepository(pkg_id)
        if (original_repo != repository_id) and (
            original_repo not in avail_data) and (
                original_repo is not None):
            # build up a new uris list, at least try, hoping that
            # repository is just shadowing original_repo
            # for example: original_repo got copied to repository, without
            # copying packages, which would be useless. like it happens
            # with sabayon-weekly
            uris = self._build_uris_list(original_repo, repository_id)
        else:
            # NOTE(review): unlike _download_package(), no
            # "repository_id in avail_data" guard before the += below;
            # a KeyError would be raised if repository_id is missing
            # from avail_data -- confirm callers guarantee availability.
            if original_repo in avail_data:
                uris = avail_data[original_repo]['packages'][::-1]
                uris += avail_data[repository_id]['packages'][::-1]
            elif original_repo in excluded_data:
                uris = excluded_data[original_repo]['packages'][::-1]
                uris += avail_data[repository_id]['packages'][::-1]
            else:
                uris = avail_data[repository_id]['packages'][::-1]

        obj = repo_uris.setdefault(repository_id, [])
        # append at the beginning
        new_ones = [x for x in uris if x not in obj][::-1]
        for new_obj in new_ones:
            obj.insert(0, new_obj)

    # NOTE(review): dict.copy() is shallow -- "remaining" shares the
    # per-repository mirror lists with repo_uris, so the .remove()/.pop()
    # calls below mutate repo_uris too (and thus the "#N" mirror numbers
    # computed via repo_uris[...].index()). Confirm this is intended.
    remaining = repo_uris.copy()
    mirror_status = StatusInterface()

    def get_best_mirror(repository_id):
        # first remaining mirror for repository_id, None when exhausted
        try:
            return remaining[repository_id][0]
        except IndexError:
            return None

    def update_download_list(down_list, failed_down):
        # keep only the entries whose download URL is listed
        # in failed_down
        newlist = []
        for pkg_id, repository_id, fname, cksum, signatures in down_list:
            p_uri = get_best_mirror(repository_id)
            # NOTE(review): p_uri may be None here, making os.path.join
            # raise TypeError -- presumably mirrors are never exhausted
            # at this point; confirm.
            p_uri = os.path.join(p_uri, fname)
            if p_uri not in failed_down:
                continue
            newlist.append(
                (pkg_id, repository_id, fname, cksum, signatures)
            )
        return newlist

    # return True: for failing, return False: for fine
    def mirror_fail_check(repository_id, best_mirror):

        # check if uri is sane
        if not mirror_status.get_failing_mirror_status(best_mirror) >= 30:
            return False
        # set to 30 for convenience
        mirror_status.set_failing_mirror_status(best_mirror, 30)

        mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
        txt = "( mirror #%s ) %s %s - %s" % (
            mirrorcount,
            blue(_("Mirror")),
            red(self._get_url_name(best_mirror)),
            _("maximum failure threshold reached"),
        )
        self._entropy.output(
            txt,
            importance=1,
            level="warning",
            header=red(" ## ")
        )

        # same failure-score bookkeeping as in _download_package()
        if mirror_status.get_failing_mirror_status(best_mirror) == 30:
            mirror_status.add_failing_mirror(best_mirror, 45)
        elif mirror_status.get_failing_mirror_status(best_mirror) > 31:
            mirror_status.add_failing_mirror(best_mirror, -4)
        else:
            mirror_status.set_failing_mirror_status(best_mirror, 0)

        try:
            remaining[repository_id].remove(best_mirror)
        except ValueError:
            # ignore
            pass
        return True

    def show_download_summary(down_list):
        # announce, per package, which mirror is about to be used
        for _pkg_id, repository_id, fname, _cksum, _signatures in down_list:
            best_mirror = get_best_mirror(repository_id)
            mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
            basef = os.path.basename(fname)
            txt = "( mirror #%s ) [%s] %s %s" % (
                mirrorcount,
                brown(basef),
                blue("@"),
                red(self._get_url_name(best_mirror)),
            )
            self._entropy.output(
                txt,
                importance=1,
                level="info",
                header=red(" ## ")
            )

    def show_successful_download(down_list, data_transfer):
        # per-package success lines, then one aggregated rate line
        for _pkg_id, repository_id, fname, _cksum, _signatures in down_list:
            best_mirror = get_best_mirror(repository_id)
            mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
            basef = os.path.basename(fname)
            txt = "( mirror #%s ) [%s] %s %s %s" % (
                mirrorcount,
                brown(basef),
                darkred(_("success")),
                blue("@"),
                red(self._get_url_name(best_mirror)),
            )
            self._entropy.output(
                txt,
                importance=1,
                level="info",
                header=red(" ## ")
            )
        if data_transfer:
            txt = " %s: %s%s%s" % (
                blue(_("Aggregated transfer rate")),
                bold(entropy.tools.bytes_into_human(data_transfer)),
                darkred("/"),
                darkblue(_("second")),
            )
            self._entropy.output(
                txt,
                importance=1,
                level="info",
                header=red(" ## ")
            )

    def show_download_error(down_list, p_exit_st):
        # explain p_exit_st per package and bump failure scores for
        # checksum/unknown errors
        for _pkg_id, repository_id, _fname, _cksum, _signs in down_list:
            best_mirror = get_best_mirror(repository_id)
            mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
            txt = "( mirror #%s ) %s: %s" % (
                mirrorcount,
                blue(_("Error downloading from")),
                red(self._get_url_name(best_mirror)),
            )
            if p_exit_st == -1:
                txt += " - %s." % (
                    _("data not available on this mirror"),)
            elif p_exit_st == -2:
                mirror_status.add_failing_mirror(best_mirror, 1)
                txt += " - %s." % (_("wrong checksum"),)
            elif p_exit_st == -3:
                txt += " - %s." % (_("not found"),)
            elif p_exit_st == -4:
                # timeout!
                txt += " - %s." % (_("timeout error"),)
            elif p_exit_st == -100:
                txt += " - %s." % (_("discarded download"),)
            else:
                mirror_status.add_failing_mirror(best_mirror, 5)
                txt += " - %s." % (_("unknown reason"),)
            self._entropy.output(
                txt,
                importance=1,
                level="warning",
                header=red(" ## ")
            )

    def remove_failing_mirrors(repos):
        # drop the currently-best mirror of each repository
        for repository_id in repos:
            # NOTE(review): return value unused -- this call looks
            # like a leftover; confirm.
            get_best_mirror(repository_id)
            if remaining[repository_id]:
                remaining[repository_id].pop(0)

    def check_remaining_mirror_failure(repos):
        # repositories that have no mirror left to try
        return [x for x in repos if not remaining.get(x)]

    d_list = download_list[:]
    while True:
        do_resume = True
        # budget of timeout retries before dropping the mirror
        timeout_try_count = 50
        while True:

            fetch_files_list = []
            for pkg_id, repository_id, fname, cksum, signs in d_list:
                best_mirror = get_best_mirror(repository_id)
                # set working mirror, dont care if its None
                mirror_status.set_working_mirror(best_mirror)
                if best_mirror is not None:
                    mirror_fail_check(repository_id, best_mirror)
                    best_mirror = get_best_mirror(repository_id)
                if best_mirror is None:
                    # at least one package failed to download
                    # properly, give up with everything
                    return 3, d_list

                myuri = os.path.join(best_mirror, fname)
                pkg_path = self.get_standard_fetch_disk_path(fname)
                fetch_files_list.append(
                    (pkg_id, repository_id, myuri, pkg_path, cksum, signs)
                )

            show_download_summary(d_list)

            # try the delta (edelta) multifetch first
            (edelta_fetch_files_list, data_transfer,
                exit_st) = self._try_edelta_multifetch(
                    fetch_files_list, do_resume)

            failed_downloads = None

            if exit_st == 0:
                # fetch the files edelta could not cover
                # O(nm) but both lists are very small...
                updated_fetch_files_list = [
                    x for x in fetch_files_list
                    if x not in edelta_fetch_files_list]

                if updated_fetch_files_list:
                    (exit_st, failed_downloads,
                        data_transfer) = self._download_files(
                            updated_fetch_files_list,
                            resume=do_resume)

                if exit_st == 0:
                    show_successful_download(
                        d_list, data_transfer)
                    return 0, []

            if failed_downloads:
                # retry only what actually failed
                d_list = update_download_list(
                    d_list, failed_downloads)

            if exit_st not in (-3, -4, -100,) and failed_downloads and \
                do_resume:
                # disable resume
                do_resume = False
                continue

            show_download_error(d_list, exit_st)

            if exit_st == -4:
                # timeout
                timeout_try_count -= 1
                if timeout_try_count > 0:
                    continue
            elif exit_st == -100:
                # user discarded fetch
                return 1, []

            myrepos = set([x[1] for x in d_list])
            remove_failing_mirrors(myrepos)

            # make sure we don't have nasty issues
            remaining_failure = check_remaining_mirror_failure(
                myrepos)
            if remaining_failure:
                return 3, d_list

            # retry the remaining packages on the next-best mirrors
            break

    # not reached: every exit path above returns explicitly
    return 0, []
def _download_package(self, package_id, repository_id, download,
                      download_path, checksum, resume = True):
    """
    Download a single package file from the mirrors of repository_id.

    NOTE(review): this is a second definition of _download_package in
    this file; an earlier one with the same tokens exists above. Being
    defined later, this copy is the one bound at class-creation time
    and the earlier copy is dead code -- consider removing one of them.

    Mirrors are tried in order. A per-mirror failure score held by
    StatusInterface() temporarily blacklists mirrors whose score reached
    the threshold (>= 30). For each mirror, an edelta (delta) fetch is
    attempted first, falling back to a plain file download.

    @return: 0 on success, 1 if the user discarded the download,
        3 if every mirror has been tried without success.
    """
    avail_data = self._settings['repositories']['available']
    excluded_data = self._settings['repositories']['excluded']
    repo = self._entropy.open_repository(repository_id)
    # grab original repo, if any and use it if available
    # this is done in order to support "equo repo merge" feature
    # allowing client-side repository package metadata moves.
    original_repo = repo.getInstalledPackageRepository(package_id)
    if (original_repo != repository_id) and (
        original_repo not in avail_data) and (
            original_repo is not None):
        # build up a new uris list, at least try, hoping that
        # repository is just shadowing original_repo
        # for example: original_repo got copied to repository, without
        # copying packages, which would be useless. like it happens
        # with sabayon-weekly
        uris = self._build_uris_list(original_repo, repository_id)
    else:
        # [::-1] below: reversed copies of the configured mirror lists
        if original_repo in avail_data:
            uris = avail_data[original_repo]['packages'][::-1]
            if repository_id in avail_data:
                uris += avail_data[repository_id]['packages'][::-1]
        elif original_repo in excluded_data:
            uris = excluded_data[original_repo]['packages'][::-1]
            if repository_id in avail_data:
                uris += avail_data[repository_id]['packages'][::-1]
        else:
            uris = avail_data[repository_id]['packages'][::-1]

    remaining = set(uris)
    mirror_status = StatusInterface()

    mirrorcount = 0
    for uri in uris:

        if not remaining:
            # tried all the mirrors, quitting for error
            mirror_status.set_working_mirror(None)
            return 3

        mirror_status.set_working_mirror(uri)
        mirrorcount += 1
        mirror_count_txt = "( mirror #%s ) " % (mirrorcount,)
        url = uri + "/" + download

        # check if uri is sane
        if mirror_status.get_failing_mirror_status(uri) >= 30:
            # ohohoh!
            # set to 30 for convenience
            mirror_status.set_failing_mirror_status(uri, 30)
            mytxt = mirror_count_txt
            mytxt += blue(" %s: ") % (_("Mirror"),)
            mytxt += red(self._get_url_name(uri))
            mytxt += " - %s." % (_("maximum failure threshold reached"),)
            self._entropy.output(
                mytxt,
                importance = 1,
                level = "warning",
                header = red(" ## ")
            )

            if mirror_status.get_failing_mirror_status(uri) == 30:
                # put to 75 then decrement by 4 so we
                # won't reach 30 anytime soon ahahaha
                mirror_status.add_failing_mirror(uri, 45)
            elif mirror_status.get_failing_mirror_status(uri) > 31:
                # now decrement each time this point is reached,
                # if will be back < 30, then equo will try to use it again
                mirror_status.add_failing_mirror(uri, -4)
            else:
                # put to 0 - reenable mirror, welcome back uri!
                mirror_status.set_failing_mirror_status(uri, 0)

            remaining.discard(uri)
            continue

        do_resume = resume
        # budget of timeout retries on this one mirror
        timeout_try_count = 50
        while True:

            txt = mirror_count_txt
            txt += blue("%s: ") % (_("Downloading from"),)
            txt += red(self._get_url_name(uri))
            self._entropy.output(
                txt,
                importance = 1,
                level = "warning",
                header = red(" ## ")
            )

            resumed = False
            # try the delta (edelta) fetch first, cheaper than a
            # full package download
            exit_st, data_transfer = self._try_edelta_fetch(
                url, download_path, checksum, do_resume)
            if exit_st > 0:
                # fallback to package file download
                exit_st, data_transfer, resumed = self._download_file(
                    url,
                    download_path,
                    package_id = package_id,
                    repository_id = repository_id,
                    digest = checksum,
                    resume = do_resume
                )

            if exit_st == 0:
                txt = mirror_count_txt
                txt += "%s: " % (
                    blue(_("Successfully downloaded from")),
                )
                txt += red(self._get_url_name(uri))
                human_bytes = entropy.tools.bytes_into_human(
                    data_transfer)
                txt += " %s %s/%s" % (_("at"), human_bytes, _("second"),)
                self._entropy.output(
                    txt,
                    importance = 1,
                    level = "info",
                    header = red(" ## ")
                )

                mirror_status.set_working_mirror(None)
                return 0

            elif resumed and (exit_st not in (-3, -4, -100,)):
                # a resumed download failed for a generic reason:
                # retry once from scratch on the same mirror
                do_resume = False
                continue

            error_message = mirror_count_txt
            error_message += blue("%s: %s") % (
                _("Error downloading from"),
                red(self._get_url_name(uri)),
            )
            # something bad happened
            if exit_st == -1:
                error_message += " - %s." % (
                    _("file not available on this mirror"),)
            elif exit_st == -2:
                mirror_status.add_failing_mirror(uri, 1)
                error_message += " - %s." % (_("wrong checksum"),)
                # If file is fetched (with no resume) and its
                # complete better to enforce resume to False.
                if (data_transfer < 1) and do_resume:
                    error_message += " %s." % (
                        _("Disabling resume"),)
                    do_resume = False
                    continue
            elif exit_st == -3:
                mirror_status.add_failing_mirror(uri, 3)
                error_message += " - %s." % (_("not found"),)
            elif exit_st == -4:
                # timeout!
                timeout_try_count -= 1
                if timeout_try_count > 0:
                    error_message += " - %s." % (
                        _("timeout, retrying on this mirror"),)
                else:
                    error_message += " - %s." % (
                        _("timeout, giving up"),)
            elif exit_st == -100:
                error_message += " - %s." % (
                    _("discarded download"),)
            else:
                mirror_status.add_failing_mirror(uri, 5)
                error_message += " - %s." % (_("unknown reason"),)

            self._entropy.output(
                error_message,
                importance = 1,
                level = "warning",
                header = red(" ## ")
            )

            if exit_st == -4:
                # timeout
                if timeout_try_count > 0:
                    continue
            elif exit_st == -100:
                # user discarded fetch
                mirror_status.set_working_mirror(None)
                return 1

            remaining.discard(uri)
            # make sure we don't have nasty issues
            if not remaining:
                mirror_status.set_working_mirror(None)
                return 3

            # give up on this mirror, move to the next uri
            break

    mirror_status.set_working_mirror(None)
    return 0