def __pull_repo(self, entropy_server, repository_id):
    sts = self.__sync_repo(entropy_server, repository_id)
    if sts != 0:
        entropy_server.output(red(_("Aborting !")),
            importance=1, level="error", header=darkred(" !!! "))
        return sts

    mirrors_tainted, mirrors_errors, successfull_mirrors, \
        broken_mirrors, check_data = \
        entropy_server.Mirrors.sync_packages(
            repository_id, ask = self._ask,
            pretend = self._pretend)

    if mirrors_errors and not successfull_mirrors:
        entropy_server.output(red(_("Aborting !")),
            importance=1, level="error", header=darkred(" !!! "))
        return 1
    if not successfull_mirrors:
        return 0

    if self._ask:
        q_rc = entropy_server.ask_question(
            _("Should I cleanup old packages on mirrors ?"))
        if q_rc == _("No"):
            return 0
    # fall through

    done = entropy_server.Mirrors.tidy_mirrors(
        repository_id, ask = self._ask, pretend = self._pretend)
    if not done:
        return 1
    return 0
def show_successful_download(down_list, data_transfer):
    for _pkg_id, repository_id, fname, _cksum, _signatures in down_list:
        best_mirror = get_best_mirror(repository_id)
        mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
        basef = os.path.basename(fname)
        txt = "( mirror #%s ) [%s] %s %s %s" % (
            mirrorcount,
            brown(basef),
            darkred(_("success")),
            blue("@"),
            red(self._get_url_name(best_mirror)),
        )
        self._entropy.output(
            txt,
            importance = 1,
            level = "info",
            header = red(" ## ")
        )

    if data_transfer:
        txt = " %s: %s%s%s" % (
            blue(_("Aggregated transfer rate")),
            bold(entropy.tools.bytes_into_human(data_transfer)),
            darkred("/"),
            darkblue(_("second")),
        )
        self._entropy.output(
            txt,
            importance = 1,
            level = "info",
            header = red(" ## ")
        )
def mirror_fail_check(repository_id, best_mirror):
    # check if uri is sane
    if mirror_status.get_failing_mirror_status(best_mirror) < 30:
        return False

    # set to 30 for convenience
    mirror_status.set_failing_mirror_status(best_mirror, 30)

    mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
    txt = "( mirror #%s ) %s %s - %s" % (
        mirrorcount,
        blue(_("Mirror")),
        red(self._get_url_name(best_mirror)),
        _("maximum failure threshold reached"),
    )
    self._entropy.output(
        txt,
        importance = 1,
        level = "warning",
        header = red(" ## ")
    )

    if mirror_status.get_failing_mirror_status(best_mirror) == 30:
        mirror_status.add_failing_mirror(best_mirror, 45)
    elif mirror_status.get_failing_mirror_status(best_mirror) > 31:
        mirror_status.add_failing_mirror(best_mirror, -4)
    else:
        mirror_status.set_failing_mirror_status(best_mirror, 0)

    try:
        remaining[repository_id].remove(best_mirror)
    except ValueError:
        # ignore
        pass
    return True
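# Illustrative sketch (not part of the original code): the failing-mirror
# score above behaves like a small threshold state machine. MirrorCounter
# is a hypothetical stand-in for StatusInterface, assuming only
# get/set/add semantics on a per-mirror integer score.

class MirrorCounter:
    """Minimal stand-in for the failing-mirror score bookkeeping."""

    def __init__(self):
        self._scores = {}

    def get(self, mirror):
        return self._scores.get(mirror, 0)

    def add(self, mirror, delta):
        self._scores[mirror] = self.get(mirror) + delta

    def set(self, mirror, value):
        self._scores[mirror] = value

def demo_mirror_threshold():
    # A mirror that just hit the threshold (30) is benched at 75 (30 + 45),
    # then decays by 4 per check until it drops below 30 and is re-enabled.
    counter = MirrorCounter()
    counter.set("http://mirror.example.org", 30)
    counter.add("http://mirror.example.org", 45)  # benched: score is now 75
    while counter.get("http://mirror.example.org") > 31:
        counter.add("http://mirror.example.org", -4)  # decay on each check
    counter.set("http://mirror.example.org", 0)  # welcome back
    return counter.get("http://mirror.example.org")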
def _execute_package_phase(self, action_metadata, package_metadata,
                           action_name, phase_name):
    """
    Wrapper against Source Package Manager's execute_package_phase.
    This method handles both fatal and non-fatal exceptions.
    """
    self._entropy.output(
        "%s: %s" % (brown(_("Package phase")), teal(phase_name),),
        importance = 0,
        header = red(" ## "))

    spm = self._spm
    try:
        spm.execute_package_phase(
            action_metadata, package_metadata,
            action_name, phase_name)

    except spm.PhaseFailure as err:
        txt = "%s: %s %s, %s. %s." % (
            bold(_("QA")),
            brown(_("Cannot run phase")),
            bold(phase_name),
            err.message,
            brown(_("Please report it")),
        )
        self._entropy.output(
            txt,
            importance = 1,
            header = red(" ## "),
            level = "warning")
        return 0  # non-fatal

    except spm.OutdatedPhaseError as err:
        err_msg = "%s: %s" % (
            brown(_("Source Package Manager is too old, "
                    "please update it")),
            err)
        self._entropy.output(
            err_msg,
            importance = 1,
            header = darkred(" ## "),
            level = "error"
        )
        return 1

    except spm.PhaseError as err:
        err_msg = "%s: %s" % (
            brown(_("Source Package Manager phase error")),
            err)
        self._entropy.output(
            err_msg,
            importance = 1,
            header = darkred(" ## "),
            level = "error"
        )
        return 1

    return 0
def _get_installed_kernels(self, installed_repository):
    """
    Return a set of kernel packages that are installed on the system.
    """
    installed_package_ids = set()

    # Resolve the target kernel using the installed packages repository.
    # First, locate the virtual kernel package (if new virtuals are in
    # use.)
    latest_kernel, _k_rc = installed_repository.atomMatch(
        KERNEL_BINARY_VIRTUAL)
    if latest_kernel == -1:
        # Virtual package is not installed.
        # This happens when kernel packages have been moved to the new
        # virtual and PROVIDE is broken or no longer supported (EAPI=7?).
        print_warning("%s: %s %s" % (
            red(_("Attention")),
            KERNEL_BINARY_VIRTUAL,
            brown(_("is not installed. Unable to resolve kernel "
                    "packages correctly. Please use --from-running or "
                    "install the package. Is your system up-to-date?"))))
        return installed_package_ids

    # If we have resolved the package to the virtual, we need to go
    # one level deep and retrieve the list of available kernel packages.
    # Do not assume that we hit only one kernel package when scanning, we
    # may have different packages in the dependency list.
    virtual_key = entropy.dep.dep_getkey(KERNEL_BINARY_VIRTUAL)

    latest_key_slot = installed_repository.retrieveKeySlot(latest_kernel)
    if latest_key_slot:
        latest_key, _unused = latest_key_slot
    else:
        # Cannot find installed package, give up.
        print_warning("%s: %s" % (
            red(_("Attention")),
            brown(_("Unable to resolve the latest kernel metadata. "
                    "Try again later."))
        ))
        return installed_package_ids

    if virtual_key == latest_key:
        print_info("%s: %s" % (
            red(_("Resolving virtual kernel package")),
            KERNEL_BINARY_VIRTUAL,
        ))
        # New virtual package support.
        virtual_deps = installed_repository.retrieveRuntimeDependencies(
            latest_kernel)
        for virtual_dep in virtual_deps:
            virtual_pkg_id, _v_rc = installed_repository.atomMatch(
                virtual_dep)
            if virtual_pkg_id != -1:
                installed_package_ids.add(virtual_pkg_id)
    else:
        # Old virtual package detected (pre EAPI=7). Assume it's a kernel
        # binary.
        installed_package_ids.add(latest_kernel)

    return installed_package_ids
def _remove_phase(self):
    """
    Run the remove phase.
    """
    inst_repo = self._entropy.open_repository(self._repository_id)
    with inst_repo.exclusive():
        if not inst_repo.isPackageIdAvailable(self._package_id):
            self._entropy.output(
                darkred(_("The requested package is no longer available")),
                importance = 1,
                level = "warning",
                header = brown(" @@ ")
            )
            # install.py assumes that a zero exit status is returned
            # in this case.
            return 0

        atom = inst_repo.retrieveAtom(self._package_id)

        xterm_title = "%s %s: %s" % (
            self._xterm_header,
            _("Removing"),
            atom,
        )
        self._entropy.set_title(xterm_title)

        self._entropy.output(
            "%s: %s" % (
                blue(_("Removing")),
                red(atom),
            ),
            importance = 1,
            level = "info",
            header = red(" ## ")
        )

        self._entropy.logger.log("[Package]",
            etpConst['logging']['normal_loglevel_id'],
            "Removing package: %s" % (atom,))

        txt = "%s: %s" % (
            blue(_("Removing from Entropy")),
            red(atom),
        )
        self._entropy.output(
            txt,
            importance = 1,
            level = "info",
            header = red(" ## ")
        )

        return self._remove_phase_unlocked(inst_repo)
def _run(self):
    """
    Execute the action. Return an exit status.
    """
    self.setup()

    unpack_dir = self._meta['unpackdir']
    if not self._meta.get('fetch_path'):
        try:
            os.makedirs(unpack_dir, 0o755)
            const_setup_directory(unpack_dir)
        except (OSError, IOError) as err:
            if err.errno != errno.EEXIST:
                self._entropy.output(
                    "%s: %s" % (
                        blue(_("Fetch path setup error")),
                        err,
                    ),
                    importance = 1,
                    level = "info",
                    header = red(" ## ")
                )
                return 1

    exit_st = 0
    for method in self._meta['phases']:
        exit_st = method()
        if exit_st != 0:
            break
    return exit_st
def _trigger_call_ext_generic(self):
    try:
        return self._do_trigger_call_ext_generic()
    except Exception as err:
        mykey = self._pkgdata['category'] + "/" + self._pkgdata['name']
        tback = entropy.tools.get_traceback()
        self._entropy.output(tback, importance = 0, level = "error")
        self._entropy.logger.write(tback)
        self._entropy.logger.log(
            "[Trigger]",
            etpConst['logging']['normal_loglevel_id'],
            "[POST] ATTENTION Cannot run External trigger for " + \
            mykey + "!! " + err.__class__.__name__ + ": " + repr(err)
        )
        mytxt = "%s: %s %s. %s." % (
            bold(_("QA")),
            brown(_("Cannot run External trigger for")),
            bold(mykey),
            brown(_("Please report it")),
        )
        self._entropy.output(
            mytxt,
            importance = 0,
            header = red(" ## ")
        )
        return 0
def _push_progress_to_output(self):

    mytxt = _("[F]")
    eta_txt = _("ETA")
    sec_txt = _("sec")  # as in XX kb/sec

    current_txt = darkred(" %s: " % (mytxt,)) + \
        darkgreen(str(round(float(self.__downloadedsize)/1000, 1))) + "/" + \
        red(str(round(self.__remotesize, 1))) + " kB"

    # create progress bar
    barsize = 10
    bartext = "["
    curbarsize = 1
    # filled slots are proportional to the average percentage
    averagesize = (self.__average * barsize) // 100
    while averagesize > 0:
        curbarsize += 1
        bartext += "="
        averagesize -= 1
    bartext += ">"
    diffbarsize = barsize - curbarsize
    while diffbarsize > 0:
        bartext += " "
        diffbarsize -= 1

    if self.__show_speed:
        bartext += "] => %s" % (bytes_into_human(self.__datatransfer),)
        bartext += "/%s : %s: %s" % (sec_txt, eta_txt,
            self.__time_remaining,)
    else:
        bartext += "]"

    average = str(self.__average)
    if len(average) < 2:
        average = " " + average
    current_txt += " <-> " + average + "% " + bartext
    TextInterface.output(self, current_txt, back = True)
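# Illustrative sketch (not part of the original code): the bar-drawing
# arithmetic above, extracted into a pure function. A 10-slot bar is
# filled proportionally to the average percentage, with '>' marking the
# leading edge and spaces padding the remainder.

def render_progress_bar(average_percent, barsize=10):
    """Return a textual progress bar such as '[=====>    ]'."""
    filled = int((average_percent * barsize) / 100)
    bar = "[" + ("=" * filled) + ">"
    bar += " " * (barsize - filled - 1)
    return bar + "]"

# Example: render_progress_bar(50) -> '[=====>    ]'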
def _update_progress(self, force = False):
    if self._silent:
        # stfu !
        return

    upload_percent = 100.0
    upload_size = round(self.__filekbcount, 1)

    if self.__filesize >= 1:
        kbcount_round = round(self.__filekbcount, 1)
        upload_percent = round((kbcount_round / self.__filesize) * 100, 1)

    delta_secs = 0.5
    cur_t = time.time()
    if (cur_t > (self.__oldprogress_t + delta_secs)) or force:
        upload_percent = str(upload_percent) + "%"
        # create text
        mytxt = _("Transfer status")
        current_txt = brown(" <-> %s: " % (mytxt,)) + \
            darkgreen(str(upload_size)) + "/" + \
            red(str(self.__filesize)) + " kB " + \
            brown("[") + upload_percent + brown("]") + \
            " " + self.__time_remaining + " " + \
            bytes_into_human(self.__datatransfer) + \
            "/" + _("sec")
        self.output(current_txt, back = True)
        self.__oldprogress_t = cur_t
def _fetch(path, download, checksum):
    txt = "%s: %s" % (
        blue(_("Downloading")),
        red(os.path.basename(download)),)
    self._entropy.output(
        txt,
        importance = 1,
        level = "info",
        header = red(" ## ")
    )
    return self._download_package(
        self._package_id,
        self._repository_id,
        download,
        path,
        checksum
    )
def _config_phase_unlocked(self, inst_repo):
    """
    _config_phase(), assuming that the installed packages repository
    lock is held.
    """
    if not inst_repo.isPackageIdAvailable(self._package_id):
        self._entropy.output(
            darkred(_("The requested package is no longer available.")),
            importance = 1,
            level = "error",
            header = red(" ## ")
        )
        return 3

    metadata = {}
    metadata['atom'] = inst_repo.retrieveAtom(self._package_id)
    key, slot = inst_repo.retrieveKeySlot(self._package_id)
    metadata['key'], metadata['slot'] = key, slot
    metadata['version'] = inst_repo.retrieveVersion(self._package_id)
    metadata['category'] = inst_repo.retrieveCategory(self._package_id)
    metadata['name'] = inst_repo.retrieveName(self._package_id)
    metadata['spm_repository'] = inst_repo.retrieveSpmRepository(
        self._package_id)
    metadata['accept_license'] = self._get_licenses(
        inst_repo, self._package_id)

    xterm_title = "%s %s: %s" % (
        self._xterm_header,
        _("Configuring package"),
        metadata['atom'],
    )
    self._entropy.set_title(xterm_title)

    txt = "%s: %s" % (
        blue(_("Configuring package")),
        red(metadata['atom']),
    )
    self._entropy.output(
        txt,
        importance = 1,
        level = "info",
        header = red(" ## ")
    )

    return self._configure_package_unlocked(metadata)
def show_download_summary(down_list):
    for _pkg_id, repository_id, fname, _cksum, _signatures in down_list:
        best_mirror = get_best_mirror(repository_id)
        mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
        basef = os.path.basename(fname)
        txt = "( mirror #%s ) [%s] %s %s" % (
            mirrorcount,
            brown(basef),
            blue("@"),
            red(self._get_url_name(best_mirror)),
        )
        self._entropy.output(
            txt,
            importance = 1,
            level = "info",
            header = red(" ## ")
        )
def _push_progress_to_output(self, force = False):

    stats = self._compute_progress_stats()
    downloaded_size = stats["downloaded_size"]
    total_size = stats["total_size"]
    time_remaining = stats["time_remaining"]
    data_transfer = stats["data_transfer"]
    average = stats["average"]
    time_remaining_str = stats["time_remaining_str"]

    self.__data_transfer = data_transfer
    self.__average = average
    self.__time_remaining_secs = time_remaining

    update_time_delta = 0.5
    cur_t = time.time()
    if ((cur_t > (self.__progress_update_t + update_time_delta)) \
            or force or (self.__first_refreshes > 0)) \
            and self.__show_progress:

        self.__first_refreshes -= 1
        self.__progress_update_t = cur_t

        eta_txt = _("ETA")
        sec_txt = _("sec")  # as in XX kb/sec
        down_size_txt = str(round(float(downloaded_size) / 1000, 1))
        total_size_txt = str(round(total_size, 1))
        current_txt = darkgreen(down_size_txt) + "/" + red(total_size_txt)
        current_txt += " kB"

        # create progress bar
        barsize = 10
        bartext = "["
        curbarsize = 1
        # filled slots are proportional to the average percentage
        averagesize = (average * barsize) // 100
        while averagesize > 0:
            curbarsize += 1
            bartext += "="
            averagesize -= 1
        bartext += ">"
        diffbarsize = barsize - curbarsize
        while diffbarsize > 0:
            bartext += " "
            diffbarsize -= 1

        if self.__show_speed:
            bartext += "] => %s" % (bytes_into_human(data_transfer),)
            bartext += "/%s : %s: %s" % (
                sec_txt, eta_txt, time_remaining_str,)
        else:
            bartext += "]"

        myavg = str(average)
        if len(myavg) < 2:
            myavg = " " + myavg
        current_txt += " <-> " + myavg + "% " + bartext + " "
        TextInterface.output(self, current_txt, back = True)

        self.__old_average = average
def upload(self, load_path, remote_path):
    self.__connect_if_not()

    path = os.path.join(self.__ftpdir, remote_path)
    tmp_path = path + EntropyUriHandler.TMP_TXC_FILE_EXT
    tries = 0

    def updater(buf):
        self._commit_buffer_update(len(buf))
        self._update_speed()
        self._update_progress()
        self._speed_limit_loop()

    while tries < 10:
        tries += 1
        self._init_vars()
        try:
            file_size = get_file_size(load_path)
            self.__filesize = round(float(file_size) / 1024, 1)
            self.__filekbcount = 0
            # upload in binary mode, FTP text mode would mangle the data
            with open(load_path, "rb") as f:
                rc = self.__ftpconn.storbinary(
                    "STOR " + tmp_path, f, 8192, updater)
            self._update_progress(force = True)
            # now we can rename the file with its original name
            self.rename(tmp_path, path)
            done = rc.find("226") != -1
            return done
        except Exception as e:  # connection reset by peer
            print_traceback()
            mytxt = red("%s: %s, %s... #%s") % (
                _("Upload issue"),
                repr(e),
                _("retrying"),
                tries + 1,
            )
            self.output(
                mytxt,
                importance = 1,
                level = "warning",
                header = " "
            )
            self._reconnect()  # reconnect
            self.delete(tmp_path)
            self.delete(path)

    # give up after too many failed attempts
    return False
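# Illustrative sketch (not part of the original code): upload() above uses
# a write-to-temp-then-rename scheme so that a half-transferred file never
# sits under the final remote name. "handler" and its methods here are
# hypothetical stand-ins for an EntropyUriHandler-like object.

def atomic_remote_upload(handler, local_path, remote_path, tmp_ext=".tmp"):
    """Upload to a temporary remote name, then publish via rename."""
    tmp_path = remote_path + tmp_ext
    handler.upload(local_path, tmp_path)   # partial data lands on tmp name
    handler.rename(tmp_path, remote_path)  # publish step, near-atomic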
def _fetch_not_available_phase(self):
    """
    Execute the fetch not available phase.
    """
    self._entropy.output(
        blue(_("Source code not available.")),
        importance = 1,
        level = "info",
        header = red(" ## ")
    )
    return 0
def _configure_package_unlocked(self, metadata):
    """
    Configure the package.
    """
    spm = self._entropy.Spm()

    self._entropy.output(
        "SPM: %s" % (
            brown(_("configuration phase")),
        ),
        importance = 0,
        header = red(" ## ")
    )

    try:
        spm.execute_package_phase(
            metadata, metadata,
            self.NAME, "configure")

    except spm.PhaseFailure as err:
        self._entropy.logger.log(
            "[Package]", etpConst['logging']['normal_loglevel_id'],
            "Phase execution failed with %s, %d" % (
                err.message, err.code))
        return err.code

    except spm.OutdatedPhaseError as err:
        self._entropy.logger.log(
            "[Package]", etpConst['logging']['normal_loglevel_id'],
            "Source Package Manager is too old: %s" % (err,))
        err_msg = "%s: %s" % (
            brown(_("Source Package Manager is too old, "
                    "please update it")),
            err)
        self._entropy.output(
            err_msg,
            importance = 1,
            header = darkred(" ## "),
            level = "error"
        )
        return 1

    except spm.PhaseError as err:
        self._entropy.logger.log(
            "[Package]", etpConst['logging']['normal_loglevel_id'],
            "Phase execution error: %s" % (err,))
        return 1

    return 0
def _update_security_advisories(self):
    try:
        sec = self._entropy.Security()
        sec.update()
    except Exception as e:
        entropy.tools.print_traceback(f = self._entropy.logger)
        mytxt = "%s: %s" % (red(_("Advisories fetch error")), e,)
        self._entropy.output(
            mytxt,
            importance = 1,
            level = "warning",
            header = darkred(" @@ ")
        )
def _download_error(exit_st):
    txt = "%s. %s: %s" % (
        red(_("Package cannot be downloaded. "
              "Try to update repositories")),
        blue(_("Error")),
        exit_st,
    )
    self._entropy.output(
        txt,
        importance = 1,
        level = "error",
        header = darkred(" ## ")
    )
def _unused(self, entropy_client):
    """
    Command implementation.
    """
    if not self._quiet:
        entropy_client.output(
            "%s..." % (
                blue(_("Running unused packages test, "
                       "pay attention, there can be false positives")),),
            header=red(" @@ "))

    installed_repo = entropy_client.installed_repository()

    def _unused_packages_test():
        return [x for x in installed_repo.retrieveUnusedPackageIds()
                if entropy_client.validate_package_removal(x)]

    data = [(installed_repo.retrieveOnDiskSize(x), x,
             installed_repo.retrieveAtom(x),)
            for x in _unused_packages_test()]

    def _user_filter(item):
        _size, _pkg_id, _atom = item
        _source = installed_repo.getInstalledPackageSource(_pkg_id)
        if _source == etpConst['install_sources']['user']:
            # remove from list, user installed stuff not going
            # to be listed
            return False
        return True

    # filter: --by-user not provided -> if package has been installed
    # by user, exclude from list.
    if not self._byuser:
        data = list(filter(_user_filter, data))

    if self._sortbysize:
        data.sort(key = lambda x: x[0])

    if self._quiet:
        entropy_client.output(
            '\n'.join([x[2] for x in data]),
            level="generic")
    else:
        for disk_size, idpackage, atom in data:
            disk_size = entropy.tools.bytes_into_human(disk_size)
            entropy_client.output(
                "# %s%s%s %s" % (
                    blue("["), brown(disk_size),
                    blue("]"), darkgreen(atom),))

    return 0
def show_download_error(down_list, p_exit_st):
    for _pkg_id, repository_id, _fname, _cksum, _signs in down_list:
        best_mirror = get_best_mirror(repository_id)
        mirrorcount = repo_uris[repository_id].index(best_mirror) + 1
        txt = "( mirror #%s ) %s: %s" % (
            mirrorcount,
            blue(_("Error downloading from")),
            red(self._get_url_name(best_mirror)),
        )
        if p_exit_st == -1:
            txt += " - %s." % (
                _("data not available on this mirror"),)
        elif p_exit_st == -2:
            mirror_status.add_failing_mirror(best_mirror, 1)
            txt += " - %s." % (_("wrong checksum"),)
        elif p_exit_st == -3:
            txt += " - %s." % (_("not found"),)
        elif p_exit_st == -4:  # timeout!
            txt += " - %s." % (_("timeout error"),)
        elif p_exit_st == -100:
            txt += " - %s." % (_("discarded download"),)
        else:
            mirror_status.add_failing_mirror(best_mirror, 5)
            txt += " - %s." % (_("unknown reason"),)
        self._entropy.output(
            txt,
            importance = 1,
            level = "warning",
            header = red(" ## ")
        )
def _compress_packages(self, entropy_server, repository_id, packages):
    """
    Compress (and generate package tarball) the list of given spm package
    names inside the given Entropy repository.
    """
    entropy_server.output(
        blue(_("Compressing packages")),
        header=brown(" @@ "))

    generated_packages = collections.deque()

    store_dir = entropy_server._get_local_store_directory(repository_id)
    if not os.path.isdir(store_dir):
        try:
            os.makedirs(store_dir)
        except (IOError, OSError) as err:
            entropy_server.output(
                "%s: %s" % (_("Cannot create store directory"), err),
                header=brown(" !!! "),
                importance=1,
                level="error")
            return generated_packages, 1

    for count, spm_name in enumerate(packages, 1):
        entropy_server.output(
            teal(spm_name),
            header=brown(" # "),
            count=(count, len(packages)))
        try:
            pkg_list = entropy_server.Spm().generate_package(
                spm_name, store_dir)
            generated_packages.append(pkg_list)
        except OSError:
            entropy.tools.print_traceback()
            entropy_server.output(
                bold(_("Ignoring broken Spm entry, please recompile it")),
                header=brown(" !!! "),
                importance=1,
                level="warning")

    if not generated_packages:
        entropy_server.output(
            red(_("Nothing to do, check later.")),
            header=brown(" * "))
        return generated_packages, 0

    return generated_packages, None
def _handle_preserved_lib(self, path, atom, preserved_mgr):
    """
    Preserve libraries that would be removed but are still needed by
    installed packages. This is a safety measure for accidental removals.
    Proper library dependency ordering should be done during dependencies
    calculation.
    """
    solved = preserved_mgr.resolve(path)
    if solved is None:
        return None

    paths = preserved_mgr.determine(path)

    if paths:
        self._entropy.output(
            "%s: %s, %s" % (
                darkgreen(_("Protecting")),
                teal(path),
                darkgreen(_("library needed by:")),
            ),
            importance = 1,
            level = "warning",
            header = red(" ## ")
        )

        library, elfclass, s_path = solved
        preserved_mgr.register(library, elfclass, s_path, atom)

        installed_package_ids = preserved_mgr.needed(path)
        installed_repository = preserved_mgr.installed_repository()
        for installed_package_id in installed_package_ids:
            atom = installed_repository.retrieveAtom(installed_package_id)
            self._entropy.output(
                brown(atom),
                importance = 0,
                level = "warning",
                header = darkgreen(" :: ")
            )

        self._entropy.logger.log(
            "[Package]",
            etpConst['logging']['normal_loglevel_id'],
            "Protecting library %s, due to package: %s" % (
                path, atom,)
        )

    return paths
def _update_security_advisories(self, _unlocked = False):
    # update Security Advisories
    try:
        security_intf = self._entropy.Security()
        if _unlocked:
            security_intf.unlocked_sync(do_cache = False)
        else:
            security_intf.sync(do_cache = False)
    except Exception as e:
        entropy.tools.print_traceback(f = self._entropy.logger)
        mytxt = "%s: %s" % (red(_("Advisories fetch error")), e,)
        self._entropy.output(
            mytxt,
            importance = 1,
            level = "warning",
            header = darkred(" @@ ")
        )
def _inject_packages(self, entropy_server, package_matches):
    """
    Mark the given Entropy packages as injected in the repository.
    """
    entropy_server.output(
        blue(_("These would be marked as injected")),
        header=brown(" @@ "))

    for package_id, repository_id in package_matches:
        repo = entropy_server.open_repository(repository_id)
        atom = repo.retrieveAtom(package_id)
        entropy_server.output(
            "[%s] %s" % (
                blue(repository_id),
                darkred(atom),
            ),
            header=brown(" # "))

    if self._ask:
        rc = entropy_server.ask_question(
            _("Do it now ?"))
        if rc == _("No"):
            return

    for package_id, repository_id in package_matches:
        repo = entropy_server.open_repository(repository_id)
        atom = repo.retrieveAtom(package_id)
        entropy_server.output(
            "%s: %s" % (
                blue(_("Transforming")),
                red(atom)),
            header=brown(" <> "))
        entropy_server._transform_package_into_injected(
            package_id, repository_id)

    entropy_server.commit_repositories()
    entropy_server.output(
        blue(_("Action completed")),
        header=brown(" @@ "))
def _config_phase(self):
    """
    Execute the config phase.
    """
    inst_repo = self._entropy.installed_repository()
    with inst_repo.shared():
        exit_st = self._config_phase_unlocked(inst_repo)

    if exit_st == 1:
        txt = _("An error occurred while trying to configure the package")
        txt2 = "%s. %s: %s" % (
            red(_("Make sure that your system is healthy")),
            blue(_("Error")),
            exit_st,
        )
        self._entropy.output(
            darkred(txt),
            importance = 1,
            level = "error",
            header = red(" ## ")
        )
        self._entropy.output(
            txt2,
            importance = 1,
            level = "error",
            header = red(" ## ")
        )

    elif exit_st == 2:
        txt = _("An error occurred while trying to configure the package")
        txt2 = "%s. %s: %s" % (
            red(_("It seems that Source Package Manager entry is missing")),
            blue(_("Error")),
            exit_st,
        )
        self._entropy.output(
            darkred(txt),
            importance = 1,
            level = "error",
            header = red(" ## ")
        )
        self._entropy.output(
            txt2,
            importance = 1,
            level = "error",
            header = red(" ## ")
        )

    return exit_st
def _unused(self, entropy_client, inst_repo):
    """
    Command implementation.
    """
    if not self._quiet:
        entropy_client.output(
            "%s..." % (
                blue(_("Running unused packages test")),),
            header=red(" @@ "))

    all_ids = inst_repo.listAllPackageIds()
    user_packages = self._filter_user_packages(inst_repo, all_ids)
    wanted_ids = self._get_flat_deps(
        user_packages, self._get_dep_ids(inst_repo))
    not_needed = all_ids - wanted_ids

    def _sort_key(x):
        sort_index = 1 if self._sortbysize else 0
        return x[sort_index]

    not_needed_pkgs_data = sorted(
        [(inst_repo.retrieveAtom(x), inst_repo.retrieveOnDiskSize(x))
         for x in not_needed],
        key=_sort_key)

    if self._quiet:
        entropy_client.output(
            '\n'.join([x[0] for x in not_needed_pkgs_data]),
            level="generic")
    else:
        for atom, disk_size in not_needed_pkgs_data:
            disk_size = entropy.tools.bytes_into_human(disk_size)
            entropy_client.output(
                "# %s%s%s %s" % (
                    blue("["), brown(disk_size),
                    blue("]"), darkgreen(atom),))
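# Illustrative sketch (not part of the original code): the unused-package
# detection above is plain set arithmetic -- everything installed, minus
# the transitive dependency closure of the explicitly requested packages.
# The tiny dependency graph below is made up for the example.

def flat_deps(roots, dep_graph):
    """Return the transitive dependency closure of the given roots."""
    seen = set()
    stack = list(roots)
    while stack:
        pkg = stack.pop()
        if pkg in seen:
            continue
        seen.add(pkg)
        stack.extend(dep_graph.get(pkg, ()))
    return seen

def demo_unused():
    dep_graph = {
        "app-editors/vim": ["app-editors/vim-core"],
        "app-editors/vim-core": [],
        "dev-libs/orphan": [],
    }
    all_ids = set(dep_graph)
    user_packages = {"app-editors/vim"}
    wanted = flat_deps(user_packages, dep_graph)
    return all_ids - wanted  # -> {"dev-libs/orphan"}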
def _show_kernel_warnings(kernel_atom):
    print_info("%s %s" % (
        purple(kernel_atom),
        teal(_("has been installed."))))
    print_warning("%s: %s" % (
        red(_("Attention")),
        brown(_("some external drivers cannot work "
                "across multiple kernels."))))
    print_warning(darkgreen(_("Please reboot your computer now !")))
def _login(self, entropy_client):
    """
    Solo Ugc Login command.
    """
    repository = self._nsargs.repo
    force = self._nsargs.force

    if repository not in entropy_client.repositories():
        entropy_client.output(
            red("%s: %s." % (
                _("Invalid repository"),
                repository,)),
            level="error", importance=1)
        return 1

    try:
        webserv = _get_service(
            entropy_client, repository, tx_cb = True)
    except WebService.UnsupportedService:
        entropy_client.output(
            "[%s] %s." % (
                darkgreen(repository),
                blue(_("Repository does not support Entropy Services.")),
            )
        )
        return 1

    username = webserv.get_credentials()
    if (username is not None) and not force:
        entropy_client.output(
            "[%s] %s %s. %s." % (
                darkgreen(repository),
                blue(_("Already logged in as")),
                bold(username),
                blue(_("Please logout first"))
            )
        )
        return 0
    elif (username is not None) and force:
        webserv.remove_credentials(repository)

    def fake_callback(*args, **kwargs):
        return True

    # use input box to read login
    input_params = [
        ('username', _('Username'), fake_callback, False),
        ('password', _('Password'), fake_callback, True)
    ]
    login_data = entropy_client.input_box(
        "%s %s %s" % (
            _('Please login against'), repository, _('repository'),),
        input_params,
        cancel_button = True
    )
    if not login_data:
        entropy_client.output(
            "[%s] %s" % (
                darkgreen(repository),
                blue(_("Login aborted. Not logged in.")),
            ),
            level="warning", importance=1
        )
        return 1

    username, password = login_data['username'], login_data['password']
    webserv.add_credentials(username, password)
    try:
        webserv.validate_credentials()
    except WebService.AuthenticationFailed:
        entropy_client.output(
            "[%s] %s" % (
                darkgreen(repository),
                blue(_("Authentication error. Not logged in.")),
            ),
            level="warning", importance=1
        )
        return 1
    except WebService.RequestError:
        entropy_client.output(
            "[%s] %s" % (
                darkgreen(repository),
                blue(_("Communication error. Not logged in.")),
            ),
            level="warning", importance=1
        )
        return 1

    entropy_client.output(
        "[%s:uid:%s] %s: %s." % (
            darkgreen(repository),
            etpConst['uid'],
            blue(_("Successfully logged in as")),
            bold(username)
        )
    )
    entropy_client.output(
        "%s." % (
            blue(_("From now on, any UGC action will "
                   "be committed as this user"))
        )
    )
    return 0
def _show_install_queue(cls, entropy_client, inst_repo, run_queue,
                        removal_queue, ask, pretend, quiet, verbose):
    """
    Show expanded installation queue to user.
    """
    download_size = 0
    unpack_size = 0
    on_disk_used_size = 0
    on_disk_freed_size = 0
    pkgs_install = 0
    pkgs_update = 0
    pkgs_reinstall = 0
    pkgs_downgrade = 0
    pkgs_remove = len(removal_queue)
    client_settings = entropy_client.ClientSettings()
    splitdebug = client_settings['misc']['splitdebug']

    if run_queue and ((ask or pretend) and not quiet):
        inst_msg = _("These are the packages that would be installed")
        entropy_client.output(
            "%s:" % (blue(inst_msg),),
            header=darkred(" @@ "))

    for package_id, repository_id in run_queue:
        repo = entropy_client.open_repository(repository_id)

        atom = repo.retrieveAtom(package_id)
        pkgver = repo.retrieveVersion(package_id)
        pkgtag = repo.retrieveTag(package_id)
        pkgrev = repo.retrieveRevision(package_id)
        pkgfile = repo.retrieveDownloadURL(package_id)
        on_disk_used_size += repo.retrieveOnDiskSize(package_id)

        pkgsize = repo.retrieveSize(package_id)
        extra_downloads = repo.retrieveExtraDownload(package_id)
        for extra_download in extra_downloads:
            if not splitdebug and (extra_download['type'] == "debug"):
                continue
            pkgsize += extra_download['size']
            on_disk_used_size += extra_download['disksize']

        unpack_size += int(pkgsize) * 2

        fetch_path = PackageAction.get_standard_fetch_disk_path(pkgfile)
        if not os.path.exists(fetch_path):
            download_size += int(pkgsize)
        else:
            try:
                f_size = entropy.tools.get_file_size(fetch_path)
            except OSError:
                f_size = 0
            download_size += pkgsize - f_size

        installed_ver = '-1'
        installed_tag = ''
        installed_rev = 0
        inst_repo_s = None

        inst_pkg_id, inst_pkg_rc = cls._match_pkg_in_installed_repo(
            repo, package_id, inst_repo)
        if inst_pkg_rc == 0:
            installed_ver = inst_repo.retrieveVersion(
                inst_pkg_id)
            installed_tag = inst_repo.retrieveTag(
                inst_pkg_id)
            installed_rev = inst_repo.retrieveRevision(
                inst_pkg_id)
            inst_repo_s = \
                inst_repo.getInstalledPackageRepository(
                    inst_pkg_id)
            if inst_repo_s is None:
                inst_repo_s = _("Not available")
            on_disk_freed_size += inst_repo.retrieveOnDiskSize(
                inst_pkg_id)
            extra_downloads = inst_repo.retrieveExtraDownload(
                inst_pkg_id)
            for extra_download in extra_downloads:
                on_disk_freed_size += extra_download['disksize']

        # statistics generation complete
        # if --quiet, we're done doing stuff
        if quiet:
            continue

        inst_meta = (installed_ver, installed_tag, installed_rev,)
        avail_meta = (pkgver, pkgtag, pkgrev,)

        action = 0
        repo_switch = False
        if (repository_id != inst_repo_s) and \
                (inst_repo_s is not None):
            repo_switch = True

        if repo_switch:
            flags = darkred(" [")
        else:
            flags = " ["

        if inst_repo_s is None:
            inst_repo_s = _('Not available')

        pkgcmp = entropy_client.get_package_action(
            (package_id, repository_id))

        if pkgcmp == 0:
            pkgs_reinstall += 1
            flags += red("R")
            action = 1
        elif pkgcmp == 1:
            pkgs_install += 1
            flags += darkgreen("N")
        elif pkgcmp == 2:
            pkgs_update += 1
            if avail_meta == inst_meta:
                flags += blue("U") + red("R")
            else:
                flags += blue("U")
            action = 2
        else:
            pkgs_downgrade += 1
            flags += darkblue("D")
            action = -1

        if repo_switch:
            flags += darkred("] ")
        else:
            flags += "] "

        if repo_switch:
            repo_info = "[%s->%s] " % (
                brown(inst_repo_s),
                darkred(repository_id),)
        else:
            repo_info = "[%s] " % (
                brown(repository_id),)

        old_info = ""
        if action != 0:
            old_info = " [%s|%s" % (
                blue(installed_ver),
                darkred(const_convert_to_unicode(installed_rev)),)

            old_tag = "]"
            if installed_tag:
                old_tag = "|%s%s" % (
                    darkred(installed_tag),
                    old_tag,)
            old_info += old_tag

        entropy_client.output(
            "%s%s%s|%s%s" % (
                flags,
                repo_info,
                enlightenatom(atom),
                darkred(const_convert_to_unicode(pkgrev)),
                old_info,),
            header=darkred(" ##"))

    delta_size = on_disk_used_size - on_disk_freed_size
    needed_size = delta_size
    if unpack_size > 0:
        needed_size += unpack_size

    if (ask or pretend or verbose) and removal_queue:
        mytxt = "%s (%s):" % (
            blue(_("These are the packages that would be removed")),
            bold(_("conflicting/substituted")),
        )
        entropy_client.output(
            mytxt,
            header=darkred(" @@ "))

        for package_id in removal_queue:
            atom = inst_repo.retrieveAtom(package_id)
            on_disk_freed_size += inst_repo.retrieveOnDiskSize(
                package_id)
            extra_downloads = inst_repo.retrieveExtraDownload(
                package_id)
            for extra_download in extra_downloads:
                on_disk_freed_size += extra_download['disksize']

            installedfrom = inst_repo.getInstalledPackageRepository(
                package_id)
            if installedfrom is None:
                installedfrom = _("Not available")

            mytxt = "[%s] %s%s: %s%s %s" % (
                purple("W"),
                darkred("["),
                brown(_("from")),
                bold(installedfrom),
                darkred("]"),
                enlightenatom(atom))
            entropy_client.output(mytxt, header=darkred(" ## "))

    # if --quiet, there is nothing else to show
    if quiet:
        return

    mytxt = "%s: %s" % (
        blue(_("Packages needing to be installed/updated/downgraded")),
        darkred(const_convert_to_unicode(len(run_queue))),)
    entropy_client.output(mytxt, header=darkred(" @@ "))

    mytxt = "%s: %s" % (
        blue(_("Packages needing to be removed")),
        darkred(const_convert_to_unicode(pkgs_remove)),)
    entropy_client.output(mytxt, header=darkred(" @@ "))

    if ask or verbose or pretend:

        mytxt = "%s: %s" % (
            darkgreen(_("Packages needing to be installed")),
            darkgreen(const_convert_to_unicode(pkgs_install)),
        )
        entropy_client.output(
            mytxt,
            header=darkred(" @@ "))

        mytxt = "%s: %s" % (
            brown(_("Packages needing to be reinstalled")),
            brown(const_convert_to_unicode(pkgs_reinstall)),
        )
        entropy_client.output(
            mytxt,
            header=darkred(" @@ "))

        mytxt = "%s: %s" % (
            blue(_("Packages needing to be updated")),
            blue(const_convert_to_unicode(pkgs_update)),
        )
        entropy_client.output(
            mytxt,
            header=darkred(" @@ "))

        mytxt = "%s: %s" % (
            darkred(_("Packages needing to be downgraded")),
            darkred(const_convert_to_unicode(pkgs_downgrade)),
        )
        entropy_client.output(
            mytxt,
            header=darkred(" @@ "))

    if download_size > 0:
        mysize = const_convert_to_unicode(
            entropy.tools.bytes_into_human(download_size))
    else:
        mysize = const_convert_to_unicode("0b")

    mytxt = "%s: %s" % (
        blue(_("Download size")),
        bold(mysize),
    )
    entropy_client.output(
        mytxt,
        header=darkred(" @@ "))

    if delta_size > 0:
        mysizetxt = _("Used disk space")
    else:
        mysizetxt = _("Freed disk space")
        delta_size = -delta_size
    delta_human = entropy.tools.bytes_into_human(delta_size)

    mytxt = "%s: %s" % (
        blue(mysizetxt),
        bold(delta_human),
    )
    entropy_client.output(mytxt, header=darkred(" @@ "))

    if needed_size < 0:
        needed_size = -needed_size

    mytxt = "%s: %s %s" % (
        blue(_("You need at least")),
        bold(entropy.tools.bytes_into_human(needed_size)),
        blue(_("of free space")),
    )
    entropy_client.output(
        mytxt,
        header=darkred(" @@ "))

    # check for disk space and print a warning
    target_dir = etpConst['entropyunpackdir']
    while not os.path.isdir(target_dir):
        target_dir = os.path.dirname(target_dir)
    size_match = entropy.tools.check_required_space(
        target_dir, needed_size)

    if not size_match:
        mytxt = "%s: %s" % (
            blue(_("You don't have enough space for "
                   "the installation. Free some space into")),
            darkred(target_dir),)

        entropy_client.output(
            bold(_("Attention")),
            header=darkred(" !!! "))
        entropy_client.output(
            bold(_("Attention")),
            header=darkred(" !!! "))
        entropy_client.output(
            mytxt,
            header=darkred(" !!! "))
        entropy_client.output(
            bold(_("Attention")),
            header=darkred(" !!! "))
        entropy_client.output(
            bold(_("Attention")),
            header=darkred(" !!! "))
def _match_checksum(self, download_path, repository_id, checksum,
                    signatures):
    """
    Verify package checksum and return an exit status code.
    """
    download_path_mtime = download_path + etpConst['packagemtimefileext']

    misc_settings = self._entropy.ClientSettings()['misc']
    enabled_hashes = misc_settings['packagehashes']

    def do_mtime_validation():
        enc = etpConst['conf_encoding']
        try:
            with codecs.open(download_path_mtime, "r", encoding=enc) \
                    as mt_f:
                stored_mtime = mt_f.read().strip()
        except (OSError, IOError) as err:
            if err.errno != errno.ENOENT:
                raise
            return 1

        try:
            cur_mtime = str(os.path.getmtime(download_path))
        except (OSError, IOError) as err:
            if err.errno != errno.ENOENT:
                raise
            return 2

        if cur_mtime == stored_mtime:
            return 0
        return 1

    def do_store_mtime():
        enc = etpConst['conf_encoding']
        try:
            with codecs.open(download_path_mtime, "w", encoding=enc) \
                    as mt_f:
                cur_mtime = str(os.path.getmtime(download_path))
                mt_f.write(cur_mtime)
        except (OSError, IOError) as err:
            if err.errno != errno.ENOENT:
                raise

    def do_compare_gpg(pkg_path, hash_val):
        try:
            repo_sec = self._entropy.RepositorySecurity()
        except RepositorySecurity.GPGServiceNotAvailable:
            return None

        # check if we have repository pubkey
        try:
            if not repo_sec.is_pubkey_available(repository_id):
                return None
        except repo_sec.KeyExpired:
            # key is expired
            return None

        # write gpg signature to disk for verification
        tmp_fd, tmp_path = const_mkstemp(prefix="do_compare_gpg")
        with os.fdopen(tmp_fd, "w") as tmp_f:
            tmp_f.write(hash_val)

        try:
            # actually verify
            valid, err_msg = repo_sec.verify_file(
                repository_id, pkg_path, tmp_path)
        finally:
            os.remove(tmp_path)

        if valid:
            return True

        if err_msg:
            self._entropy.output(
                "%s: %s, %s" % (
                    darkred(_("Package signature verification error for")),
                    purple("GPG"),
                    err_msg,
                ),
                importance=0,
                level="error",
                header=darkred(" ## "))
        return False

    signature_vry_map = {
        'sha1': entropy.tools.compare_sha1,
        'sha256': entropy.tools.compare_sha256,
        'sha512': entropy.tools.compare_sha512,
        'gpg': do_compare_gpg,
    }

    def do_signatures_validation(signatures):
        # check signatures, if available
        if isinstance(signatures, dict):
            for hash_type in sorted(signatures):
                hash_val = signatures[hash_type]
                # NOTE: workaround bug on unreleased
                # entropy versions
                if hash_val in signatures:
                    continue
                if hash_val is None:
                    continue
                if hash_type not in enabled_hashes:
                    self._entropy.output(
                        "%s %s" % (
                            purple(hash_type.upper()),
                            darkgreen(_("disabled")),
                        ),
                        importance=0,
                        level="info",
                        header=" : ")
                    continue

                cmp_func = signature_vry_map.get(hash_type)
                if cmp_func is None:
                    continue

                down_name = os.path.basename(download_path)
                valid = cmp_func(download_path, hash_val)
                if valid is None:
                    self._entropy.output(
                        "[%s] %s '%s' %s" % (
                            brown(down_name),
                            darkred(_("Package signature verification")),
                            purple(hash_type.upper()),
                            darkred(_("temporarily unavailable")),
                        ),
                        importance=0,
                        level="warning",
                        header=darkred(" ## "))
                    continue

                if not valid:
                    self._entropy.output(
                        "[%s] %s: %s %s" % (
                            brown(down_name),
                            darkred(_("Package signature")),
                            purple(hash_type.upper()),
                            darkred(_("does not match the recorded one")),
                        ),
                        importance=0,
                        level="error",
                        header=darkred(" ## "))
                    return 1

                self._entropy.output(
                    "[%s] %s %s" % (
                        brown(down_name),
                        purple(hash_type.upper()),
                        darkgreen(_("validated")),
                    ),
                    importance=0,
                    level="info",
                    header=" : ")
        return 0

    self._entropy.output(
        blue(_("Checking package checksum...")),
        importance=0,
        level="info",
        header=red(" ## "))

    download_name = os.path.basename(download_path)

    valid_checksum = False
    try:
        valid_checksum = entropy.tools.compare_md5(
            download_path, checksum)
    except (OSError, IOError) as err:
        valid_checksum = False
        const_debug_write(
            __name__,
            "_match_checksum: %s checksum validation error: %s" % (
                download_path, err))
        txt = "%s: %s, %s" % (
            red(_("Checksum validation error")),
            blue(download_name),
            err,
        )
        self._entropy.output(
            txt,
            importance=1,
            level="error",
            header=darkred(" ## "))
        return 1

    if not valid_checksum:
        txt = "%s: %s" % (
            red(_("Invalid checksum")),
            blue(download_name),
        )
        self._entropy.output(
            txt,
            importance=1,
            level="warning",
            header=red(" ## "))
        return 1

    # check if package has been already checked
    validated = True
    if do_mtime_validation() != 0:
        validated = do_signatures_validation(signatures) == 0

    if not validated:
        txt = "%s: %s" % (
            red(_("Invalid signatures")),
            blue(download_name),
        )
        self._entropy.output(
            txt,
            importance=1,
            level="warning",
            header=red(" ## "))
        return 1

    do_store_mtime()
    return 0
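# Illustrative sketch (not part of the original code): the mtime-based
# shortcut used by _match_checksum(), reduced to standalone helpers. The
# idea is to cache the package file's mtime next to it; if the stored and
# current mtimes match, the expensive signature checks were already done
# for this exact file and can be skipped. The ".mtime" suffix here is an
# assumption for the sketch.

import os

def already_validated(package_path, mtime_suffix=".mtime"):
    """Return True if the cached mtime matches the file's current mtime."""
    mtime_path = package_path + mtime_suffix
    try:
        with open(mtime_path, "r", encoding="utf-8") as mtime_f:
            stored = mtime_f.read().strip()
        current = str(os.path.getmtime(package_path))
    except OSError:
        return False  # no cache or no file: validate from scratch
    return stored == current

def mark_validated(package_path, mtime_suffix=".mtime"):
    """Store the file's current mtime so future checks can be skipped."""
    with open(package_path + mtime_suffix, "w", encoding="utf-8") as mtime_f:
        mtime_f.write(str(os.path.getmtime(package_path)))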
def _transceive(self, uri):

    fine = set()
    broken = set()
    fail = False
    crippled_uri = EntropyTransceiver.get_uri_name(uri)
    action = 'push'
    if self.download:
        action = 'pull'
    elif self.remove:
        action = 'remove'

    try:
        txc = EntropyTransceiver(uri)
        if const_isnumber(self.speed_limit):
            txc.set_speed_limit(self.speed_limit)
        txc.set_output_interface(self._entropy)
    except TransceiverConnectionError:
        print_traceback()
        return True, fine, broken  # issues

    maxcount = len(self.myfiles)
    counter = 0

    with txc as handler:

        for mypath in self.myfiles:

            base_dir = self.txc_basedir

            if isinstance(mypath, tuple):
                if len(mypath) < 2:
                    continue
                base_dir, mypath = mypath

            if not handler.is_dir(base_dir):
                handler.makedirs(base_dir)

            mypath_fn = os.path.basename(mypath)
            remote_path = os.path.join(base_dir, mypath_fn)

            syncer = handler.upload
            myargs = (mypath, remote_path)
            if self.download:
                syncer = handler.download
                local_path = os.path.join(self.local_basedir, mypath_fn)
                myargs = (remote_path, local_path)
            elif self.remove:
                syncer = handler.delete
                myargs = (remote_path,)

            fallback_syncer, fallback_args = None, None
            # upload -> remote copy heuristic support
            # if a package file might have been already uploaded
            # to remote mirror, try to look in other repositories'
            # package directories if a file, with the same md5 and name
            # is already available. In this case, use remote copy instead
            # of upload to save bandwidth.
            if self._copy_herustic and (syncer == handler.upload):
                # remote copy heuristic support enabled
                # we are uploading
                new_syncer, new_args = self._copy_herustic_support(
                    handler, mypath, base_dir, remote_path)
                if new_syncer is not None:
                    fallback_syncer, fallback_args = syncer, myargs
                    syncer, myargs = new_syncer, new_args
                    action = "copy"

            counter += 1
            tries = 0
            done = False
            lastrc = None

            while tries < 5:
                tries += 1
                self._entropy.output(
                    "[%s|#%s|(%s/%s)] %s: %s" % (
                        blue(crippled_uri),
                        darkgreen(str(tries)),
                        blue(str(counter)),
                        bold(str(maxcount)),
                        blue(action),
                        red(os.path.basename(mypath)),
                    ),
                    importance=0,
                    level="info",
                    header=red(" @@ "))
                rc = syncer(*myargs)
                if (not rc) and (fallback_syncer is not None):
                    # if we have a fallback syncer, try it first
                    # before giving up.
                    rc = fallback_syncer(*myargs)
                if rc and not (self.download or self.remove):
                    remote_md5 = handler.get_md5(remote_path)
                    rc = self.handler_verify_upload(
                        mypath, uri, counter, maxcount, tries,
                        remote_md5=remote_md5)
                if rc:
                    self._entropy.output(
                        "[%s|#%s|(%s/%s)] %s %s: %s" % (
                            blue(crippled_uri),
                            darkgreen(str(tries)),
                            blue(str(counter)),
                            bold(str(maxcount)),
                            blue(action),
                            _("successful"),
                            red(os.path.basename(mypath)),
                        ),
                        importance=0,
                        level="info",
                        header=darkgreen(" @@ "))
                    done = True
                    fine.add(uri)
                    break
                else:
                    self._entropy.output(
                        "[%s|#%s|(%s/%s)] %s %s: %s" % (
                            blue(crippled_uri),
                            darkgreen(str(tries)),
                            blue(str(counter)),
                            bold(str(maxcount)),
                            blue(action),
                            brown(_("failed, retrying")),
                            red(os.path.basename(mypath)),
                        ),
                        importance=0,
                        level="warning",
                        header=brown(" @@ "))
                    lastrc = rc
                    continue

            if not done:
                self._entropy.output(
                    "[%s|(%s/%s)] %s %s: %s - %s: %s" % (
                        blue(crippled_uri),
                        blue(str(counter)),
                        bold(str(maxcount)),
                        blue(action),
                        darkred("failed, giving up"),
                        red(os.path.basename(mypath)),
                        _("error"),
                        lastrc,
                    ),
                    importance=1,
                    level="error",
                    header=darkred(" !!! "))

                if mypath not in self.critical_files:
                    self._entropy.output(
                        "[%s|(%s/%s)] %s: %s, %s..." % (
                            blue(crippled_uri),
                            blue(str(counter)),
                            bold(str(maxcount)),
                            blue(_("not critical")),
                            os.path.basename(mypath),
                            blue(_("continuing")),
                        ),
                        importance=1,
                        level="warning",
                        header=brown(" @@ "))
                    continue

                fail = True
                broken.add((uri, lastrc))
                # next mirror
                break

    return fail, fine, broken
def _print_advisory_information(self, entropy_client, advisory_data, key):
    """
    Print Security Advisory.
    """
    toc = []

    # print advisory code
    toc.append(
        blue(" @@ ") + \
        red("%s " % (_("Advisory Identifier"),)) + bold(key) + \
        red(" | ") + blue(advisory_data['url']))

    # title
    toc.append((darkgreen(" %s:" % (_("Title"),)),
        darkred(advisory_data['title'])))

    # description
    description = advisory_data['description'].split("\n")
    desc_text = darkgreen(" %s:" % (_("Description"),))
    for x in description:
        toc.append((desc_text, x.strip()))
        desc_text = " "

    for item in advisory_data['description_items']:
        desc_text = " %s " % (darkred("(*)"),)
        count = 8
        mystr = []
        for word in item.split():
            count -= 1
            mystr.append(word)
            if count < 1:
                toc.append((" ", desc_text + ' '.join(mystr)))
                desc_text = " "
                mystr = []
                count = 8
        if count < 8:
            toc.append((" ", desc_text + ' '.join(mystr)))

    # background
    if advisory_data['background']:
        background = advisory_data['background'].split("\n")
        bg_text = darkgreen(" %s:" % (_("Background"),))
        for x in background:
            toc.append((bg_text, purple(x.strip())))
            bg_text = " "

    # access
    if advisory_data['access']:
        toc.append((darkgreen(" %s:" % (_("Exploitable"),)),
            bold(advisory_data['access'])))

    # impact
    if advisory_data['impact']:
        impact = advisory_data['impact'].split("\n")
        imp_text = darkgreen(" %s:" % (_("Impact"),))
        for x in impact:
            toc.append((imp_text, brown(x.strip())))
            imp_text = " "

    # impact type
    if advisory_data['impacttype']:
        toc.append((darkgreen(" %s:" % (_("Impact type"),)),
            bold(advisory_data['impacttype'])))

    # revised
    if advisory_data['revised']:
        toc.append((darkgreen(" %s:" % (_("Revised"),)),
            brown(advisory_data['revised'])))

    # announced
    if advisory_data['announced']:
        toc.append((darkgreen(" %s:" % (_("Announced"),)),
            brown(advisory_data['announced'])))

    # synopsis
    synopsis = advisory_data['synopsis'].split("\n")
    syn_text = darkgreen(" %s:" % (_("Synopsis"),))
    for x in synopsis:
        toc.append((syn_text, x.strip()))
        syn_text = " "

    # references
    if advisory_data['references']:
        toc.append(darkgreen(" %s:" % (_("References"),)))
        for reference in advisory_data['references']:
            toc.append((" ", darkblue(reference)))

    # gentoo bugs
    if advisory_data['bugs']:
        toc.append(darkgreen(" %s:" % (_("Upstream bugs"),)))
        for bug in advisory_data['bugs']:
            toc.append((" ", darkblue(bug)))

    # affected
    if advisory_data['affected']:
        toc.append(darkgreen(" %s:" % (_("Affected"),)))
        for a_key in advisory_data['affected']:
            toc.append((" ", darkred(a_key)))
            affected_data = advisory_data['affected'][a_key][0]
            vul_vers = affected_data['vul_vers']
            unaff_vers = affected_data['unaff_vers']
            if vul_vers:
                toc.append((" ", brown("%s: " % (
                    _("vulnerable versions"),)) + ", ".join(vul_vers)))
            if unaff_vers:
                toc.append((" ", brown("%s: " % (
                    _("unaffected versions"),)) + ", ".join(unaff_vers)))

    # workaround
    workaround = advisory_data['workaround'].split("\n")
    if advisory_data['workaround']:
        work_text = darkgreen(" %s:" % (_("Workaround"),))
        for x in workaround:
            toc.append((work_text, darkred(x.strip())))
            work_text = " "

    # resolution
    if advisory_data['resolution']:
        res_text = darkgreen(" %s:" % (_("Resolution"),))
        resolutions = advisory_data['resolution']
        for resolution in resolutions:
            for x in resolution.split("\n"):
                toc.append((res_text, x.strip()))
                res_text = " "

    print_table(entropy_client, toc, cell_spacing=3)
def _apply_source(self, entropy_client, inst_repo, source_key, packages,
                  pretend, ignore_missing, multiple_versions):
    source = etpConst['install_sources'][source_key]
    reverse_install_sources = {
        0: _("unknown"),
        1: _("manual"),
        2: _("dependency")
    }
    other_source = _("other")
    source_txt = reverse_install_sources.get(source, other_source)

    packages = entropy_client.packages_expand(packages)

    package_ids = {}
    allfound = True
    for package in packages:
        if package in package_ids:
            continue
        pkg_ids, _rc = inst_repo.atomMatch(
            package, multiMatch=multiple_versions)
        if not multiple_versions:
            pkg_ids = set([pkg_ids])
        if _rc == 0:
            package_ids[package] = pkg_ids
        else:
            allfound = False
            entropy_client.output("!!! %s: %s %s." % (
                purple(_("Warning")),
                teal(const_convert_to_unicode(package)),
                purple(_("is not installed")),
            ))

    if not allfound:
        entropy_client.output(
            darkred(_("Some packages were not found")),
            level="info" if ignore_missing else "error",
            importance=1)
        if not ignore_missing:
            return 1

    for package in packages:
        if package not in package_ids:
            # Package was not found.
            continue
        for pkg_id in package_ids[package]:
            pkg_atom = inst_repo.retrieveAtom(pkg_id)
            current_source = inst_repo.getInstalledPackageSource(pkg_id)
            current_source_txt = reverse_install_sources.get(
                current_source, other_source)

            if current_source == source:
                txt = "%s: %s" % (brown(pkg_atom), _("no change"))
                entropy_client.output(txt, header=blue(" @@ "))
            else:
                txt = "%s: %s => %s" % (
                    brown(pkg_atom), current_source_txt, source_txt)
                entropy_client.output(txt, header=red(" !! "))
                if not pretend:
                    inst_repo.setInstalledPackageSource(pkg_id, source)

    return 0
def print_package_info(package_id, entropy_client, entropy_repository,
    installed_search = False, strict_output = False, extended = False,
    quiet = False, show_download_if_quiet = False,
    show_repo_if_quiet = False, show_desc_if_quiet = False,
    show_slot_if_quiet = False):
    """
    Print Entropy Package Metadata in a pretty and uniform way.
    """
    if quiet:
        atom = entropy_repository.retrieveAtom(package_id)
        if atom is None:
            return
        if not extended:
            atom = entropy.dep.dep_getkey(atom)

        repoinfo = ""
        if show_repo_if_quiet:
            repoinfo = "[%s] " % (entropy_repository.repository_id(),)

        desc = ""
        if show_desc_if_quiet:
            pkgdesc = entropy_repository.retrieveDescription(package_id)
            if pkgdesc is None:
                return
            desc = " %s" % (pkgdesc,)

        download = ""
        if show_download_if_quiet:
            pkgdown = entropy_repository.retrieveDownloadURL(package_id)
            if pkgdown is None:
                return
            download = " %s" % (pkgdown,)

        if show_slot_if_quiet:
            pkgslot = entropy_repository.retrieveSlot(package_id)
            if pkgslot is None:
                return
            atom += etpConst['entropyslotprefix']
            atom += pkgslot

        entropy_client.output(
            "%s%s%s%s" % (repoinfo, atom, desc, download,),
            level="generic")
        return

    corrupted_str = _("n/a")

    pkgatom = entropy_repository.retrieveAtom(package_id) or corrupted_str

    pkghome = entropy_repository.retrieveHomepage(package_id)
    if pkghome is None:
        pkghome = corrupted_str

    pkgslot = entropy_repository.retrieveSlot(package_id)
    if pkgslot is None:
        pkgslot = corrupted_str

    pkgver = entropy_repository.retrieveVersion(package_id)
    if pkgver is None:
        pkgver = corrupted_str

    pkgtag = entropy_repository.retrieveTag(package_id)
    if pkgtag is None:
        pkgtag = corrupted_str

    pkgrev = entropy_repository.retrieveRevision(package_id)
    if pkgrev is None:
        pkgrev = 0

    pkgdesc = entropy_repository.retrieveDescription(package_id)
    if pkgdesc is None:
        pkgdesc = corrupted_str

    pkgbranch = entropy_repository.retrieveBranch(package_id)
    if pkgbranch is None:
        pkgbranch = corrupted_str

    if not pkgtag:
        pkgtag = "NoTag"

    installed_ver = _("Not installed")
    installed_tag = _("n/a")
    installed_rev = _("n/a")
    if not installed_search:

        inst_repo = entropy_client.installed_repository()
        pkginstalled = inst_repo.atomMatch(
            entropy.dep.dep_getkey(pkgatom), matchSlot = pkgslot)
        if pkginstalled[1] == 0:
            idx = pkginstalled[0]
            installed_ver = inst_repo.retrieveVersion(idx)
            if installed_ver is None:
                installed_ver = corrupted_str
            installed_tag = inst_repo.retrieveTag(idx)
            if not installed_tag:
                installed_tag = "NoTag"
            installed_rev = inst_repo.retrieveRevision(idx)
            if installed_rev is None:
                installed_rev = const_convert_to_unicode("0")
            else:
                installed_rev = const_convert_to_unicode(installed_rev)

    toc = []

    entropy_client.output(red(" @@ %s: " % (_("Package"),)) + \
        bold(pkgatom) + \
        " " + blue("%s: " % (_("branch"),)) + bold(pkgbranch) + \
        ", [" + purple(str(entropy_repository.repository_id())) + "] ")

    if not strict_output and extended:
        pkgname = entropy_repository.retrieveName(package_id)
        if pkgname is None:
            pkgname = corrupted_str
        pkgcat = entropy_repository.retrieveCategory(package_id)
        if pkgcat is None:
            pkgcat = corrupted_str
        toc.append((darkgreen(" %s:" % (_("Category"),)),
            blue(pkgcat)))
        toc.append((darkgreen(" %s:" % (_("Name"),)),
            blue(pkgname)))

    if extended:

        pkgmasked = False
        masking_reason = ""
        package_id_masked, idmasking_reason = \
            entropy_repository.maskFilter(package_id)
        if package_id_masked == -1:
            pkgmasked = True
            masking_reason = ", %s" % (
                entropy_client.Settings()['pkg_masking_reasons'].get(
                    idmasking_reason),)

        toc.append((darkgreen(" %s:" % (_("Masked"),)),
            blue(str(pkgmasked)) + masking_reason,))

    avail_str = _("Available")
    if installed_search:
        avail_str = _("Installed")
    toc.append((
        darkgreen(" %s:" % (avail_str,)),
        blue("%s: " % (_("version"),)) + bold(pkgver) + \
        blue(" ~ tag: ") + bold(pkgtag) + \
        blue(" ~ %s: " % (_("revision"),)) + bold(str(pkgrev)),))

    if not installed_search:
        toc.append((darkgreen(" %s:" % (_("Installed"),)),
            blue("%s: " % (_("version"),)) + bold(installed_ver) + \
            blue(" ~ tag: ") + bold(installed_tag) + \
            blue(" ~ %s: " % (_("revision"),)) + bold(installed_rev),))

    if not strict_output:
        toc.append((darkgreen(" %s:" % (_("Slot"),)),
            blue(pkgslot),))

        if extended:
            pkgsize = entropy_repository.retrieveSize(package_id)
            pkgsize = entropy.tools.bytes_into_human(pkgsize)
            pkgbin = entropy_repository.retrieveDownloadURL(package_id)
            if pkgbin is None:
                pkgbin = corrupted_str
            pkgdigest = entropy_repository.retrieveDigest(package_id)
            if pkgdigest is None:
                pkgdigest = corrupted_str
            pkgsign = entropy_repository.retrieveSignatures(package_id)
            pkgdeps = entropy_repository.retrieveDependencies(package_id,
                extended = True, resolve_conditional_deps = False)
            pkgconflicts = entropy_repository.retrieveConflicts(package_id)
            depsorter = lambda x: entropy.dep.dep_getcpv(x[0])

            toc.append((darkgreen(" %s:" % (_("Size"),)),
                blue(pkgsize),))
            toc.append((darkgreen(" %s:" % (_("Download"),)),
                brown(pkgbin),))
            toc.append((darkgreen(" %s:" % (_("Checksum"),)),
                brown(pkgdigest),))

            if pkgsign:
                sha1, sha256, sha512, gpg = pkgsign
                if not sha1:
                    sha1 = _("N/A")
                if not sha256:
                    sha256 = _("N/A")
                toc.append((darkgreen(" %s:" % (_("SHA1"),)),
                    brown(sha1),))
                toc.append((darkgreen(" %s:" % (_("SHA256"),)),
                    brown(sha256),))
                if gpg:
                    gpg_str = _("Yes")
                else:
                    gpg_str = _("No")
                toc.append((darkgreen(" %s:" % (_("GPG"),)),
                    brown(gpg_str),))

            if pkgdeps:
                toc.append(darkred(" ##") + " " + \
                    darkgreen("%s:" % (_("Dependencies"),)))
                for pdep, p_id in sorted(pkgdeps, key = depsorter):
                    toc.append((" %s " % (brown("##"),),
                        "%s%s%s %s" % (
                            blue("["), p_id, blue("]"), brown(pdep),)))

                len_txt = " %s" % (brown("##"),)
                toc.append((len_txt, "%s:" % (blue(_("Legend")),),))
                dep_leg = show_dependencies_legend(
                    entropy_client, indent = "", get_data = True)
                toc.extend([(len_txt, x) for x in dep_leg])

            if pkgconflicts:
                toc.append(darkred(" ##") + " " + \
                    darkgreen("%s:" % (_("Conflicts"),)))
                for conflict in sorted(pkgconflicts, key = depsorter):
                    toc.append((" %s" % (darkred("##"),),
                        brown(conflict),))

    home_txt = " %s:" % (_("Homepage"),)
    home_lines = _formatted_print(
        entropy_client, pkghome, "", "", color = brown,
        min_chars = 15, get_data = True)
    for home_line in home_lines:
        toc.append((darkgreen(home_txt), home_line,))
        home_txt = " "

    if not strict_output:

        desc_txt = " %s:" % (_("Description"),)
        desc_lines = _formatted_print(
            entropy_client, pkgdesc, "", "", get_data = True)
        for desc_line in desc_lines:
            toc.append((darkgreen(desc_txt), purple(desc_line)))
            desc_txt = " "

        if extended:
            pkguseflags = entropy_repository.retrieveUseflags(package_id)
            use_txt = " %s:" % (_("USE flags"),)
            use_lines = _formatted_print(
                entropy_client, pkguseflags, "", "", color = teal,
                get_data = True)
            for use_line in use_lines:
                toc.append((darkgreen(use_txt), use_line))
                use_txt = " "

            chost, cflags, cxxflags = \
                entropy_repository.retrieveCompileFlags(package_id)
            sources = entropy_repository.retrieveSources(package_id)
            etpapi = entropy_repository.retrieveApi(package_id)
            if etpapi is None:
                etpapi = corrupted_str

            toc.append((darkgreen(" %s:" % (_("CHOST"),)),
                blue(chost)))
            toc.append((darkgreen(" %s:" % (_("CFLAGS"),)),
                blue(cflags)))
            toc.append((darkgreen(" %s:" % (_("CXXFLAGS"),)),
                blue(cxxflags)))

            if sources:
                sources_txt = " %s:" % (_("Sources"),)
                toc.append(darkgreen(sources_txt))
                for source in sources:
                    toc.append((" ", source,))

            toc.append((darkgreen(" %s:" % (_("Entry API"),)),
                purple(str(etpapi))))
            toc.append((darkgreen(" %s:" % (_("Compiled with"),)),
                blue(cflags)))

            pkgkeywords = ' '.join(
                sorted(entropy_repository.retrieveKeywords(package_id)))
            keyword_txt = " %s:" % (_("Keywords"),)
            keyword_lines = _formatted_print(
                entropy_client, pkgkeywords, "", "", color = brown,
                get_data = True)
            for keyword_line in keyword_lines:
                toc.append((darkgreen(keyword_txt), brown(keyword_line)))
                keyword_txt = " "

            mydate = entropy_repository.retrieveCreationDate(package_id)
            pkgcreatedate = _("n/a")
            if mydate:
                pkgcreatedate = \
                    entropy.tools.convert_unix_time_to_human_time(
                        float(mydate))

            toc.append((darkgreen(" %s:" % (_("Created"),)),
                purple(pkgcreatedate)))

        pkglic = entropy_repository.retrieveLicense(package_id)
        if pkglic is None:
            pkglic = corrupted_str
        toc.append((darkgreen(" %s:" % (_("License"),)),
            teal(pkglic)))

    print_table(entropy_client, toc, cell_spacing = 3)
def _download_file(self, url, download_path, digest=None, resume=True,
                   package_id=None, repository_id=None):
    """
    Internal method. Try to download the package file.
    """
    def do_stfu_rm(xpath):
        try:
            os.remove(xpath)
        except OSError:
            pass

    def do_get_md5sum(path):
        try:
            return entropy.tools.md5sum(path)
        except (IOError, OSError):
            return None

    download_path_dir = os.path.dirname(download_path)
    try:
        os.makedirs(download_path_dir, 0o755)
    except OSError as err:
        if err.errno != errno.EEXIST:
            const_debug_write(
                __name__,
                "_download_file.makedirs, %s, error: %s" % (
                    download_path_dir, err))
            return -1, 0, False

    fetch_abort_function = self._meta.get('fetch_abort_function')
    existed_before = False
    if os.path.isfile(download_path) and os.path.exists(download_path):
        existed_before = True

    avail_data = self._settings['repositories']['available']
    repo_data = avail_data[self._repository_id]
    basic_user = repo_data.get('username')
    basic_pwd = repo_data.get('password')
    https_validate_cert = not repo_data.get(
        'https_validate_cert') == "false"

    fetch_intf = self._entropy._url_fetcher(
        url, download_path, resume=resume,
        abort_check_func=fetch_abort_function,
        http_basic_user=basic_user,
        http_basic_pwd=basic_pwd,
        https_validate_cert=https_validate_cert)

    if (package_id is not None) and (repository_id is not None):
        self._setup_differential_download(
            self._entropy._url_fetcher, url,
            resume, download_path,
            repository_id, package_id)

    data_transfer = 0
    resumed = False
    try:
        # make sure that we don't need to abort already
        # doing the check here avoids timeouts
        if fetch_abort_function is not None:
            fetch_abort_function()

        fetch_checksum = fetch_intf.download()
        data_transfer = fetch_intf.get_transfer_rate()
        resumed = fetch_intf.is_resumed()
    except (KeyboardInterrupt, InterruptError):
        return -100, data_transfer, resumed

    except NameError:
        raise

    except Exception:
        if const_debug_enabled():
            self._entropy.output(
                "fetch_file:",
                importance=1,
                level="warning",
                header=red(" ## "))
            entropy.tools.print_traceback()
        if (not existed_before) or (not resume):
            do_stfu_rm(download_path)
        return -1, data_transfer, resumed

    if fetch_checksum == UrlFetcher.GENERIC_FETCH_ERROR:
        # !! not found
        # maybe we already have it?
        # this handles the case where network is unavailable
        # but file is already downloaded
        fetch_checksum = do_get_md5sum(download_path)
        if (fetch_checksum != digest) or fetch_checksum is None:
            return -3, data_transfer, resumed

    elif fetch_checksum == UrlFetcher.TIMEOUT_FETCH_ERROR:
        # maybe we already have it?
        # this handles the case where network is unavailable
        # but file is already downloaded
        fetch_checksum = do_get_md5sum(download_path)
        if (fetch_checksum != digest) or fetch_checksum is None:
            return -4, data_transfer, resumed

    if digest and (fetch_checksum != digest):
        # not properly downloaded
        if (not existed_before) or (not resume):
            do_stfu_rm(download_path)
        return -2, data_transfer, resumed

    return 0, data_transfer, resumed
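# Illustrative sketch (not part of the original code): the (status,
# data_transfer, resumed) tuple returned by _download_file() uses these
# numeric statuses; the constant names are hypothetical, added here only
# for readability.

FETCH_OK = 0
FETCH_GENERIC_ERROR = -1    # fetch failed for an unspecified reason
FETCH_CHECKSUM_ERROR = -2   # file downloaded but digest does not match
FETCH_NOT_FOUND = -3        # file not available on the mirror
FETCH_TIMEOUT = -4          # network timeout
FETCH_ABORTED = -100        # user or caller discarded the download

def fetch_failed(exit_st):
    return exit_st != FETCH_OK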
def _download_package(self, package_id, repository_id, download,
                      download_path, checksum, resume=True):

    avail_data = self._settings['repositories']['available']
    excluded_data = self._settings['repositories']['excluded']
    repo = self._entropy.open_repository(repository_id)

    # grab original repo, if any, and use it if available.
    # this is done in order to support the "equo repo merge" feature,
    # allowing client-side repository package metadata moves.
    original_repo = repo.getInstalledPackageRepository(package_id)

    if (original_repo != repository_id) and (
            original_repo not in avail_data) and (
                original_repo is not None):
        # build up a new uris list, at least try, hoping that
        # repository is just shadowing original_repo
        # for example: original_repo got copied to repository, without
        # copying packages, which would be useless. like it happens
        # with sabayon-weekly
        uris = self._build_uris_list(original_repo, repository_id)
    else:
        if original_repo in avail_data:
            uris = avail_data[original_repo]['packages'][::-1]
            if repository_id in avail_data:
                uris += avail_data[repository_id]['packages'][::-1]
        elif original_repo in excluded_data:
            uris = excluded_data[original_repo]['packages'][::-1]
            if repository_id in avail_data:
                uris += avail_data[repository_id]['packages'][::-1]
        else:
            uris = avail_data[repository_id]['packages'][::-1]

    remaining = set(uris)
    mirror_status = StatusInterface()

    mirrorcount = 0
    for uri in uris:

        if not remaining:
            # tried all the mirrors, quitting for error
            mirror_status.set_working_mirror(None)
            return 3

        mirror_status.set_working_mirror(uri)
        mirrorcount += 1
        mirror_count_txt = "( mirror #%s ) " % (mirrorcount, )
        url = uri + "/" + download

        # check if uri is sane
        if mirror_status.get_failing_mirror_status(uri) >= 30:
            # failing mirror, clamp the score to 30 for convenience
            mirror_status.set_failing_mirror_status(uri, 30)

            mytxt = mirror_count_txt
            mytxt += blue(" %s: ") % (_("Mirror"), )
            mytxt += red(self._get_url_name(uri))
            mytxt += " - %s." % (_("maximum failure threshold reached"), )
            self._entropy.output(mytxt, importance=1, level="warning",
                                 header=red(" ## "))

            if mirror_status.get_failing_mirror_status(uri) == 30:
                # bump to 75, then decay by 4 per skip, so the
                # threshold is not crossed again anytime soon
                mirror_status.add_failing_mirror(uri, 45)
            elif mirror_status.get_failing_mirror_status(uri) > 31:
                # decrement each time this point is reached;
                # once back below 30, equo will try the mirror again
                mirror_status.add_failing_mirror(uri, -4)
            else:
                # reset to 0: re-enable the mirror
                mirror_status.set_failing_mirror_status(uri, 0)

            remaining.discard(uri)
            continue

        do_resume = resume
        timeout_try_count = 50

        while True:

            txt = mirror_count_txt
            txt += blue("%s: ") % (_("Downloading from"), )
            txt += red(self._get_url_name(uri))
            self._entropy.output(txt, importance=1, level="warning",
                                 header=red(" ## "))

            resumed = False
            exit_st, data_transfer = self._try_edelta_fetch(
                url, download_path, checksum, do_resume)
            if exit_st > 0:
                # fall back to plain package file download
                exit_st, data_transfer, resumed = self._download_file(
                    url, download_path,
                    package_id=package_id,
                    repository_id=repository_id,
                    digest=checksum,
                    resume=do_resume)

            if exit_st == 0:
                txt = mirror_count_txt
                txt += "%s: " % (
                    blue(_("Successfully downloaded from")), )
                txt += red(self._get_url_name(uri))
                human_bytes = entropy.tools.bytes_into_human(
                    data_transfer)
                txt += " %s %s/%s" % (_("at"), human_bytes, _("second"), )
                self._entropy.output(txt, importance=1, level="info",
                                     header=red(" ## "))

                mirror_status.set_working_mirror(None)
                return 0

            elif resumed and (exit_st not in (-3, -4, -100,)):
                do_resume = False
                continue

            error_message = mirror_count_txt
            error_message += blue("%s: %s") % (
                _("Error downloading from"),
                red(self._get_url_name(uri)),
            )

            # something bad happened
            if exit_st == -1:
                error_message += " - %s." % (
                    _("file not available on this mirror"), )

            elif exit_st == -2:
                mirror_status.add_failing_mirror(uri, 1)
                error_message += " - %s." % (_("wrong checksum"), )

                # if nothing was transferred while resuming, the local
                # partial file is likely corrupt: disable resume and
                # retry the download from scratch.
                if (data_transfer < 1) and do_resume:
                    error_message += " %s." % (_("Disabling resume"), )
                    do_resume = False
                    continue

            elif exit_st == -3:
                mirror_status.add_failing_mirror(uri, 3)
                error_message += " - %s." % (_("not found"), )

            elif exit_st == -4:  # timeout!
                timeout_try_count -= 1
                if timeout_try_count > 0:
                    error_message += " - %s." % (
                        _("timeout, retrying on this mirror"), )
                else:
                    error_message += " - %s." % (
                        _("timeout, giving up"), )

            elif exit_st == -100:
                error_message += " - %s." % (_("discarded download"), )

            else:
                mirror_status.add_failing_mirror(uri, 5)
                error_message += " - %s." % (_("unknown reason"), )

            self._entropy.output(error_message, importance=1,
                                 level="warning", header=red(" ## "))

            if exit_st == -4:  # timeout
                if timeout_try_count > 0:
                    continue

            elif exit_st == -100:  # user discarded fetch
                mirror_status.set_working_mirror(None)
                return 1

            remaining.discard(uri)
            # no mirrors left to try: give up
            if not remaining:
                mirror_status.set_working_mirror(None)
                return 3
            break  # try the next mirror

    mirror_status.set_working_mirror(None)
    return 0
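# A simplified, self-contained take on the mirror penalty bookkeeping
# above. StatusInterface is the real keeper of this state; the class
# below is illustrative only and not part of the Entropy API. A mirror
# accumulates penalty points on failures; once it crosses the
# threshold it is skipped, and each skip decays the score so the
# mirror is eventually retried.
class MirrorScore(object):

    FAILING_THRESHOLD = 30  # same threshold as the code above

    def __init__(self):
        self._scores = {}

    def add_failure(self, uri, points=1):
        """Record a failure; heavier errors can pass more points."""
        self._scores[uri] = self._scores.get(uri, 0) + points

    def should_skip(self, uri):
        """True if the mirror crossed the failure threshold."""
        score = self._scores.get(uri, 0)
        if score < self.FAILING_THRESHOLD:
            return False
        # decay on every skip so the mirror comes back eventually
        self._scores[uri] = max(0, score - 4)
        return True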
def download(self, remote_path, save_path):
    self.__connect_if_not()

    path = os.path.join(self.__ftpdir, remote_path)
    tmp_save_path = save_path + EntropyUriHandler.TMP_TXC_FILE_EXT

    def writer(buf):
        # writing file buffer
        f.write(buf)
        self._commit_buffer_update(len(buf))
        self._update_speed()
        self._update_progress()
        self._speed_limit_loop()

    tries = 10
    done = False
    while tries:

        tries -= 1
        self._init_vars()
        self.__filekbcount = 0
        rc = ''

        try:
            # get the file size
            self.__filesize = self._get_file_size_compat(path)
            if self.__filesize:
                self.__filesize = round(
                    float(int(self.__filesize)) / 1000, 1)
                if self.__filesize == 0:
                    self.__filesize = 1
            elif not self.is_path_available(path):
                return False
            else:
                self.__filesize = 0

            with open(tmp_save_path, "wb") as f:
                rc = self.__ftpconn.retrbinary(
                    'RETR ' + path, writer, 8192)
                self._update_progress(force=True)

            done = rc.find("226") != -1
            if done:
                # download complete, atomic mv
                os.rename(tmp_save_path, save_path)

        except (IOError, self.ftplib.error_reply, socket.error) as e:
            # connection reset by peer
            print_traceback()
            mytxt = red("%s: %s, %s... #%s") % (
                _("Download issue"),
                repr(e),
                _("retrying"),
                tries + 1,
            )
            self.output(mytxt, importance=1, level="warning",
                        header=" ")
            self._reconnect()  # reconnect
            continue

        finally:
            if os.path.isfile(tmp_save_path):
                os.remove(tmp_save_path)

        return done

    # all retries exhausted
    return False
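# Minimal sketch of the temporary-file pattern used by download():
# write to a side file, then os.rename() it over the destination only
# once the transfer is known to be complete. os.rename() is atomic on
# POSIX filesystems within the same mount point, so readers never see
# a partial file. The helper name and suffix are hypothetical.
import os

def atomic_write(dest_path, data):
    """Write `data` (bytes) to `dest_path` atomically."""
    tmp_path = dest_path + ".__tmp__"
    try:
        with open(tmp_path, "wb") as tmp_f:
            tmp_f.write(data)
        os.rename(tmp_path, dest_path)  # atomic replace on POSIX
    finally:
        # clean up the side file if the rename never happened
        if os.path.isfile(tmp_path):
            os.remove(tmp_path)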
def _remove_content_from_system(self, installed_repository,
                                remove_atom, remove_config, sys_root,
                                protect_mask, removecontent_file,
                                automerge_metadata,
                                affected_directories,
                                affected_infofiles, preserved_mgr):
    """
    Remove installed package content (files/directories) from live
    system.

    @keyword automerge_metadata: Entropy "automerge metadata"
    @type automerge_metadata: dict
    """
    # load CONFIG_PROTECT and CONFIG_PROTECT_MASK
    misc_settings = self._entropy.ClientSettings()['misc']
    col_protect = misc_settings['collisionprotect']

    # remove files from system
    directories = set()
    directories_cache = set()
    not_removed_due_to_collisions = set()
    colliding_path_messages = set()

    if protect_mask is not None:
        protect, mask = protect_mask
    else:
        protect, mask = set(), set()
    protectskip = self._get_config_protect_skip()

    remove_content = None
    try:
        # simulate a removecontent list/set object
        remove_content = []
        if removecontent_file is not None:
            remove_content = Content.FileContentReader(
                removecontent_file)

        self._remove_content_from_system_loop(
            installed_repository, remove_atom,
            remove_content, remove_config,
            affected_directories, affected_infofiles,
            directories, directories_cache,
            preserved_mgr,
            not_removed_due_to_collisions,
            colliding_path_messages,
            automerge_metadata, col_protect, protect, mask,
            protectskip, sys_root)

    finally:
        if hasattr(remove_content, "close"):
            remove_content.close()

    if colliding_path_messages:
        self._entropy.output(
            "%s:" % (_("Collision found during removal of"), ),
            importance=1, level="warning", header=red(" ## "))
        for path in sorted(colliding_path_messages):
            self._entropy.output(purple(path), importance=0,
                                 level="warning", header=red(" ## "))
            self._entropy.logger.log(
                "[Package]", etpConst['logging']['normal_loglevel_id'],
                "Collision found during removal of %s - cannot "
                "overwrite" % (path, ))

    # Also drop the paths that could not be removed above from the
    # removecontent metadata. It has happened that boot services,
    # left in place due to collisions, were still dropped from their
    # runlevels by the postremove step.
    # Since this is a set, it is shared by reference: every other
    # holder of this object will see the update.
    if not_removed_due_to_collisions:
        def _filter(_path):
            return _path not in not_removed_due_to_collisions
        Content.filter_content_file(removecontent_file, _filter)

    # now handle directories, deepest paths first, and repeat until
    # a full pass removes nothing (parents may become empty as their
    # children go away)
    directories = sorted(directories, reverse=True)
    while True:
        taint = False
        for directory, dirtype in directories:
            mydir = "%s%s" % (sys_root, directory, )
            try:
                mylist = os.listdir(mydir)
            except OSError:
                continue
            if mylist:
                continue
            # directory symlinks are removed with os.remove(),
            # real directories with os.rmdir()
            remove_func = os.remove if dirtype == "link" else os.rmdir
            try:
                remove_func(mydir)
                taint = True
            except OSError:
                pass

        if not taint:
            break
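# Sketch of the directory pruning strategy above, detached from the
# Entropy internals: repeatedly try to remove empty directories,
# deepest paths first, until a full pass makes no progress. The
# function name is illustrative only.
import os

def prune_empty_dirs(paths):
    """Remove empty directories from `paths`, deepest first."""
    pending = sorted(paths, reverse=True)
    while True:
        removed_any = False
        for path in pending:
            try:
                if not os.listdir(path):
                    os.rmdir(path)
                    removed_any = True
            except OSError:
                pass  # vanished, not empty, or not a directory
        if not removed_any:
            break  # fixed point reached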
def _run_sync(self):
    self.updated = False
    sts = EntropyRepositoryBase  # repository status codes
    for repo in self.repo_ids:
        try:
            status = self._entropy.get_repository(repo).update(
                self._entropy, repo, self.force, self._gpg_feature)
        except PermissionDenied:
            status = sts.REPOSITORY_PERMISSION_DENIED_ERROR

        if status == sts.REPOSITORY_ALREADY_UPTODATE:
            self.already_updated = True
        elif status == sts.REPOSITORY_NOT_AVAILABLE:
            self.not_available += 1
        elif status == sts.REPOSITORY_UPDATED_OK:
            self.updated = True
            self.updated_repos.add(repo)
        elif status == sts.REPOSITORY_PERMISSION_DENIED_ERROR:
            self.not_available += 1
            self.sync_errors = True
        else:  # fallback
            self.not_available += 1

        if status == sts.REPOSITORY_UPDATED_OK:
            # execute post update repo hook
            self._run_post_update_repository_hook(repo)

    # keep them closed, but trigger schema updates
    self._entropy.close_repositories()
    self._entropy._validate_repositories()
    self._entropy.reopen_installed_repository()
    self._entropy.close_repositories()

    # clean caches, fetch security
    if self.updated:
        self._entropy.clear_cache()
        if self.fetch_security:
            self._update_security_advisories()

    for repo in self.repo_ids:
        try:
            dbc = self._entropy.open_repository(repo)
        except RepositoryError:
            # download failed and repo is not available, skip!
            continue
        try:
            self._entropy.repository_packages_spm_sync(repo, dbc)
        except Error:
            # EntropyRepository error, missing table?
            continue
    self._entropy.close_repositories()

    if self.sync_errors:
        self._entropy.output(
            red(_("Something bad happened. Please have a look.")),
            importance = 1,
            level = "warning",
            header = darkred(" @@ ")
        )
        return 128

    if self.updated:
        pkgs = self._entropy.clean_downloaded_packages(dry_run = True)
        number_of_pkgs = len(pkgs)
        if number_of_pkgs > 0:
            pkgs_size = entropy.tools.sum_file_sizes(pkgs)
            if pkgs_size > self._pkg_size_warning_th:
                self.need_packages_cleanup = True
                pkg_dirs = set((os.path.dirname(x) for x in pkgs))
                human_size = entropy.tools.bytes_into_human(pkgs_size)
                mytxt = "%s: %s %s %s." % (
                    teal(_("Packages")),
                    purple(ngettext("there is", "there are",
                                    number_of_pkgs)),
                    brown(str(number_of_pkgs)),
                    purple(ngettext(
                        "package file that could be removed",
                        "package files that could be removed",
                        number_of_pkgs)),
                )
                self._entropy.output(
                    mytxt,
                    importance = 1,
                    level = "info",
                    header = bold(" !!! ")
                )
                mytxt = "%s %s. %s:" % (
                    teal(_("They are taking up to")),
                    brown(human_size),
                    purple(_("Packages are stored in")),
                )
                self._entropy.output(
                    mytxt,
                    importance = 1,
                    level = "info",
                    header = bold(" !!! ")
                )
                for pkg_dir in pkg_dirs:
                    self._entropy.output(
                        brown(pkg_dir),
                        importance = 1,
                        level = "info",
                        header = bold(" ")
                    )

    return 0
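# Sketch of the cleanup warning policy in _run_sync(): collect the
# dry-run removal candidates, sum their sizes, and only warn when the
# total crosses a threshold. entropy.tools.sum_file_sizes() is the
# real entry point used above; the standalone helper below is an
# assumption for illustration.
import os

def cleanup_warning(pkg_paths, size_threshold):
    """Return (warn, total_bytes, directories) for the given files."""
    total = 0
    for path in pkg_paths:
        try:
            total += os.path.getsize(path)
        except OSError:
            pass  # file vanished meanwhile
    dirs = set(os.path.dirname(p) for p in pkg_paths)
    return total > size_threshold, total, dirs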
def _run(self, entropy_client): """ Mask/Unmask code logic. """ found_pkg_atoms = [] for package in self._packages: package_id, repoid = entropy_client.atom_match( package, mask_filter = False) if package_id == -1: mytxt = "!!! %s: %s %s." % ( purple(_("Warning")), teal(const_convert_to_unicode(package)), purple(_("is not available")), ) entropy_client.output("!!!", level="warning", importance=1) entropy_client.output(mytxt, level="warning", importance=1) entropy_client.output("!!!", level="warning", importance=1) if len(package) > 3: self._show_did_you_mean( entropy_client, package, from_installed=False) entropy_client.output("!!!", level="warning", importance=1) continue found_pkg_atoms.append(package) if not found_pkg_atoms: entropy_client.output( "%s." % ( darkred(_("No packages found")), ), level="error", importance=1) return 1 if self._ask or self._pretend: mytxt = "%s:" % ( blue(_("These are the packages that would be handled")), ) entropy_client.output( mytxt, header=red(" @@ ")) match_data = {} for package in found_pkg_atoms: matches, rc = entropy_client.atom_match( package, multi_match = True, multi_repo = True, mask_filter = False) match_data[package] = matches flags = darkgreen(" [") if self._action == "mask": flags += brown("M") else: flags += red("U") flags += darkgreen("] ") entropy_client.output( darkred(" ##") + flags + purple(package)) if rc == 0: # also show found pkgs for package_id, repository_id in matches: repo = entropy_client.open_repository(repository_id) atom = repo.retrieveAtom(package_id) entropy_client.output( " -> " + enlightenatom(atom)) if self._pretend: return 0 if self._ask: answer = entropy_client.ask_question( _("Would you like to continue?")) if answer == _("No"): return 0 for package, matches in match_data.items(): for match in matches: if self._action == "mask": done = entropy_client.mask_package_generic(match, package) else: done = entropy_client.unmask_package_generic(match, package) if not done: mytxt = "!!! %s: %s %s." % ( purple(_("Warning")), teal(const_convert_to_unicode(package)), purple(_("action not executed")), ) entropy_client.output("!!!", level="warning", importance=1) entropy_client.output(mytxt, level="warning", importance=1) entropy_client.output("!!!", level="warning", importance=1) entropy_client.output("Have a nice day.") return 0
def repository_packages_spm_sync(self, repository_identifier, repo_db,
                                 force=False):
    """
    Service method used to sync package names with Source Package
    Manager via metadata stored in Repository dbs collected at
    server-time. Source Package Manager can change package names,
    categories or slot and Entropy repositories must be kept in sync.

    In other words, it checks for /usr/portage/profiles/updates
    changes, though indirectly, since entropy.client cannot depend
    on Portage directly.

    @param repository_identifier: repository identifier to which the
        repo_db parameter is bound
    @type repository_identifier: string
    @param repo_db: repository database instance
    @type repo_db: entropy.db.EntropyRepository
    @return: bool stating if changes have been made
    @rtype: bool
    """
    inst_repo = self.installed_repository()
    if not inst_repo:
        # nothing to do if client db is not available
        return False

    self._treeupdates_repos.add(repository_identifier)

    do_rescan = False
    shell_rescan = os.getenv("ETP_TREEUPDATES_RESCAN")
    if shell_rescan:
        do_rescan = True

    # check database digest
    stored_digest = repo_db.retrieveRepositoryUpdatesDigest(
        repository_identifier)
    if stored_digest == -1:
        do_rescan = True

    # check stored value in client database
    client_digest = "0"
    if not do_rescan:
        client_digest = \
            inst_repo.retrieveRepositoryUpdatesDigest(
                repository_identifier)

    if do_rescan or (str(stored_digest) != str(client_digest)) or force:

        # reset database tables
        inst_repo.clearTreeupdatesEntries(repository_identifier)

        # load updates
        update_actions = repo_db.retrieveTreeUpdatesActions(
            repository_identifier)
        # now filter the required actions
        update_actions = inst_repo.filterTreeUpdatesActions(
            update_actions)

        if update_actions:

            mytxt = "%s: %s." % (
                bold(_("ATTENTION")),
                red(_("forcing packages metadata update")),
            )
            self.output(mytxt, importance=1, level="info",
                        header=darkred(" * "))
            mytxt = "%s %s." % (
                red(_("Updating system database using repository")),
                blue(repository_identifier),
            )
            self.output(mytxt, importance=1, level="info",
                        header=darkred(" * "))
            # run stuff
            inst_repo.runTreeUpdatesActions(update_actions)

        # store new digest into database
        inst_repo.setRepositoryUpdatesDigest(
            repository_identifier, stored_digest)
        # store new actions
        inst_repo.addRepositoryUpdatesActions(
            InstalledPackagesRepository.NAME, update_actions,
            self._settings['repositories']['branch'])
        inst_repo.commit()
        # clear client cache
        inst_repo.clearCache()
        return True

    return False
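# Sketch of the rescan decision implemented above: a repository-side
# digest is compared with the digest recorded in the installed
# packages repository, and any mismatch (or missing value, or the
# ETP_TREEUPDATES_RESCAN environment override) triggers a full
# treeupdates replay. The function name is illustrative only.
import os

def needs_treeupdates_rescan(repo_digest, client_digest, force=False):
    """True when the treeupdates actions should be re-applied."""
    if force:
        return True
    if os.getenv("ETP_TREEUPDATES_RESCAN"):
        return True
    if repo_digest == -1:  # no digest stored repository-side
        return True
    return str(repo_digest) != str(client_digest)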
def _hop(self, entropy_client, inst_repo):
    """
    Solo Hop command.
    """
    settings = entropy_client.Settings()

    # set the new branch
    if self._branch == settings['repositories']['branch']:
        mytxt = "%s: %s" % (
            darkred(_("Already on branch")),
            purple(self._branch),
        )
        entropy_client.output(mytxt, level="warning",
                              importance=1, header=bold(" !!! "))
        return 2

    old_repo_paths = []
    avail_data = settings['repositories']['available']
    for repoid in sorted(avail_data):
        old_repo_paths.append(avail_data[repoid]['dbpath'][:])

    old_branch = settings['repositories']['branch'][:]
    entropy_client.set_branch(self._branch)
    status = True

    repo_conf = settings.get_setting_files_data()['repositories']
    try:
        repo_intf = entropy_client.Repositories(
            None, force=False, fetch_security=False)
    except AttributeError as err:
        entropy_client.output("%s %s [%s]" % (
            purple(_("No repositories specified in")),
            teal(repo_conf),
            err,
        ), header=darkred(" * "), level="error", importance=1)
        status = False
    else:
        rc = repo_intf.sync()
        if rc and rc != 1:
            # rc != 1 means not all the repos have been downloaded
            status = False

    if status:
        inst_repo.moveSpmUidsToBranch(self._branch)

        mytxt = "%s: %s" % (
            darkgreen(_("Successfully switched to branch")),
            purple(self._branch),
        )
        entropy_client.output(mytxt, header=red(" @@ "))
        mytxt = "%s %s %s" % (
            brown(" ?? "),
            darkgreen(
                _("Now run 'equo upgrade' to "
                  "upgrade your distribution to")),
            purple(self._branch),
        )
        entropy_client.output(mytxt)
        return 0

    entropy_client.set_branch(old_branch)
    mytxt = "%s: %s" % (
        darkred(_("Unable to switch to branch")),
        purple(self._branch),
    )
    entropy_client.output(mytxt, level="error",
                          importance=1, header=bold(" !!! "))
    return 3
def __push_repo(self, entropy_server, repository_id): sys_settings_plugin_id = \ etpConst['system_settings_plugins_ids']['server_plugin'] srv_data = self._settings()[sys_settings_plugin_id]['server'] rss_enabled = srv_data['rss']['enabled'] mirrors_tainted, mirrors_errors, successfull_mirrors, \ broken_mirrors, check_data = \ entropy_server.Mirrors.sync_packages( repository_id, ask = self._ask, pretend = self._pretend) if mirrors_errors and not successfull_mirrors: entropy_server.output(red(_("Aborting !")), importance=1, level="error", header=darkred(" !!! ")) return 1 if not successfull_mirrors: return 0 if mirrors_tainted and (self._as_repository_id is None): commit_msg = None if self._ask and rss_enabled: # expected unicode out of here commit_msg = self._commit_message(entropy_server, successfull_mirrors) elif rss_enabled: commit_msg = const_convert_to_unicode("Automatic update") if commit_msg is None: commit_msg = const_convert_to_unicode("no commit message") ServerRssMetadata()['commitmessage'] = commit_msg if self._as_repository_id is not None: # change repository push location ServerSystemSettingsPlugin.set_override_remote_repository( self._settings(), repository_id, self._as_repository_id) sts = self.__sync_repo(entropy_server, repository_id) if sts == 0: # do not touch locking entropy_server.Mirrors.lock_mirrors(repository_id, False, unlock_locally = (self._as_repository_id is None)) if sts != 0: entropy_server.output(red(_("Aborting !")), importance=1, level="error", header=darkred(" !!! ")) return sts if self._ask: q_rc = entropy_server.ask_question( _("Should I cleanup old packages on mirrors ?")) if q_rc == _("No"): return 0 # fall through done = entropy_server.Mirrors.tidy_mirrors( repository_id, ask = self._ask, pretend = self._pretend) if not done: return 1 return 0
def _add_packages(self, entropy_server, repository_id, packages): """ Add the given Source Package Manager packages to the given Entropy repository. """ def asker(spm_name): entropy_server.output(darkred(spm_name), header=brown(" # ")) rc = entropy_server.ask_question(_("Add this package?")) return rc == _("Yes") if self._interactive: entropy_server.output(blue(_("Select packages to add")), header=brown(" @@ ")) packages = list(filter(asker, packages)) if not packages: entropy_server.output(red(_("Nothing to add")), header=brown(" @@ "), importance=1) return 0 entropy_server.output(blue(_("These would be added or updated")), header=brown(" @@ ")) for spm_name in packages: spm_name_txt = purple(spm_name) # TODO: this is a SPM package, we should use SPM functions spm_key = entropy.dep.dep_getkey(spm_name) try: spm_slot = entropy_server.Spm().get_installed_package_metadata( spm_name, "SLOT") spm_repo = entropy_server.Spm().get_installed_package_metadata( spm_name, "repository") except KeyError: spm_slot = None spm_repo = None # inform user about SPM repository sources moves etp_repo = None if spm_repo is not None: pkg_id, repo_id = entropy_server.atom_match( spm_key, match_slot=spm_slot) if pkg_id != -1: repo_db = entropy_server.open_repository(repo_id) etp_repo = repo_db.retrieveSpmRepository(pkg_id) if (etp_repo is not None) and \ (etp_repo != spm_repo): spm_name_txt += ' [%s {%s=>%s}]' % ( bold(_("warning")), darkgreen(etp_repo), blue(spm_repo), ) entropy_server.output(spm_name_txt, header=brown(" # ")) if self._ask: rc = entropy_server.ask_question("%s (%s %s)" % ( _("Would you like to package them now ?"), _("inside"), repository_id, )) if rc == _("No"): return 0 problems = entropy_server._check_config_file_updates() if problems: return 1 generated, exit_st = self._compress_packages(entropy_server, repository_id, packages) if exit_st is not None: return exit_st etp_pkg_files = [(pkg_list, False) for pkg_list in generated] package_ids = entropy_server.add_packages_to_repository( repository_id, etp_pkg_files) entropy_server.commit_repositories() if package_ids: entropy_server.extended_dependencies_test([repository_id]) entropy_server.output("%s: %d" % ( blue(_("Packages handled")), len(package_ids), ), header=darkgreen(" * ")) return 0
def handler_verify_upload(self, local_filepath, uri, counter,
                          maxcount, tries, remote_md5=None):

    crippled_uri = EntropyTransceiver.get_uri_name(uri)

    self._entropy.output("[%s|#%s|(%s/%s)] %s: %s" % (
        blue(crippled_uri),
        darkgreen(str(tries)),
        blue(str(counter)),
        bold(str(maxcount)),
        darkgreen(_("verifying upload (if supported)")),
        blue(os.path.basename(local_filepath)),
    ), importance=0, level="info", header=red(" @@ "), back=True)

    valid_remote_md5 = True
    # if the remote server supports MD5 commands, remote_md5 is filled
    if const_isstring(remote_md5):
        valid_md5 = is_valid_md5(remote_md5)
        ckres = False
        if valid_md5:  # seems valid
            ckres = compare_md5(local_filepath, remote_md5)

        if ckres:
            self._entropy.output("[%s|#%s|(%s/%s)] %s: %s: %s" % (
                blue(crippled_uri),
                darkgreen(str(tries)),
                blue(str(counter)),
                bold(str(maxcount)),
                blue(_("digest verification")),
                os.path.basename(local_filepath),
                darkgreen(_("so far, so good!")),
            ), importance=0, level="info", header=red(" @@ "))
            return True

        elif not valid_md5:
            # malformed md5 string, cannot verify: just warn
            self._entropy.output("[%s|#%s|(%s/%s)] %s: %s: %s" % (
                blue(crippled_uri),
                darkgreen(str(tries)),
                blue(str(counter)),
                bold(str(maxcount)),
                blue(_("digest verification")),
                os.path.basename(local_filepath),
                bold(_("malformed md5 provided to function")),
            ), importance=0, level="warning", header=brown(" @@ "))

        else:
            # digest mismatch: the upload is corrupt
            self._entropy.output("[%s|#%s|(%s/%s)] %s: %s: %s" % (
                blue(crippled_uri),
                darkgreen(str(tries)),
                blue(str(counter)),
                bold(str(maxcount)),
                blue(_("digest verification")),
                os.path.basename(local_filepath),
                bold(_("remote md5 is invalid")),
            ), importance=0, level="warning", header=brown(" @@ "))
            valid_remote_md5 = False

    # True unless a well-formed remote md5 failed to match; when
    # remote verification is unsupported, assume the upload is fine
    return valid_remote_md5
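# Minimal sketch of the two checks handler_verify_upload() relies on:
# a well-formedness test for the remote md5 string and a comparison
# against the locally computed digest. hashlib is used directly here;
# the original code delegates to is_valid_md5()/compare_md5(). The
# helper names below are illustrative only.
import hashlib
import re

_MD5_RE = re.compile(r"^[a-fA-F0-9]{32}$")

def is_valid_md5_string(candidate):
    """True if `candidate` looks like a hex md5 digest."""
    return bool(_MD5_RE.match(candidate))

def file_matches_md5(path, remote_md5):
    """Compare the md5 of the file at `path` with `remote_md5`."""
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        # hash in chunks to keep memory usage flat on large files
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            md5.update(chunk)
    return md5.hexdigest() == remote_md5.lower()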
def _commit(self, entropy_server): key_sorter = lambda x: entropy_server.open_repository(x[ 1]).retrieveAtom(x[0]) repository_id = entropy_server.repository() # First of all, open the repository in write mode # in order to trigger package name updates on SPM. # Failing to do so would cause false positives on the # removal list. entropy_server.open_server_repository(repository_id, read_only=False, no_upload=True) to_be_added = set() to_be_removed = set() to_be_injected = set() entropy_server.output(brown(_("Scanning...")), importance=1) if self._repackage: repack_added = self._repackage_scan(entropy_server) if not repack_added: entropy_server.output(red( _("No valid packages to repackage.")), header=brown(" * "), importance=1, level="error") return 1 to_be_added |= repack_added else: (scan_added, scan_removed, scan_injected) = entropy_server.scan_package_changes() to_be_added |= set((x[0] for x in scan_added)) to_be_removed |= scan_removed to_be_injected |= scan_injected if self._packages: to_be_removed.clear() to_be_injected.clear() def pkg_filter(spm_name): if spm_name in to_be_added: return spm_name try: inst_spm_name = entropy_server.Spm( ).match_installed_package(spm_name) except KeyError: entropy_server.output( "%s: %s" % (darkred(_("Invalid package")), bold(spm_name)), header=darkred(" !!! "), importance=1, level="warning") return None if inst_spm_name in to_be_added: return inst_spm_name return None to_be_added = set(map(pkg_filter, self._packages)) to_be_added.discard(None) if not (to_be_removed or to_be_added or to_be_injected): entropy_server.output(red(_("Zarro thinggz to do")), header=brown(" * "), importance=1) return 0 exit_st = 0 if to_be_injected: injected_s = sorted(to_be_injected, key=key_sorter) self._inject_packages(entropy_server, injected_s) if to_be_removed: removed_s = sorted(to_be_removed, key=key_sorter) self._remove_packages(entropy_server, removed_s) if to_be_added: # drop spm_uid, no longer needed added_s = sorted(to_be_added) exit_st = self._add_packages(entropy_server, repository_id, added_s) return exit_st
def _handle_config_protect(self, protect, mask, protectskip, fromfile, tofile, do_allocation_check=True, do_quiet=False): """ Handle configuration file protection. This method contains the logic for determining if a file should be protected from overwrite. """ protected = False do_continue = False in_mask = False tofile_os = tofile fromfile_os = fromfile if not const_is_python3(): tofile_os = const_convert_to_rawstring(tofile) fromfile_os = const_convert_to_rawstring(fromfile) if tofile in protect: protected = True in_mask = True elif os.path.dirname(tofile) in protect: protected = True in_mask = True else: tofile_testdir = os.path.dirname(tofile) old_tofile_testdir = None while tofile_testdir != old_tofile_testdir: if tofile_testdir in protect: protected = True in_mask = True break old_tofile_testdir = tofile_testdir tofile_testdir = os.path.dirname(tofile_testdir) if protected: # check if perhaps, file is masked, so unprotected if tofile in mask: protected = False in_mask = False elif os.path.dirname(tofile) in mask: protected = False in_mask = False else: tofile_testdir = os.path.dirname(tofile) old_tofile_testdir = None while tofile_testdir != old_tofile_testdir: if tofile_testdir in mask: protected = False in_mask = False break old_tofile_testdir = tofile_testdir tofile_testdir = os.path.dirname(tofile_testdir) if not os.path.lexists(tofile_os): protected = False # file doesn't exist # check if it's a text file if protected: protected = entropy.tools.istextfile(tofile) in_mask = protected if fromfile is not None: if protected and os.path.lexists(fromfile_os) and ( not os.path.exists(fromfile_os)) and ( os.path.islink(fromfile_os)): # broken symlink, don't protect self._entropy.logger.log( "[Package]", etpConst['logging']['normal_loglevel_id'], "WARNING!!! 
Failed to handle file protection for: " \ "%s, broken symlink in package" % ( tofile, ) ) msg = _("Cannot protect broken symlink") mytxt = "%s:" % (purple(msg), ) self._entropy.output(mytxt, importance=1, level="warning", header=brown(" ## ")) self._entropy.output(tofile, level="warning", header=brown(" ## ")) protected = False if not protected: return in_mask, protected, tofile, do_continue ## ## # file is protected # ##__________________## # check if protection is disabled for this element if tofile in protectskip: self._entropy.logger.log( "[Package]", etpConst['logging']['normal_loglevel_id'], "Skipping config file installation/removal, " \ "as stated in client.conf: %s" % (tofile,) ) if not do_quiet: mytxt = "%s: %s" % ( _("Skipping file installation/removal"), tofile, ) self._entropy.output(mytxt, importance=1, level="warning", header=darkred(" ## ")) do_continue = True return in_mask, protected, tofile, do_continue ## ## # file is protected (2) # ##______________________## prot_status = True if do_allocation_check: spm_class = self._entropy.Spm_class() tofile, prot_status = spm_class.allocate_protected_file( fromfile, tofile) if not prot_status: # a protected file with the same content # is already in place, so not going to protect # the same file twice protected = False return in_mask, protected, tofile, do_continue ## ## # file is protected (3) # ##______________________## oldtofile = tofile if oldtofile.find("._cfg") != -1: oldtofile = os.path.join(os.path.dirname(oldtofile), os.path.basename(oldtofile)[10:]) if not do_quiet: self._entropy.logger.log( "[Package]", etpConst['logging']['normal_loglevel_id'], "Protecting config file: %s" % (oldtofile, )) mytxt = red("%s: %s") % ( _("Protecting config file"), oldtofile, ) self._entropy.output(mytxt, importance=1, level="warning", header=darkred(" ## ")) return in_mask, protected, tofile, do_continue
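# Sketch of the ancestor walk used twice in _handle_config_protect():
# a path counts as protected if it, or any parent directory, appears
# in the protect set and none of them appear in the mask set. The
# real logic adds further checks (the file must exist and be a text
# file); the helper names below are illustrative only.
import os

def path_in_set(path, path_set):
    """True if `path` or any ancestor directory is in `path_set`."""
    current = path
    previous = None
    while current != previous:
        if current in path_set:
            return True
        previous = current
        current = os.path.dirname(current)
    return False

def is_config_protected(path, protect, mask):
    """Protected unless the path is also masked."""
    return path_in_set(path, protect) and not path_in_set(path, mask)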
def _fetch_phase(self):
    """
    Execute the fetch phase.
    """
    # one entry per package in multi_fetch_list
    m_fetch_len = len(self._meta['multi_fetch_list'])

    xterm_title = "%s: %s %s" % (
        _("Downloading"),
        m_fetch_len,
        ngettext("package", "packages", m_fetch_len),
    )
    self._entropy.set_title(xterm_title)

    txt = "%s: %s %s" % (
        blue(_("Downloading")),
        darkred("%s" % (m_fetch_len,)),
        ngettext("package", "packages", m_fetch_len),
    )
    self._entropy.output(
        txt,
        importance = 1,
        level = "info",
        header = red(" ## ")
    )

    exit_st, err_list = self._download_packages(
        self._meta['multi_fetch_list'])
    if exit_st == 0:
        return 0

    txt1 = _("Some packages cannot be fetched")
    txt2 = _("Try to update your repositories and retry")
    for line in (txt1, txt2,):
        self._entropy.output(
            "%s." % ( darkred(line), ),
            importance = 0,
            level = "info",
            header = red(" ## ")
        )

    self._entropy.output(
        "%s: %s" % (
            brown(_("Error")),
            exit_st,
        ),
        importance = 0,
        level = "info",
        header = red(" ## ")
    )

    for _pkg_id, repo, fname, cksum, _signatures in err_list:
        self._entropy.output(
            "[%s|%s] %s" % (
                blue(repo),
                darkgreen(cksum),
                darkred(fname),
            ),
            importance = 1,
            level = "error",
            header = darkred(" # ")
        )

    return exit_st
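# ngettext() selects the singular or plural msgid based on the count,
# which is why the banner above passes m_fetch_len twice. A minimal
# standalone rendition of the same pattern, without the Entropy
# translation catalog:
from gettext import ngettext

def download_banner(count):
    return "%s: %d %s" % (
        "Downloading", count,
        ngettext("package", "packages", count))

# download_banner(1) -> "Downloading: 1 package"
# download_banner(3) -> "Downloading: 3 packages"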
def _remove_content_from_system_loop( self, inst_repo, remove_atom, remove_content, remove_config, affected_directories, affected_infofiles, directories, directories_cache, preserved_mgr, not_removed_due_to_collisions, colliding_path_messages, automerge_metadata, col_protect, protect, mask, protectskip, sys_root): """ Body of the _remove_content_from_system() method. """ info_dirs = self._get_info_directories() # collect all the library paths to be preserved # in the final removal loop. preserved_lib_paths = set() if self.PRESERVED_LIBS_ENABLED: for _pkg_id, item, _ftype in remove_content: # determine without sys_root paths = self._handle_preserved_lib(item, remove_atom, preserved_mgr) if paths is not None: preserved_lib_paths.update(paths) for _pkg_id, item, _ftype in remove_content: if not item: continue # empty element?? sys_root_item = sys_root + item sys_root_item_encoded = sys_root_item if not const_is_python3(): # this is coming from the db, and it's pure utf-8 sys_root_item_encoded = const_convert_to_rawstring( sys_root_item, from_enctype=etpConst['conf_raw_encoding']) # collision check if col_protect > 0: if inst_repo.isFileAvailable(item) \ and os.path.isfile(sys_root_item_encoded): # in this way we filter out directories colliding_path_messages.add(sys_root_item) not_removed_due_to_collisions.add(item) continue protected = False in_mask = False if not remove_config: protected_item_test = sys_root_item (in_mask, protected, _x, do_continue) = self._handle_config_protect( protect, mask, protectskip, None, protected_item_test, do_allocation_check=False, do_quiet=True) if do_continue: protected = True # when files have not been modified by the user # and they are inside a config protect directory # we could even remove them directly if in_mask: oldprot_md5 = automerge_metadata.get(item) if oldprot_md5: try: in_system_md5 = entropy.tools.md5sum( protected_item_test) except (OSError, IOError) as err: if err.errno != errno.ENOENT: raise in_system_md5 = "?" if oldprot_md5 == in_system_md5: prot_msg = _("Removing config file, never modified") mytxt = "%s: %s" % ( darkgreen(prot_msg), blue(item), ) self._entropy.output(mytxt, importance=1, level="info", header=red(" ## ")) protected = False do_continue = False # Is file or directory a protected item? 
if protected:
    self._entropy.logger.log(
        "[Package]",
        etpConst['logging']['verbose_loglevel_id'],
        "[remove] Protecting config file: %s" % (sys_root_item, ))
    mytxt = "[%s] %s: %s" % (
        red(_("remove")),
        brown(_("Protecting config file")),
        sys_root_item,
    )
    self._entropy.output(mytxt, importance=1, level="warning",
                         header=red(" ## "))
    continue

try:
    os.lstat(sys_root_item_encoded)
except OSError as err:
    if err.errno in (errno.ENOENT, errno.ENOTDIR):
        continue  # skip file, does not exist
    raise
except UnicodeEncodeError:
    msg = _("This package contains a badly encoded file !!!")
    mytxt = brown(msg)
    self._entropy.output(red("QA: ") + mytxt, importance=1,
                         level="warning", header=darkred(" ## "))
    continue  # file has a really bad encoding

if os.path.isdir(sys_root_item_encoded) and \
        os.path.islink(sys_root_item_encoded):
    # os.path.isdir() follows symlinks, while S_ISDIR on the
    # lstat() result would return False for them: this combination
    # detects symlinks pointing to valid directories
    if sys_root_item not in directories_cache:
        # collect for Trigger
        affected_directories.add(item)
        directories.add((sys_root_item, "link"))
        directories_cache.add(sys_root_item)
    continue

if os.path.isdir(sys_root_item_encoded):
    # plain directory
    if sys_root_item not in directories_cache:
        # collect for Trigger
        affected_directories.add(item)
        directories.add((sys_root_item, "dir"))
        directories_cache.add(sys_root_item)
    continue

# anything else: a regular file or a symlink, possibly a broken
# directory symlink, which gets removed now.
# skip file removal if item is a preserved library.
if item in preserved_lib_paths:
    self._entropy.logger.log(
        "[Package]",
        etpConst['logging']['normal_loglevel_id'],
        "[remove] skipping removal of: %s" % (sys_root_item, ))
    continue

try:
    os.remove(sys_root_item_encoded)
except OSError as err:
    self._entropy.logger.log(
        "[Package]",
        etpConst['logging']['normal_loglevel_id'],
        "[remove] Unable to remove %s, error: %s" % (
            sys_root_item, err, ))
    continue

# collect for Trigger
dir_name = os.path.dirname(item)
affected_directories.add(dir_name)

# account for info files, if any
if dir_name in info_dirs:
    for _ext in self._INFO_EXTS:
        if item.endswith(_ext):
            affected_infofiles.add(item)
            break

# add its parent directory
dirobj = const_convert_to_unicode(
    os.path.dirname(sys_root_item_encoded))
if dirobj not in directories_cache:
    if os.path.isdir(dirobj) and os.path.islink(dirobj):
        directories.add((dirobj, "link"))
    elif os.path.isdir(dirobj):
        directories.add((dirobj, "dir"))
    directories_cache.add(dirobj)
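# Compact sketch of the three-way classification performed above by
# the isdir/islink checks; the function and return labels are
# illustrative only.
import os

def classify_path(path):
    """Return 'link-dir', 'dir', or 'file-like' for removal handling."""
    if os.path.isdir(path) and os.path.islink(path):
        return "link-dir"  # symlink pointing to a valid directory
    if os.path.isdir(path):
        return "dir"       # plain directory
    return "file-like"     # regular file, symlink, or broken link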
def _fetch_source(self, url, download_path):
    """
    Fetch the source code tarball(s).
    """
    self._entropy.output("%s: %s" % (
        blue(_("Downloading")),
        brown(url),
    ), importance=1, level="info", header=red(" ## "))

    lock = None
    try:
        lock = self.path_lock(download_path)
        with lock.exclusive():
            exit_st, data_transfer, _resumed = self._download_file(
                url, download_path, digest=None, resume=False)
    finally:
        if lock is not None:
            lock.close()

    if exit_st == 0:
        human_bytes = entropy.tools.bytes_into_human(data_transfer)
        txt = "%s: %s %s %s/%s" % (
            blue(_("Successfully downloaded from")),
            red(self._get_url_name(url)),
            _("at"),
            human_bytes,
            _("second"),
        )
        self._entropy.output(txt, importance=1, level="info",
                             header=red(" ## "))
        self._entropy.output("%s: %s" % (
            blue(_("Local path")),
            brown(download_path),
        ), importance=1, level="info", header=red(" # "))
        return exit_st

    error_message = "%s: %s" % (
        blue(_("Error downloading from")),
        red(self._get_url_name(url)),
    )
    if exit_st == -1:
        error_message += " - %s." % (
            _("file not available on this mirror"), )
    elif exit_st == -3:
        error_message += " - %s." % (_("not found"), )
    elif exit_st == -100:
        error_message += " - %s." % (_("discarded download"), )
    else:
        error_message += " - %s: %s" % (
            _("unknown reason"), exit_st, )
    self._entropy.output(error_message, importance=1, level="warning",
                         header=red(" ## "))
    return exit_st