def _set_root_columns(self, addl, pkg_info, pkg):
	"""Sets the indent level and formats the output.

	@param addl: already defined string to add to
	@param pkg_info: dictionary
	@param pkg: _emerge.Package instance
	@rtype string
	Modifies self.verboseadd
	"""
	if self.conf.quiet:
		# quiet mode: single compact line, no verbose trailer
		myprint = addl + " " + self.indent + \
			self.pkgprint(pkg_info.cp, pkg_info)
		myprint = myprint+" "+green(pkg_info.ver)+" "
		myprint = myprint+pkg_info.oldbest
		self.verboseadd = None
	else:
		if not pkg_info.merge:
			# non-merge operations show the operation name in the brackets
			addl = self.empty_space_in_brackets()
			myprint = "[%s%s] %s%s" % \
				(self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
				addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
		else:
			myprint = "[%s %s] %s%s" % \
				(self.pkgprint(pkg.type_name, pkg_info), addl,
				self.indent, self.pkgprint(pkg.cp, pkg_info))
		# pad with spaces to the "new package" column (nc_len ignores
		# color escape codes when measuring)
		if (self.newlp-nc_len(myprint)) > 0:
			myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
		myprint = myprint+green(" ["+pkg_info.ver+"] ")
		# pad again to the "old package" column before the oldbest text
		if (self.oldlp-nc_len(myprint)) > 0:
			myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
		myprint += pkg_info.oldbest
	return myprint
def priming_commit(self, myupdates, myremoved, commitmessage):
	"""Perform the priming (first-stage) commit of updated/removed files.

	@param myupdates: list of updated file paths
	@param myremoved: list of removed file paths
	@param commitmessage: commit message text
	Exits the process (sys.exit) if the vcs commit returns non-zero.
	"""
	myfiles = myupdates + myremoved
	commitmessagedir = tempfile.mkdtemp(".repoman.msg")
	commitmessagefile = os.path.join(commitmessagedir, "COMMIT_EDITMSG")
	with open(commitmessagefile, "wb") as mymsg:
		mymsg.write(_unicode_encode(commitmessage))

	separator = '-' * 78

	print()
	print(green("Using commit message:"))
	print(green(separator))
	print(commitmessage)
	print(green(separator))
	print()

	# Having a leading ./ prefix on file paths can trigger a bug in
	# the cvs server when committing files to multiple directories,
	# so strip the prefix.
	# NOTE: str.lstrip("./") strips a *character set*, so it would also
	# eat leading dots of names like "./.config" -> "config"; remove
	# exactly the two-character "./" prefix instead.
	myfiles = [f[2:] if f.startswith("./") else f for f in myfiles]

	retval = self.vcs_settings.changes.commit(myfiles, commitmessagefile)
	# cleanup the commit message before possibly exiting
	try:
		shutil.rmtree(commitmessagedir)
	except OSError:
		pass
	if retval != os.EX_OK:
		writemsg_level(
			"!!! Exiting on %s (shell) "
			"error code: %s\n" % (self.vcs_settings.vcs, retval),
			level=logging.ERROR, noiselevel=-1)
		sys.exit(retval)
def detect_conflicts(options):
	"""Determine if the checkout has cvs conflicts.

	TODO(antarus): Also this should probably not call sys.exit() as
	repoman is run on >1 packages and one failure should not cause
	subsequent packages to fail.

	@param options: parsed command-line options (only .pretend is read)
	Returns:
		None (calls sys.exit on fatal problems)
	"""
	# Raw strings: the backslashes are egrep escapes, not Python escapes
	# (non-raw '\?' raises a DeprecationWarning on modern Python).
	cmd = (r"cvs -n up 2>/dev/null | "
		r"egrep '^[^\?] .*' | "
		r"egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
	msg = ("Performing a %s with a little magic grep to check for updates."
		% green("cvs -n up"))
	logging.info(msg)
	# Use Popen instead of getstatusoutput(), in order to avoid
	# unicode handling problems (see bug #310789).
	args = [BASH_BINARY, "-c", cmd]
	args = [_unicode_encode(x) for x in args]
	proc = subprocess.Popen(
		args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	out = _unicode_decode(proc.communicate()[0])
	proc.wait()
	mylines = out.splitlines()
	myupdates = []
	for line in mylines:
		if not line:
			continue
		# [ ] Unmodified (SVN) [U] Updates [P] Patches
		# [M] Modified [A] Added [R] Removed / Replaced
		# [D] Deleted
		if line[0] not in " UPMARD":
			# Stray Manifest is fine, we will readd it anyway.
			if line[0] == '?' and line[1:].lstrip() == 'Manifest':
				continue
			logging.error(red(
				"!!! Please fix the following issues reported "
				"from cvs: %s" % green("(U,P,M,A,R,D are ok)")))
			logging.error(red(
				"!!! Note: This is a pretend/no-modify pass..."))
			logging.error(out)
			sys.exit(1)
		elif line[0] in "UP":
			myupdates.append(line[2:])
	if myupdates:
		logging.info(green("Fetching trivial updates..."))
		if options.pretend:
			logging.info("(cvs update " + " ".join(myupdates) + ")")
			retval = os.EX_OK
		else:
			retval = os.system("cvs update " + " ".join(myupdates))
		if retval != os.EX_OK:
			logging.fatal("!!! cvs exited with an error. Terminating.")
			sys.exit(retval)
	return False
def detect_conflicts(options):
	"""Determine if the svn checkout has problems like conflicts.

	TODO(antarus): Also this should probably not call sys.exit() as
	repoman is run on >1 packages and one failure should not cause
	subsequent packages to fail.

	@param options: parsed command-line options (only .pretend is read)
	Returns:
		False on success (calls sys.exit on fatal problems)
	"""
	cmd = "svn status -u 2>&1 | egrep -v '^. +.*/digest-[^/]+' | head -n-1"
	msg = "Performing a %s with a little magic grep to check for updates." % green("svn status -u")
	logging.info(msg)
	# Use Popen instead of getstatusoutput(), in order to avoid
	# unicode handling problems (see bug #310789).
	args = [BASH_BINARY, "-c", cmd]
	args = [_unicode_encode(x) for x in args]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE,
		stderr=subprocess.STDOUT)
	out = _unicode_decode(proc.communicate()[0])
	proc.wait()
	mylines = out.splitlines()
	myupdates = []
	for line in mylines:
		if not line:
			continue
		# [ ] Unmodified (SVN) [U] Updates [P] Patches
		# [M] Modified [A] Added [R] Removed / Replaced
		# [D] Deleted
		if line[0] not in " UPMARD":
			# Stray Manifest is fine, we will readd it anyway.
			if line[0] == "?" and line[1:].lstrip() == "Manifest":
				continue
			logging.error(
				red("!!! Please fix the following issues reported "
					"from cvs: %s" % green("(U,P,M,A,R,D are ok)"))
			)
			logging.error(red("!!! Note: This is a pretend/no-modify pass..."))
			logging.error(out)
			sys.exit(1)
		elif line[8] == "*":
			# column 9 of `svn status -u` output: '*' marks an
			# out-of-date item; strip the revision number that follows
			myupdates.append(line[9:].lstrip(" 1234567890"))
	if myupdates:
		logging.info(green("Fetching trivial updates..."))
		if options.pretend:
			logging.info("(svn update " + " ".join(myupdates) + ")")
			retval = os.EX_OK
		else:
			retval = os.system("svn update " + " ".join(myupdates))
		if retval != os.EX_OK:
			logging.fatal("!!! svn exited with an error. Terminating.")
			sys.exit(retval)
	return False
def detect_vcs_conflicts(options, vcs):
	"""Determine if the checkout has problems like cvs conflicts.

	If you want more vcs support here just keep adding if blocks...
	This could be better.

	TODO(antarus): Also this should probably not call sys.exit() as
	repoman is run on >1 packages and one failure should not cause
	subsequent packages to fail.

	Args:
		vcs - A string identifying the version control system in use
	Returns:
		None (calls sys.exit on fatal problems)
	"""
	retval = ("","")
	if vcs == 'cvs':
		logging.info("Performing a " + output.green("cvs -n up") + \
			" with a little magic grep to check for updates.")
		retval = subprocess_getstatusoutput("cvs -n up 2>/dev/null | " + \
			"egrep '^[^\?] .*' | " + \
			"egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
	if vcs == 'svn':
		logging.info("Performing a " + output.green("svn status -u") + \
			" with a little magic grep to check for updates.")
		retval = subprocess_getstatusoutput("svn status -u 2>&1 | " + \
			"egrep -v '^. +.*/digest-[^/]+' | " + \
			"head -n-1")
	if vcs in ['cvs', 'svn']:
		# retval is (exit status, combined output) from the grep pipeline
		mylines = retval[1].splitlines()
		myupdates = []
		for line in mylines:
			if not line:
				continue
			if line[0] not in " UPMARD":
				# unmodified(svn),Updates,Patches,Modified,Added,Removed/Replaced(svn),Deleted(svn)
				# Stray Manifest is fine, we will readd it anyway.
				if line[0] == '?' and line[1:].lstrip() == 'Manifest':
					continue
				logging.error(red("!!! Please fix the following issues reported " + \
					"from cvs: ")+green("(U,P,M,A,R,D are ok)"))
				logging.error(red("!!! Note: This is a pretend/no-modify pass..."))
				logging.error(retval[1])
				sys.exit(1)
			elif vcs == 'cvs' and line[0] in "UP":
				myupdates.append(line[2:])
			elif vcs == 'svn' and line[8] == '*':
				# '*' in column 9 marks an out-of-date item; strip the
				# trailing revision number before the path
				myupdates.append(line[9:].lstrip(" 1234567890"))
		if myupdates:
			logging.info(green("Fetching trivial updates..."))
			if options.pretend:
				logging.info("(" + vcs + " update " + " ".join(myupdates) + ")")
				retval = os.EX_OK
			else:
				retval = os.system(vcs + " update " + " ".join(myupdates))
			if retval != os.EX_OK:
				logging.fatal("!!! " + vcs + " exited with an error. Terminating.")
				sys.exit(retval)
def run(self):
	"the entry point for the distutils clean class"
	# NOTE: Python 2 code (print statement, os.path.walk)
	sys.stdout.write(blue("Cleaning headers:"))
	# remove headers previously copied into the wrapper source trees
	os.path.walk("Inventor", self.remove_headers, None)
	os.path.walk("VolumeViz", self.remove_headers, None)

	# remove the SWIG generated wrappers
	for wrapper_file in self.REMOVE_FILES:
		if os.path.isfile(wrapper_file):
			sys.stdout.write(' ' + turquoise(wrapper_file))
			os.remove(wrapper_file)
	print green(".")

	# delegate to the stock distutils clean for the rest
	clean.run(self)
def __init__(self, scanned_files, logger, searchlibs=None, searchbits=None,
		all_masks=None, masked_dirs=None):
	'''LibCheck init function.

	@param scanned_files: optional dictionary if the type created by
			scan_files().  Defaults to the class instance of scanned_files
	@param logger: python style Logging function to use for output.
	@param searchlibs: optional set() of libraries to search for. If defined
			it toggles several settings to configure this class for
			a target search rather than a broken libs search.
	@param searchbits: optional iterable of bit-widths ('32'/'64') to scan
	@param all_masks: mask data passed through to the search
	@param masked_dirs: directories excluded from the search
	'''
	self.scanned_files = scanned_files
	self.logger = logger
	self.searchlibs = searchlibs
	# sorted(None) raises TypeError, so only sort when searchbits was
	# actually supplied; default to checking both bit-widths.
	self.searchbits = sorted(searchbits) if searchbits else ['32', '64']
	self.all_masks = all_masks
	self.masked_dirs = masked_dirs
	self.logger.debug("\tLibCheck.__init__(), new searchbits: %s"
		% (self.searchbits))
	if searchlibs:
		# target search mode: report files depending on the given libs
		self.smsg = '\tLibCheck.search(), Checking for %s bit dependants'
		self.pmsg = yellow(" * ") + 'Files that depend on: %s (%s bits)'
		self.setlibs = self._setslibs
		self.check = self._checkforlib
	else:
		# default mode: report broken files with unresolved lib deps
		self.smsg = '\tLibCheck.search(), Checking for broken %s bit libs'
		self.pmsg = green(' * ') + bold('Broken files that require:') + ' %s (%s bits)'
		self.setlibs = self._setlibs
		self.check = self._checkbroken
	self.sfmsg = "\tLibCheck.search(); Total found: %(count)d libs, %(deps)d files in %(time)d milliseconds"
	self.alllibs = None
def thick_manifest(self, updates, headers, no_expansion, expansion):
	'''Create a thick manifest

	@param updates: iterable of updated file paths to inspect
	@param headers: list collecting files whose CVS headers will change
	@param no_expansion: set of files excluded from keyword expansion
	@param expansion: unused for CVS (kept for interface parity with svn)
	'''
	headerstring = r"'\$(Header|Id).*\$'"
	for path in updates:
		# for CVS, no_expansion contains files that are excluded from expansion
		if path in no_expansion:
			continue
		# egrep exit status 0 means the file contains a CVS keyword header
		status = repoman_getstatusoutput(
			"egrep -q %s %s" % (headerstring, portage._shell_quote(path)))[0]
		if status == 0:
			headers.append(path)
	print("%s have headers that will change." % green(str(len(headers))))
	print(
		"* Files with headers will"
		" cause the manifests to be changed and committed separately.")
def gettree(tree, config):
	"""Import and return the esearch db for the given portage tree.

	@param tree: "old" or "new" - which tree snapshot to import
	@param config: dict with 'esearchdbdir', 'esearchdbfile' and
		'needdbversion' entries
	@return: the imported db object
	Calls error(..., fatal=True) on unrecoverable problems.
	"""
	import errno

	emsg("Importing " + tree + " portage tree", config)
	if '.pyc' in config['esearchdbfile']:
		ext = ".pyc"
	else:
		ext = ".py"
	try:
		# symlink the configured db into a fixed temp name so the
		# import statements below can find it as a module
		target = tmp_prefix + tree + "tree" + ext
		if os.path.exists(target):
			os.unlink(target)
		os.symlink(os.path.join(config['esearchdbdir'],
			config['esearchdbfile']), target)
	except OSError as e:
		# a racing EEXIST is harmless; anything else is fatal
		if e.errno != errno.EEXIST:
			error(str(e), fatal=True)
	try:
		if tree == "old":
			from esyncoldtree import db
			try:
				from esyncoldtree import dbversion
				if dbversion < config['needdbversion']:
					outofdateerror()
			except ImportError:
				# very old index without a version marker
				outofdateerror()
		else:
			from esyncnewtree import db
	except ImportError:
		error("Could not find " + tree
			+ "esearch-index. Please run "
			+ green("eupdatedb") + " as root first", fatal=True)
	os.unlink(target)
	return db
def keyword(string, stable=True, hard_masked=False):
	"""Returns a keyword string."""
	if not stable and not hard_masked:
		# keyword masked
		return output.blue(string)
	return output.green(string) if stable else output.red(string)
def __str__(self):
	"""Render the status flags as a fixed-width column string.

	Columns: interactive, (re)install, slot/replace, fetch restriction,
	upgrade, downgrade, then an optional mask marker.
	"""
	cols = []
	cols.append(colorize("WARN", "I") if self.interactive else " ")
	if self.new or self.force_reinstall:
		cols.append(red("r") if self.force_reinstall else green("N"))
	else:
		cols.append(" ")
	if self.new_slot or self.replace:
		cols.append(yellow("R") if self.replace else green("S"))
	else:
		cols.append(" ")
	if self.fetch_restrict or self.fetch_restrict_satisfied:
		# lowercase green 'f' = restricted but already fetched
		cols.append(green("f") if self.fetch_restrict_satisfied else red("F"))
	else:
		cols.append(" ")
	cols.append(turquoise("U") if self.new_version else " ")
	cols.append(blue("D") if self.downgrade else " ")
	if self.mask is not None:
		cols.append(self.mask)
	return "".join(cols)
def priming_commit(self, myupdates, myremoved, commitmessage):
	"""Perform the priming (first-stage) vcs commit of updated/removed files.

	@param myupdates: list of updated file paths
	@param myremoved: list of removed file paths
	@param commitmessage: commit message text
	Exits the process (sys.exit) if the vcs commit returns non-zero.
	"""
	myfiles = myupdates + myremoved
	fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
	mymsg = os.fdopen(fd, "wb")
	mymsg.write(_unicode_encode(commitmessage))
	mymsg.close()

	separator = '-' * 78

	print()
	print(green("Using commit message:"))
	print(green(separator))
	print(commitmessage)
	print(green(separator))
	print()

	# Having a leading ./ prefix on file paths can trigger a bug in
	# the cvs server when committing files to multiple directories,
	# so strip the prefix.
	# NOTE: str.lstrip("./") strips a *character set*, so it would also
	# eat leading dots of names like "./.config" -> "config"; remove
	# exactly the two-character "./" prefix instead.
	myfiles = [f[2:] if f.startswith("./") else f for f in myfiles]

	commit_cmd = [self.vcs_settings.vcs]
	commit_cmd.extend(self.vcs_settings.vcs_global_opts)
	commit_cmd.append("commit")
	commit_cmd.extend(self.vcs_settings.vcs_local_opts)
	commit_cmd.extend(["-F", commitmessagefile])
	commit_cmd.extend(myfiles)

	try:
		if self.options.pretend:
			print("(%s)" % (" ".join(commit_cmd),))
		else:
			retval = spawn(commit_cmd, env=self.repo_settings.commit_env)
			if retval != os.EX_OK:
				writemsg_level(
					"!!! Exiting on %s (shell) "
					"error code: %s\n" % (self.vcs_settings.vcs, retval),
					level=logging.ERROR, noiselevel=-1)
				sys.exit(retval)
	finally:
		# always remove the temporary commit-message file
		try:
			os.unlink(commitmessagefile)
		except OSError:
			pass
def update_scroll(self):
	"""Advance the scrolling spinner one frame on stdout."""
	if self._return_early():
		return
	seq = self.scroll_sequence
	if self.spinpos >= len(seq):
		# second half of the cycle: walk the sequence backwards
		frame = seq[len(seq) - 1 - (self.spinpos % len(seq))]
		sys.stdout.write(darkgreen(" \b\b\b" + frame))
	else:
		sys.stdout.write(green("\b " + seq[self.spinpos]))
	sys.stdout.flush()
	# full cycle is forward + backward, hence 2 * len(seq)
	self.spinpos = (self.spinpos + 1) % (2 * len(seq))
def pivy_configure(self):
	"configure Pivy"
	# NOTE: Python 2 code (print statement, os.path.walk)
	print turquoise(self.PIVY_SNAKES)
	print blue("Platform...%s" % sys.platform)
	# verify the toolchain before touching any files
	self.check_python_version()
	self.check_swig_version(self.SWIG)
	self.check_coin_version()
	self.get_coin_features()
	if self.SOGUI:
		self.check_gui_bindings()

	if 'simvoleon' in self.MODULES and self.check_simvoleon_version():
		# locate the SIM Voleon headers (env var on win32, config
		# script elsewhere) and prepare the VolumeViz wrappers
		if sys.platform == "win32":
			INCLUDE_DIR = os.getenv("SIMVOLEONDIR") + "\\include"
		else:
			INCLUDE_DIR = self.do_os_popen("simvoleon-config --includedir")

		sys.stdout.write(blue("Preparing") + green(" VolumeViz ") + blue("headers:"))
		os.path.walk("VolumeViz", self.copy_and_swigify_headers, INCLUDE_DIR)
		print green(".")

	# same dance for the Coin headers / Inventor wrappers
	if sys.platform == "win32":
		INCLUDE_DIR = os.path.join(os.getenv("COINDIR"), "include")
	else:
		INCLUDE_DIR = self.do_os_popen("coin-config --includedir")

	sys.stdout.write(blue("Preparing") + green(" Inventor ") + blue("headers:"))
	os.path.walk("Inventor", self.copy_and_swigify_headers, INCLUDE_DIR)
	print green(".")
def _new_slot(self, pkg, pkg_info):
	"""New slot, mark it new.

	@returns addl: formatted slot info
	@returns myoldbest: installed version list
	Modifies self.counters.newslot, self.counters.binary
	"""
	addl = "".join([" ", green("NS"), pkg_info.fetch_symbol, " "])
	if pkg_info.ordered:
		self.counters.newslot += 1
		if pkg.type_name == "binary":
			self.counters.binary += 1
	return addl
def _get_installed_best(self, pkg, pkg_info):
	""" we need to use "--emptytree" testing here rather than "empty"
	param testing because "empty" param is used for -u, where you still
	*do* want to see when something is being upgraded.

	@param pkg: _emerge.Package.Package instance
	@param pkg_info: dictionary
	@rtype addl, myoldbest: list, myinslotlist: list
	Modifies self.counters.reinst, self.counters.binary, self.counters.new
	"""
	myoldbest = []
	myinslotlist = None
	installed_versions = self.vardb.match_pkgs(pkg.cp)
	if self.vardb.cpv_exists(pkg.cpv):
		# exact version already installed: this is a reinstall
		addl = " "+yellow("R")+pkg_info.fetch_symbol+" "
		installed_version = self.vardb.match_pkgs(pkg.cpv)[0]
		if not self.quiet_repo_display and installed_version.repo != pkg.repo:
			myoldbest = [installed_version]
		if pkg_info.ordered:
			if pkg_info.merge:
				self.counters.reinst += 1
				if pkg.type_name == "binary":
					self.counters.binary += 1
			elif pkg_info.operation == "uninstall":
				self.counters.uninst += 1
	# filter out old-style virtual matches
	elif installed_versions and \
		installed_versions[0].cp == pkg.cp:
		myinslotlist = self.vardb.match_pkgs(pkg.slot_atom)
		# If this is the first install of a new-style virtual, we
		# need to filter out old-style virtual matches.
		if myinslotlist and \
			myinslotlist[0].cp != pkg.cp:
			myinslotlist = None
		if myinslotlist:
			# another version is installed in the same slot
			myoldbest = myinslotlist[:]
			addl = self._insert_slot(pkg, pkg_info, myinslotlist)
		else:
			# installed versions exist but in other slots only
			myoldbest = installed_versions
			addl = self._new_slot(pkg, pkg_info)
		if self.conf.changelog:
			self.do_changelog(pkg, pkg_info)
	else:
		# nothing installed for this cp: brand new package
		addl = " " + green("N") + " " + pkg_info.fetch_symbol + " "
		if pkg_info.ordered:
			self.counters.new += 1
			if pkg.type_name == "binary":
				self.counters.binary += 1
	return addl, myoldbest, myinslotlist
def thick_manifest(self, myupdates, myheaders, no_expansion, expansion):
	"""Collect files whose vcs keyword headers will change on commit.

	@param myupdates: iterable of updated file paths to inspect
	@param myheaders: list collecting files with expanding headers
	@param no_expansion: (cvs) set of files excluded from expansion
	@param expansion: (svn) dict of file -> enabled svn:keywords
	"""
	# Raw strings: the backslashes are egrep escapes, not Python escapes
	# (non-raw '\$' raises a DeprecationWarning on modern Python).
	if self.vcs_settings.vcs == 'cvs':
		headerstring = r"'\$(Header|Id).*\$'"
	elif self.vcs_settings.vcs == "svn":
		svn_keywords = dict((k.lower(), k) for k in [
			"Rev",
			"Revision",
			"LastChangedRevision",
			"Date",
			"LastChangedDate",
			"Author",
			"LastChangedBy",
			"URL",
			"HeadURL",
			"Id",
			"Header",
		])

	for myfile in myupdates:

		# for CVS, no_expansion contains files that are excluded from expansion
		if self.vcs_settings.vcs == "cvs":
			if myfile in no_expansion:
				continue

		# for SVN, expansion contains files that are included in expansion
		elif self.vcs_settings.vcs == "svn":
			if myfile not in expansion:
				continue

			# Subversion keywords are case-insensitive
			# in svn:keywords properties,
			# but case-sensitive in contents of files.
			enabled_keywords = []
			for k in expansion[myfile]:
				keyword = svn_keywords.get(k.lower())
				if keyword is not None:
					enabled_keywords.append(keyword)

			headerstring = r"'\$(%s).*\$'" % "|".join(enabled_keywords)

		myout = repoman_getstatusoutput(
			"egrep -q %s %s" % (headerstring, portage._shell_quote(myfile)))
		if myout[0] == 0:
			myheaders.append(myfile)

	print("%s have headers that will change." % green(str(len(myheaders))))
	print(
		"* Files with headers will"
		" cause the manifests to be changed and committed separately.")
def _set_root_columns(self, pkg, pkg_info):
	"""Sets the indent level and formats the output.

	@param pkg: _emerge.Package.Package instance
	@param pkg_info: dictionary
	@rtype string
	Modifies self.verboseadd
	"""
	ver_str = pkg_info.ver
	if self.conf.verbosity == 3:
		# highest verbosity also shows slot and repository info
		ver_str = self._append_slot(ver_str, pkg, pkg_info)
		ver_str = self._append_repository(ver_str, pkg, pkg_info)
	if self.conf.quiet:
		# quiet mode: single compact line, no verbose trailer
		myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
			self.pkgprint(pkg_info.cp, pkg_info)
		myprint = myprint+" "+green(ver_str)+" "
		myprint = myprint+pkg_info.oldbest
		self.verboseadd = None
	else:
		if not pkg_info.merge:
			# non-merge operations show the operation name in the brackets
			addl = self.empty_space_in_brackets()
			myprint = "[%s%s] %s%s" % \
				(self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
				addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
		else:
			myprint = "[%s %s] %s%s" % \
				(self.pkgprint(pkg.type_name, pkg_info),
				pkg_info.attr_display,
				self.indent, self.pkgprint(pkg.cp, pkg_info))
		# pad to the "new package" column (nc_len ignores color codes)
		if (self.newlp-nc_len(myprint)) > 0:
			myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
		myprint = myprint+" "+green("["+ver_str+"]")+" "
		# pad again to the "old package" column before the oldbest text
		if (self.oldlp-nc_len(myprint)) > 0:
			myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
		myprint += pkg_info.oldbest
	return myprint
def loaddb(config):
	"""Loads the esearchdb"""
	try:
		sys.path.append(config['esearchdbdir'])
		from esearchdb import db
	except (ImportError, SyntaxError):
		# NOTE(review): execution continues past this call; presumably
		# error() exits the process - confirm, otherwise `db` would be
		# unbound at the return below.
		error("Could not find esearch-index. Please run " +
			green("eupdatedb") + " as root first",
			stderr=config['stderr'])
	try:
		from esearchdb import dbversion
		if dbversion < config['needdbversion']:
			outofdateerror(config['stderr'])
	except ImportError:
		# index predates the dbversion marker: treat as out of date
		outofdateerror(config['stderr'])
	return db
def _set_root_columns(self, addl, pkg_info, pkg):
	"""Sets the indent level and formats the output.

	@param addl: already defined string to add to
	@param pkg_info: dictionary
	@param pkg: _emerge.Package.Package instance
	@rtype string
	Modifies self.verboseadd
	"""
	ver_str = pkg_info.ver
	# append the repository name only when it adds information: either
	# repo display is forced on, or some involved package is not from
	# the main repository
	if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
		any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
		ver_str += _repo_separator + pkg.repo
	if self.conf.quiet:
		# quiet mode: single compact line, no verbose trailer
		myprint = addl + " " + self.indent + \
			self.pkgprint(pkg_info.cp, pkg_info)
		myprint = myprint+" "+green(ver_str)+" "
		myprint = myprint+pkg_info.oldbest
		self.verboseadd = None
	else:
		if not pkg_info.merge:
			# non-merge operations show the operation name in the brackets
			addl = self.empty_space_in_brackets()
			myprint = "[%s%s] %s%s" % \
				(self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
				addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
		else:
			myprint = "[%s %s] %s%s" % \
				(self.pkgprint(pkg.type_name, pkg_info), addl,
				self.indent, self.pkgprint(pkg.cp, pkg_info))
		# pad to the "new package" column (nc_len ignores color codes)
		if (self.newlp-nc_len(myprint)) > 0:
			myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
		myprint = myprint+" "+green("["+ver_str+"]")+" "
		# pad again to the "old package" column before the oldbest text
		if (self.oldlp-nc_len(myprint)) > 0:
			myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
		myprint += pkg_info.oldbest
	return myprint
def assign_packages(broken, logger, settings):
	'''
	Finds and returns packages that owns files placed in broken.
	Broken is list of files

	@param broken: list of broken file paths
	@param logger: python logging-style logger for output
	@param settings: dict-like object providing 'PKG_DIR'
	@return: tuple (set of "group/pkg" owners, set of orphaned file paths)
	'''
	stime = current_milli_time()

	broken_matcher = _file_matcher()
	for filename in broken:
		broken_matcher.add(filename)

	# hoist the regex out of the per-line inner loop
	obj_re = re.compile('^obj (/[^ ]+)')

	assigned_pkgs = set()
	assigned_filenames = set()
	for group in os.listdir(settings['PKG_DIR']):
		# os.path.join tolerates PKG_DIR with or without a trailing slash
		grppath = os.path.join(settings['PKG_DIR'], group)
		if not os.path.isdir(grppath):
			continue
		for pkg in os.listdir(grppath):
			pkgpath = os.path.join(grppath, pkg)
			if not os.path.isdir(pkgpath):
				continue
			f = os.path.join(pkgpath, 'CONTENTS')
			if os.path.exists(f):
				contents_matcher = _file_matcher()
				try:
					with io.open(f, 'r', encoding='utf_8') as cnt:
						for line in cnt.readlines():
							m = obj_re.match(line)
							if m is not None:
								contents_matcher.add(m.group(1))
				except Exception as e:
					# best-effort: log unreadable CONTENTS and move on
					logger.warning(red(' !! Failed to read ' + f))
					logger.warning(red(' !! Error was:' + str(e)))
				else:
					for m in contents_matcher.intersection(broken_matcher):
						found = group+'/'+pkg
						assigned_pkgs.add(found)
						assigned_filenames.add(m)
						logger.info('\t' + green('* ') + m +
							' -> ' + bold(found))

	broken_filenames = set(broken)
	orphaned = broken_filenames.difference(assigned_filenames)
	ftime = current_milli_time()
	logger.debug("\tassign_packages(); assigned "
		"%d packages, %d orphans in %d milliseconds"
		% (len(assigned_pkgs), len(orphaned), ftime-stime))

	return (assigned_pkgs, orphaned)
def masking(mask):
	"""Returns a 'masked by' string.

	@param mask: list of mask-status strings (may be empty)
	@return: colorized mask description, or '' when unmasked
	"""
	if 'package.mask' in mask or 'profile' in mask:
		# use porthole wrap style to help clarify meaning
		return output.red("M["+mask[0]+"]")
	# BUGFIX: `mask is not []` compared identity against a fresh list
	# literal and was therefore always True; truthiness is what was meant.
	if mask:
		for status in mask:
			if 'keyword' in status:
				# keyword masked | " [missing keyword] " <=looks better
				return output.blue("["+status+"]")
			if status in archlist:
				return output.green(status)
			if 'unknown' in status:
				return output.yellow(status)
			return output.red(status)
	return ''
def thick_manifest(self, updates, headers, no_expansion, expansion):
	'''Create a thick manifest

	@param updates: iterable of updated file paths to inspect
	@param headers: list collecting files whose svn headers will change
	@param no_expansion: unused for SVN (kept for interface parity)
	@param expansion: dict of file -> iterable of enabled svn:keywords
	'''
	svn_keywords = dict((k.lower(), k) for k in [
		"Rev",
		"Revision",
		"LastChangedRevision",
		"Date",
		"LastChangedDate",
		"Author",
		"LastChangedBy",
		"URL",
		"HeadURL",
		"Id",
		"Header",
	])

	for _file in updates:
		# for SVN, expansion contains files that are included in expansion
		if _file not in expansion:
			continue

		# Subversion keywords are case-insensitive
		# in svn:keywords properties,
		# but case-sensitive in contents of files.
		enabled_keywords = []
		for k in expansion[_file]:
			keyword = svn_keywords.get(k.lower())
			if keyword is not None:
				enabled_keywords.append(keyword)

		headerstring = r"'\$(%s).*\$'" % "|".join(enabled_keywords)

		# egrep exit status 0 => the file contains an expanding keyword
		_out = repoman_getstatusoutput(
			"egrep -q %s %s" % (headerstring, portage._shell_quote(_file)))
		if _out[0] == 0:
			headers.append(_file)

	print("%s have headers that will change." % green(str(len(headers))))
	print(
		"* Files with headers will"
		" cause the manifests to be changed and committed separately.")
def do_normal(pkg, verbose):
	"""Format one esearch result row for normal output.

	@param pkg: indexed row; indices used here: [1]=name, [2]=masked flag,
		[3]=latest available, [4]=installed version, [5]=download size,
		[6]=homepage, [7]=description, [8]=license
	@param verbose: bool - also show unstable version and USE flags
	@return: list of formatted output strings
	"""
	data = []
	if not pkg[4]:
		installed = "[ Not Installed ]"
	else:
		installed = pkg[4]

	if pkg[2]:
		masked = red(" [ Masked ]")
	else:
		masked = ""

	data.append("%s %s%s\n %s %s\n %s %s" % \
		(green("*"), bold(pkg[1]), masked,
		darkgreen("Latest version available:"), pkg[3],
		darkgreen("Latest version installed:"), installed))

	if verbose:
		# best/portdb/get_flags/pkg_version come from module level
		mpv = best(portdb.xmatch("match-all", pkg[1]))
		iuse_split, final_use = get_flags(mpv, final_setting=True)
		iuse = ""
		use_list = []
		for ebuild_iuse in iuse_split:
			# strip IUSE default markers (+flag / -flag)
			use = ebuild_iuse.lstrip('+-')
			if use in final_use:
				use_list.append(red("+" + use) + " ")
			else:
				use_list.append(blue("-" + use) + " ")
		use_list.sort()
		iuse = ' '.join(use_list)
		if iuse == "":
			iuse = "-"

		data.append(" %s %s\n %s %s" % \
			(darkgreen("Unstable version:"), pkg_version(mpv),
			darkgreen("Use Flags (stable):"), iuse))

	data.append(" %s %s\n %s %s\n %s %s\n %s %s\n" % \
		(darkgreen("Size of downloaded files:"), pkg[5],
		darkgreen("Homepage:"), pkg[6],
		darkgreen("Description:"), pkg[7],
		darkgreen("License:"), pkg[8]))
	return data
def set_pkg_info(self, pkg, ordered):
	"""Sets various pkg_info dictionary variables

	@param pkg: _emerge.Package.Package instance
	@param ordered: bool
	@rtype pkg_info dictionary
	Modifies self.counters.restrict_fetch,
		self.counters.restrict_fetch_satisfied
	"""
	pkg_info = PkgInfo()
	pkg_info.ordered = ordered
	pkg_info.fetch_symbol = " "
	pkg_info.operation = pkg.operation
	pkg_info.merge = ordered and pkg_info.operation == "merge"
	if not pkg_info.merge and pkg_info.operation == "merge":
		# unordered "merge" entries are display-only
		pkg_info.operation = "nomerge"
	pkg_info.built = pkg.type_name != "ebuild"
	pkg_info.ebuild_path = None
	pkg_info.repo_name = pkg.repo
	if pkg.type_name == "ebuild":
		pkg_info.ebuild_path = self.portdb.findname(
			pkg.cpv, myrepo=pkg_info.repo_name)
		if pkg_info.ebuild_path is None:
			raise AssertionError(
				"ebuild not found for '%s'" % pkg.cpv)
		# ebuild lives at <repo>/<cat>/<pkg>/<file>; walk up three dirs
		pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
			os.path.dirname(pkg_info.ebuild_path)))
	else:
		pkg_info.repo_path_real = \
			self.portdb.getRepositoryPath(pkg.metadata["repository"])
	pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
	if not pkg.built and pkg.operation == 'merge' and \
		'fetch' in pkg.metadata.restrict:
		# RESTRICT=fetch: red 'F' unless all distfiles are already present
		pkg_info.fetch_symbol = red("F")
		if pkg_info.ordered:
			self.counters.restrict_fetch += 1
		if not self.portdb.getfetchsizes(pkg.cpv,
			useflags=pkg_info.use, myrepo=pkg.repo):
			pkg_info.fetch_symbol = green("f")
			if pkg_info.ordered:
				self.counters.restrict_fetch_satisfied += 1
	return pkg_info
def repoman_sez(msg):
	"""Print a repoman quip prefixed with the green 'RepoMan sez:' tag."""
	print("%s %s" % (green("RepoMan sez:"), msg))
def fetch_metadata_xsd(metadata_xsd, repoman_settings):
	"""
	Fetch metadata.xsd if it doesn't exist or the ctime is older than
	metadata_xsd_ctime_interval.
	@param metadata_xsd: destination path of the local metadata.xsd copy
	@param repoman_settings: config providing FETCHCOMMAND* and DISTDIR
	@rtype: bool
	@return: True if successful, otherwise False
	"""
	must_fetch = True
	metadata_xsd_st = None
	current_time = int(time.time())
	try:
		metadata_xsd_st = os.stat(metadata_xsd)
	except EnvironmentError as e:
		# missing/stale file means we must fetch; anything else is fatal
		if e.errno not in (errno.ENOENT, errno.ESTALE):
			raise
		del e
	else:
		# Trigger fetch if metadata.xsd mtime is old or clock is wrong.
		if abs(current_time - metadata_xsd_st.st_ctime) \
			< metadata_xsd_ctime_interval:
			must_fetch = False

	if must_fetch:
		print()
		print(
			"%s the local copy of metadata.xsd "
			"needs to be refetched, doing that now" % green("***"))
		print()
		parsed_url = urlparse(metadata_xsd_uri)
		# e.g. FETCHCOMMAND_HTTPS, falling back to plain FETCHCOMMAND
		setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
		fcmd = repoman_settings.get(setting)
		if not fcmd:
			fcmd = repoman_settings.get('FETCHCOMMAND')
			if not fcmd:
				logging.error("FETCHCOMMAND is unset")
				return False

		destdir = repoman_settings["DISTDIR"]
		fd, metadata_xsd_tmp = tempfile.mkstemp(
			prefix='metadata.xsd.', dir=destdir)
		os.close(fd)

		try:
			if not portage.getbinpkg.file_get(
				metadata_xsd_uri, destdir, fcmd=fcmd,
				filename=os.path.basename(metadata_xsd_tmp)):
				logging.error(
					"failed to fetch metadata.xsd from '%s'"
					% metadata_xsd_uri)
				return False

			try:
				portage.util.apply_secpass_permissions(
					metadata_xsd_tmp,
					gid=portage.data.portage_gid, mode=0o664, mask=0o2)
			except portage.exception.PortageException:
				# best-effort permission fixup
				pass

			# atomically replace the old copy with the fetched one
			shutil.move(metadata_xsd_tmp, metadata_xsd)
		finally:
			try:
				os.unlink(metadata_xsd_tmp)
			except OSError:
				pass

	return True
def printUsage(_error=None, help=None):
	"""Print help message. May also print partial help to stderr if an
	error from {'options','actions'} is specified.

	@param _error: optional error category controlling which partial
		help is shown (and routing output to stderr)
	@param help: optional topic ('all', 'packages' or 'distfiles')
	"""
	out = sys.stdout
	if _error:
		out = sys.stderr
	# unknown error categories fall back to full help
	if not _error in ('actions', 'global-options', \
			'packages-options', 'distfiles-options', \
			'merged-packages-options', 'merged-distfiles-options', \
			'time', 'size'):
		_error = None
	if not _error and not help:
		help = 'all'
	if _error == 'time':
		print(pp.error("Wrong time specification"), file=out)
		print("Time specification should be an integer followed by a" +
			" single letter unit.", file=out)
		print("Available units are: y (years), m (months), w (weeks), " +
			"d (days) and h (hours).", file=out)
		print("For instance: \"1y\" is \"one year\", \"2w\" is \"two" +
			" weeks\", etc. ", file=out)
		return
	if _error == 'size':
		print(pp.error("Wrong size specification"), file=out)
		print("Size specification should be an integer followed by a" +
			" single letter unit.", file=out)
		print("Available units are: G, M, K and B.", file=out)
		print("For instance: \"10M\" is \"ten megabytes\", \"200K\" " +
			"is \"two hundreds kilobytes\", etc.", file=out)
		return
	if _error in ('global-options', 'packages-options', 'distfiles-options', \
			'merged-packages-options', 'merged-distfiles-options',):
		print(pp.error("Wrong option on command line."), file=out)
		print(file=out)
	elif _error == 'actions':
		print(pp.error("Wrong or missing action name on command line."), file=out)
		print(file=out)
	print(white("Usage:"), file=out)
	if _error in ('actions','global-options', 'packages-options', \
			'distfiles-options') or help == 'all':
		print(" " + turquoise(__productname__),
			yellow("[global-option] ..."),
			green("<action>"),
			yellow("[action-option] ..."), file=out)
	if _error == 'merged-distfiles-options' or help in ('all', 'distfiles'):
		print(" " + turquoise(__productname__ + '-dist'),
			yellow("[global-option, distfiles-option] ..."), file=out)
	if _error == 'merged-packages-options' or help in ('all', 'packages'):
		print(" " + turquoise(__productname__ + '-pkg'),
			yellow("[global-option, packages-option] ..."), file=out)
	if _error in ('global-options', 'actions'):
		print(" " + turquoise(__productname__),
			yellow("[--help, --version]"), file=out)
	if help == 'all':
		print(" " + turquoise(__productname__ + "(-dist,-pkg)"),
			yellow("[--help, --version]"), file=out)
	if _error == 'merged-packages-options' or help == 'packages':
		print(" " + turquoise(__productname__ + '-pkg'),
			yellow("[--help, --version]"), file=out)
	if _error == 'merged-distfiles-options' or help == 'distfiles':
		print(" " + turquoise(__productname__ + '-dist'),
			yellow("[--help, --version]"), file=out)
	print(file=out)
	if _error in ('global-options', 'merged-packages-options', \
			'merged-distfiles-options') or help:
		print("Available global", yellow("options") + ":", file=out)
		print(yellow(" -C, --nocolor") +
			" - turn off colors on output", file=out)
		print(yellow(" -d, --deep") +
			" - only keep the minimum for a reinstallation", file=out)
		print(yellow(" -e, --exclude-file=<path>") +
			" - path to the exclusion file", file=out)
		print(yellow(" -i, --interactive") +
			" - ask confirmation before deletions", file=out)
		print(yellow(" -n, --package-names") +
			" - protect all versions (when --deep)", file=out)
		print(yellow(" -p, --pretend") +
			" - only display what would be cleaned", file=out)
		print(yellow(" -q, --quiet") +
			" - be as quiet as possible", file=out)
		print(yellow(" -t, --time-limit=<time>") +
			" - don't delete files modified since " + yellow("<time>"), file=out)
		print(" " + yellow("<time>"), "is a duration: \"1y\" is" +
			" \"one year\", \"2w\" is \"two weeks\", etc. ", file=out)
		print(" " + "Units are: y (years), m (months), w (weeks), " +
			"d (days) and h (hours).", file=out)
		print(yellow(" -h, --help") + \
			" - display the help screen", file=out)
		print(yellow(" -V, --version") +
			" - display version info", file=out)
		print(file=out)
	if _error == 'actions' or help == 'all':
		print("Available", green("actions") + ":", file=out)
		print(green(" packages") +
			" - clean outdated binary packages from PKGDIR", file=out)
		print(green(" distfiles") +
			" - clean outdated packages sources files from DISTDIR", file=out)
		print(file=out)
	if _error in ('packages-options','merged-packages-options') \
			or help in ('all','packages'):
		print("Available", yellow("options"), "for the",
			green("packages"), "action:", file=out)
		print(yellow(" -i, --ignore-failure") +
			" - ignore failure to locate PKGDIR", file=out)
		print(file=out)
	if _error in ('distfiles-options', 'merged-distfiles-options') \
			or help in ('all','distfiles'):
		print("Available", yellow("options"), "for the",
			green("distfiles"), "action:", file=out)
		print(yellow(" -f, --fetch-restricted") +
			" - protect fetch-restricted files (when --deep)", file=out)
		print(yellow(" -s, --size-limit=<size>") +
			" - don't delete distfiles bigger than " + yellow("<size>"), file=out)
		print(" " + yellow("<size>"), "is a size specification: " +
			"\"10M\" is \"ten megabytes\", \"200K\" is", file=out)
		print(" " + "\"two hundreds kilobytes\", etc. Units are: " +
			"G, M, K and B.", file=out)
		print(file=out)
	print("More detailed instruction can be found in",
		turquoise("`man %s`" % __productname__), file=out)
def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
        old_iuse, old_use, is_new, feature_flags, reinst_flags):
    """Build the colorized flag display string (e.g. USE="a -b (-c)")
    shown in the emerge merge list for one package.

    @param conf: display configuration; reads conf.print_use_string,
        conf.alphabetical and conf.all_flags
    @param name: variable name used as the prefix, e.g. "USE"
    @param cur_iuse: IUSE of the version being installed
    @param iuse_forced: flags whose state is forced (shown in parens)
    @param cur_use: enabled flags of the version being installed
    @param old_iuse: IUSE of the installed version (empty if none)
    @param old_use: enabled flags of the installed version
    @param is_new: True if the package is not currently installed
    @param feature_flags: flags displayed in curly braces
    @param reinst_flags: flags that triggered a reinstall (may be falsy)
    @rtype: str — empty string when nothing is to be shown
    """
    if not conf.print_use_string:
        return ""
    enabled = []
    if conf.alphabetical:
        # Alias all three buckets to one list so flags come out in a
        # single alphabetical sequence instead of grouped by state.
        disabled = enabled
        removed = enabled
    else:
        disabled = []
        removed = []
    cur_iuse = set(cur_iuse)
    enabled_flags = cur_iuse.intersection(cur_use)
    # Flags that existed in the old version's IUSE but are gone now.
    removed_iuse = set(old_iuse).difference(cur_iuse)
    any_iuse = cur_iuse.union(old_iuse)
    any_iuse = list(any_iuse)
    any_iuse.sort()
    for flag in any_iuse:
        flag_str = None
        isEnabled = False
        reinst_flag = reinst_flags and flag in reinst_flags
        if flag in enabled_flags:
            isEnabled = True
            # NOTE: 'or' binds looser than 'and', so this reads as
            # is_new or (flag in old_use and (...)) — kept as-is.
            if is_new or flag in old_use and \
                (conf.all_flags or reinst_flag):
                flag_str = red(flag)
            elif flag not in old_iuse:
                # Newly introduced flag that is enabled: "%*" suffix.
                flag_str = yellow(flag) + "%*"
            elif flag not in old_use:
                # Existing flag that changed to enabled: "*" suffix.
                flag_str = green(flag) + "*"
        elif flag in removed_iuse:
            if conf.all_flags or reinst_flag:
                flag_str = yellow("-" + flag) + "%"
                if flag in old_use:
                    flag_str += "*"
                flag_str = "(" + flag_str + ")"
                removed.append(flag_str)
            continue
        else:
            # Flag is present in IUSE but disabled.
            if is_new or flag in old_iuse and \
                flag not in old_use and \
                (conf.all_flags or reinst_flag):
                flag_str = blue("-" + flag)
            elif flag not in old_iuse:
                flag_str = yellow("-" + flag)
                if flag not in iuse_forced:
                    flag_str += "%"
            elif flag in old_use:
                # Was enabled before, disabled now.
                flag_str = green("-" + flag) + "*"
        if flag_str:
            if flag in feature_flags:
                flag_str = "{" + flag_str + "}"
            elif flag in iuse_forced:
                flag_str = "(" + flag_str + ")"
            if isEnabled:
                enabled.append(flag_str)
            else:
                disabled.append(flag_str)
    if conf.alphabetical:
        # All buckets alias 'enabled' in this mode (see above).
        ret = " ".join(enabled)
    else:
        ret = " ".join(enabled + disabled + removed)
    if ret:
        ret = '%s="%s" ' % (name, ret)
    return ret
def detect_conflicts(options):
    """Determine if the checkout has cvs conflicts.

    TODO(antarus): Also this should probably not call sys.exit() as
    repoman is run on >1 packages and one failure should not cause
    subsequent packages to fail.

    Returns:
        None (calls sys.exit on fatal problems)
    """
    # Filter 'cvs -n up' output down to the status lines we care about.
    cmd = (r"cvs -n up 2>/dev/null | "
        r"egrep '^[^\?] .*' | "
        r"egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
    logging.info(
        "Performing a %s with a little magic grep to check for updates."
        % green("cvs -n up"))
    # Use Popen instead of getstatusoutput(), in order to avoid
    # unicode handling problems (see bug #310789).
    proc = subprocess.Popen(
        [_unicode_encode(arg) for arg in (BASH_BINARY, "-c", cmd)],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out = _unicode_decode(proc.communicate()[0])
    proc.wait()

    pending_updates = []
    for line in out.splitlines():
        if not line:
            continue
        status = line[0]
        # [ ] Unmodified (SVN) [U] Updates [P] Patches
        # [M] Modified [A] Added [R] Removed / Replaced
        # [D] Deleted
        if status in "UP":
            pending_updates.append(line[2:])
            continue
        if status in " MARD":
            # Known-good status that needs no action.
            continue
        # Stray Manifest is fine, we will readd it anyway.
        if status == '?' and line[1:].lstrip() == 'Manifest':
            continue
        logging.error(
            red("!!! Please fix the following issues reported "
            "from cvs: %s" % green("(U,P,M,A,R,D are ok)")))
        logging.error(
            red("!!! Note: This is a pretend/no-modify pass..."))
        logging.error(out)
        sys.exit(1)

    if pending_updates:
        logging.info(green("Fetching trivial updates..."))
        if options.pretend:
            logging.info("(cvs update " + " ".join(pending_updates) + ")")
            retval = os.EX_OK
        else:
            retval = os.system("cvs update " + " ".join(pending_updates))
        if retval != os.EX_OK:
            logging.fatal("!!! cvs exited with an error. Terminating.")
            sys.exit(retval)
    return False
def analyse(settings, logger, libraries=None, la_libraries=None,
        libraries_links=None, binaries=None, _libs_to_check=None):
    """Main program body. It will collect all info and determine the
    pkgs needing rebuilding.

    @param settings: revdep-rebuild settings mapping; reads
        REVDEP_CONFDIR, USE_TMP_FILES, DEFAULT_TMP_DIR and CMD_MAX_ARGS
    @param logger: logger used for logging messages, instance of
        logging.Logger class. Can be logging (RootLogger).
    @param libraries, la_libraries, libraries_links, binaries: optional
        pre-collected cache; all four must be truthy to skip collection
    @param _libs_to_check: Libraries that need to be checked only
    @rtype list: list of pkgs that need rebuilding
    """
    searchbits = set()
    # Disabled per-bitness narrowing; both word sizes are always searched.
    '''if _libs_to_check:
        for lib in _libs_to_check:
            if "lib64" in lib:
                searchbits.add('64')
            elif "lib32" in lib:
                searchbits.add('32')
    else:
        _libs_to_check = set()'''
    searchbits.update(['64', '32'])

    masked_dirs, masked_files, ld = parse_revdep_config(
        settings['REVDEP_CONFDIR'])
    # Kernel module trees are always masked from the scan.
    masked_dirs.update([
        '/lib/modules',
        '/lib32/modules',
        '/lib64/modules',
    ])
    if '64' not in searchbits:
        masked_dirs.update(['/lib64', '/usr/lib64'])
    elif '32' not in searchbits:
        masked_dirs.update(['/lib32', '/usr/lib32'])

    all_masks = masked_dirs.copy()
    all_masks.update(masked_files)
    logger.debug("\tall_masks:")
    for x in sorted(all_masks):
        logger.debug('\t\t%s' % (x))

    if libraries and la_libraries and libraries_links and binaries:
        logger.info(
            blue(' * ') +
            bold('Found a valid cache, skipping collecting phase'))
    else:
        #TODO: add partial cache (for ex. only libraries)
        # when found for some reason
        stime = current_milli_time()
        logger.warning(
            green(' * ') +
            bold('Collecting system binaries and libraries'))
        bin_dirs, lib_dirs = prepare_search_dirs(logger, settings)

        # Directories from the revdep config (ld) are searched for both
        # binaries and libraries.
        lib_dirs.update(ld)
        bin_dirs.update(ld)

        logger.debug('\tanalyse(), bin directories:')
        for x in sorted(bin_dirs):
            logger.debug('\t\t%s' % (x))
        logger.debug('\tanalyse(), lib directories:')
        for x in sorted(lib_dirs):
            logger.debug('\t\t%s' % (x))
        logger.debug('\tanalyse(), masked directories:')
        for x in sorted(masked_dirs):
            logger.debug('\t\t%s' % (x))
        logger.debug('\tanalyse(), masked files:')
        for x in sorted(masked_files):
            logger.debug('\t\t%s' % (x))

        ftime = current_milli_time()
        logger.debug('\ttime to complete task: %d milliseconds'
            % (ftime - stime))
        stime = current_milli_time()
        logger.info(
            green(' * ') +
            bold('Collecting dynamic linking informations'))
        libraries, la_libraries, libraries_links = \
            collect_libraries_from_dir(lib_dirs, all_masks, logger)
        binaries = collect_binaries_from_dir(bin_dirs, all_masks, logger)
        ftime = current_milli_time()
        logger.debug('\ttime to complete task: %d milliseconds'
            % (ftime - stime))

        if settings['USE_TMP_FILES']:
            # Persist the collection so later runs can skip this phase.
            save_cache(logger=logger,
                to_save={
                    'libraries': libraries,
                    'la_libraries': la_libraries,
                    'libraries_links': libraries_links,
                    'binaries': binaries
                },
                temp_path=settings['DEFAULT_TMP_DIR'])

    logger.debug(
        '\tanalyse(), Found %i libraries (+%i symlinks) and %i binaries'
        % (len(libraries), len(libraries_links), len(binaries)))
    logger.info(green(' * ') + bold('Scanning files'))

    libs_and_bins = libraries.union(binaries)
    scanned_files = scan_files(libs_and_bins, settings['CMD_MAX_ARGS'],
        logger, searchbits)

    logger.warning(green(' * ') +
        bold('Checking dynamic linking consistency'))
    logger.debug(
        '\tanalyse(), Searching for %i libs, bins within %i libraries and links'
        % (len(libs_and_bins), len(libraries) + len(libraries_links)))

    libcheck = LibCheck(scanned_files, logger, _libs_to_check, searchbits,
        all_masks, masked_dirs)
    broken_pathes = libcheck.process_results(libcheck.search())

    # .la files carry their own dependency lists; check them separately.
    broken_la = extract_dependencies_from_la(la_libraries,
        libraries.union(libraries_links), _libs_to_check, logger)
    broken_pathes += broken_la

    if broken_pathes:
        logger.warning(green(' * ') + bold('Assign files to packages'))
        return assign_packages(broken_pathes, logger, settings)
    return None, None  # no need to assign anything
def emerge_help():
    """Print the condensed emerge usage screen (usage forms, common
    options and actions) to stdout using Portage's color helpers."""
    print(bold("emerge:") + " command-line interface to the Portage system")
    print(bold("Usage:"))
    print(" " + turquoise("emerge") + " [ " + green("options") + " ] [ "
        + green("action") + " ] [ " + turquoise("ebuild") + " | "
        + turquoise("tbz2") + " | " + turquoise("file") + " | "
        + turquoise("@set") + " | " + turquoise("atom") + " ] [ ... ]")
    print(" " + turquoise("emerge") + " [ " + green("options") + " ] [ "
        + green("action") + " ] < " + turquoise("@system") + " | "
        + turquoise("@world") + " >")
    print(" " + turquoise("emerge") + " < " + turquoise("--sync") + " | "
        + turquoise("--metadata") + " | " + turquoise("--info") + " >")
    print(" " + turquoise("emerge") + " " + turquoise("--resume") + " [ "
        + green("--pretend") + " | " + green("--ask") + " | "
        + green("--skipfirst") + " ]")
    print(" " + turquoise("emerge") + " " + turquoise("--help"))
    print(bold("Options:") + " " + green("-") + "["
        + green("abBcCdDefgGhjkKlnNoOpPqrsStuUvVw") + "]")
    print(" [ " + green("--color") + " < " + turquoise("y") + " | "
        + turquoise("n") + " > ] [ " + green("--columns") + " ]")
    print(" [ " + green("--complete-graph") + " ] [ " + green("--deep") + " ]")
    print(" [ " + green("--jobs") + " " + turquoise("JOBS") + " ] [ "
        + green("--keep-going") + " ] [ " + green("--load-average") + " "
        + turquoise("LOAD") + " ]")
    print(" [ " + green("--newrepo") + " ] [ " + green("--newuse") + " ] [ "
        + green("--noconfmem") + " ] [ " + green("--nospinner") + " ]")
    print(" [ " + green("--oneshot") + " ] [ " + green("--onlydeps") + " ] [ "
        + green("--quiet-build") + " [ " + turquoise("y") + " | "
        + turquoise("n") + " ] ]")
    print(" [ " + green("--reinstall ") + turquoise("changed-use") + " ] [ "
        + green("--with-bdeps") + " < " + turquoise("y") + " | "
        + turquoise("n") + " > ]")
    print(bold("Actions:") + "  [ " + green("--depclean") + " | "
        + green("--list-sets") + " | " + green("--search") + " | "
        + green("--sync") + " | " + green("--version") + " ]")
    print()
    print(" For more help consult the man page.")
def unmerge(root_config, myopts, unmerge_action,
    unmerge_files, ldpath_mtimes, autoclean=0,
    clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
    scheduler=None, writemsg_level=portage.util.writemsg_level):
    """Resolve the requested packages, display what would be removed, and
    (unless pretending) unmerge them.

    @param root_config: RootConfig for the target ROOT
    @param myopts: emerge option mapping (e.g. "--pretend", "--ask")
    @param unmerge_action: one of "unmerge", "prune", "clean"
    @param unmerge_files: command-line package arguments; empty means a
        global prune/clean pass
    @param ldpath_mtimes: passed through to portage.unmerge()
    @param autoclean: suppress the CLEAN_DELAY countdown when true
    @param clean_world: also remove atoms from the "selected" set
        (disabled when --deselect=n)
    @param clean_delay: enable warning/clean countdowns
    @param ordered: preserve selection order (needed by --depclean/--prune)
    @param raise_on_error: raise UninstallFailure instead of sys.exit()
    @param scheduler: optional scheduler passed to portage.unmerge()
    @rtype: int — 0 if nothing was unmerged, 1 on success
    """
    if clean_world:
        clean_world = myopts.get('--deselect') != 'n'
    quiet = "--quiet" in myopts
    enter_invalid = '--ask-enter-invalid' in myopts
    settings = root_config.settings
    sets = root_config.sets
    vartree = root_config.trees["vartree"]
    candidate_catpkgs=[]
    global_unmerge=0
    xterm_titles = "notitles" not in settings.features
    out = portage.output.EOutput()
    pkg_cache = {}
    db_keys = list(vartree.dbapi._aux_cache_keys)

    def _pkg(cpv):
        # Lazily build and cache Package instances for installed cpvs.
        pkg = pkg_cache.get(cpv)
        if pkg is None:
            pkg = Package(built=True, cpv=cpv, installed=True,
                metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
                operation="uninstall", root_config=root_config,
                type_name="installed")
            pkg_cache[cpv] = pkg
        return pkg

    vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
    try:
        # At least the parent needs to exist for the lock file.
        portage.util.ensure_dirs(vdb_path)
    except portage.exception.PortageException:
        pass
    vdb_lock = None
    try:
        if os.access(vdb_path, os.W_OK):
            vdb_lock = portage.locks.lockdir(vdb_path)
        realsyslist = sets["system"].getAtoms()
        syslist = []
        for x in realsyslist:
            mycp = portage.dep_getkey(x)
            if mycp in settings.getvirtuals():
                providers = []
                for provider in settings.getvirtuals()[mycp]:
                    if vartree.dbapi.match(provider):
                        providers.append(provider)
                # Only treat the virtual as a system package when exactly
                # one installed provider satisfies it.
                if len(providers) == 1:
                    syslist.extend(providers)
            else:
                syslist.append(mycp)

        mysettings = portage.config(clone=settings)

        if not unmerge_files:
            if unmerge_action == "unmerge":
                print()
                print(bold("emerge unmerge") +
                    " can only be used with specific package names")
                print()
                return 0
            else:
                global_unmerge = 1

        localtree = vartree
        # process all arguments and add all
        # valid db entries to candidate_catpkgs
        if global_unmerge:
            if not unmerge_files:
                candidate_catpkgs.extend(vartree.dbapi.cp_all())
        else:
            #we've got command-line arguments
            if not unmerge_files:
                print("\nNo packages to unmerge have been provided.\n")
                return 0
            for x in unmerge_files:
                arg_parts = x.split('/')
                if x[0] not in [".","/"] and \
                    arg_parts[-1][-7:] != ".ebuild":
                    #possible cat/pkg or dep; treat as such
                    candidate_catpkgs.append(x)
                elif unmerge_action in ["prune","clean"]:
                    print("\n!!! Prune and clean do not accept individual" + \
                        " ebuilds as arguments;\n skipping.\n")
                    continue
                else:
                    # it appears that the user is specifying an installed
                    # ebuild and we're in "unmerge" mode, so it's ok.
                    if not os.path.exists(x):
                        print("\n!!! The path '"+x+"' doesn't exist.\n")
                        return 0

                    absx = os.path.abspath(x)
                    sp_absx = absx.split("/")
                    if sp_absx[-1][-7:] == ".ebuild":
                        del sp_absx[-1]
                        absx = "/".join(sp_absx)

                    sp_absx_len = len(sp_absx)

                    vdb_path = os.path.join(settings["ROOT"],
                        portage.VDB_PATH)
                    vdb_len = len(vdb_path)  # NOTE(review): unused

                    sp_vdb = vdb_path.split("/")
                    sp_vdb_len = len(sp_vdb)

                    if not os.path.exists(absx+"/CONTENTS"):
                        print("!!! Not a valid db dir: "+str(absx))
                        return 0

                    if sp_absx_len <= sp_vdb_len:
                        # The Path is shorter... so it can't be inside the vdb.
                        print(sp_absx)
                        print(absx)
                        print("\n!!!",x,"cannot be inside "+ \
                            vdb_path+"; aborting.\n")
                        return 0

                    for idx in range(0,sp_vdb_len):
                        if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
                            print(sp_absx)
                            print(absx)
                            print("\n!!!", x, "is not inside "+\
                                vdb_path+"; aborting.\n")
                            return 0

                    # Convert the vdb path to an "=cat/pkg-ver" atom.
                    print("="+"/".join(sp_absx[sp_vdb_len:]))
                    candidate_catpkgs.append(
                        "="+"/".join(sp_absx[sp_vdb_len:]))

        newline=""
        if (not "--quiet" in myopts):
            newline="\n"
        if settings["ROOT"] != "/":
            writemsg_level(darkgreen(newline+ \
                ">>> Using system located in ROOT tree %s\n" % \
                settings["ROOT"]))

        if (("--pretend" in myopts) or ("--ask" in myopts)) and \
            not ("--quiet" in myopts):
            writemsg_level(darkgreen(newline+\
                ">>> These are the packages that would be unmerged:\n"))

        # Preservation of order is required for --depclean and --prune so
        # that dependencies are respected. Use all_selected to eliminate
        # duplicate packages since the same package may be selected by
        # multiple atoms.
        pkgmap = []
        all_selected = set()
        for x in candidate_catpkgs:
            # cycle through all our candidate deps and determine
            # what will and will not get unmerged
            try:
                mymatch = vartree.dbapi.match(x)
            except portage.exception.AmbiguousPackageName as errpkgs:
                print("\n\n!!! The short ebuild name \"" + \
                    x + "\" is ambiguous. Please specify")
                print("!!! one of the following fully-qualified " + \
                    "ebuild names instead:\n")
                for i in errpkgs[0]:
                    print(" " + green(i))
                print()
                sys.exit(1)

            if not mymatch and x[0] not in "<>=~":
                mymatch = localtree.dep_match(x)
            if not mymatch:
                portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
                    (x, unmerge_action), noiselevel=-1)
                continue

            pkgmap.append(
                {"protected": set(), "selected": set(), "omitted": set()})
            mykey = len(pkgmap) - 1
            if unmerge_action=="unmerge":
                for y in mymatch:
                    if y not in all_selected:
                        pkgmap[mykey]["selected"].add(y)
                        all_selected.add(y)
            elif unmerge_action == "prune":
                if len(mymatch) == 1:
                    continue
                # Keep the single best version/slot; select the rest.
                best_version = mymatch[0]
                best_slot = vartree.getslot(best_version)
                best_counter = vartree.dbapi.cpv_counter(best_version)
                for mypkg in mymatch[1:]:
                    myslot = vartree.getslot(mypkg)
                    mycounter = vartree.dbapi.cpv_counter(mypkg)
                    if (myslot == best_slot and mycounter > best_counter) or \
                        mypkg == portage.best([mypkg, best_version]):
                        if myslot == best_slot:
                            if mycounter < best_counter:
                                # On slot collision, keep the one with the
                                # highest counter since it is the most
                                # recently installed.
                                continue
                        best_version = mypkg
                        best_slot = myslot
                        best_counter = mycounter
                pkgmap[mykey]["protected"].add(best_version)
                pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
                    if mypkg != best_version and mypkg not in all_selected)
                all_selected.update(pkgmap[mykey]["selected"])
            else:
                # unmerge_action == "clean"
                slotmap={}
                for mypkg in mymatch:
                    if unmerge_action == "clean":
                        myslot = localtree.getslot(mypkg)
                    else:
                        # since we're pruning, we don't care about slots
                        # and put all the pkgs in together
                        myslot = 0
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

                for mypkg in vartree.dbapi.cp_list(
                    portage.cpv_getkey(mymatch[0])):
                    myslot = vartree.getslot(mypkg)
                    if myslot not in slotmap:
                        slotmap[myslot] = {}
                    slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

                for myslot in slotmap:
                    counterkeys = list(slotmap[myslot])
                    if not counterkeys:
                        continue
                    counterkeys.sort()
                    # Highest counter in each slot is protected.
                    pkgmap[mykey]["protected"].add(
                        slotmap[myslot][counterkeys[-1]])
                    del counterkeys[-1]

                    for counter in counterkeys[:]:
                        mypkg = slotmap[myslot][counter]
                        if mypkg not in mymatch:
                            counterkeys.remove(counter)
                            pkgmap[mykey]["protected"].add(
                                slotmap[myslot][counter])

                    #be pretty and get them in order of merge:
                    for ckey in counterkeys:
                        mypkg = slotmap[myslot][ckey]
                        if mypkg not in all_selected:
                            pkgmap[mykey]["selected"].add(mypkg)
                            all_selected.add(mypkg)
                    # ok, now the last-merged package
                    # is protected, and the rest are selected
        numselected = len(all_selected)
        if global_unmerge and not numselected:
            portage.writemsg_stdout(
                "\n>>> No outdated packages were found on your system.\n")
            return 0

        if not numselected:
            portage.writemsg_stdout(
                "\n>>> No packages selected for removal by " + \
                unmerge_action + "\n")
            return 0
    finally:
        # Release the vdb lock even on early return / sys.exit paths.
        if vdb_lock:
            vartree.dbapi.flush_cache()
            portage.locks.unlockdir(vdb_lock)

    from portage._sets.base import EditablePackageSet

    # generate a list of package sets that are directly or indirectly
    # listed in "selected", as there is no persistent list of
    # "installed" sets
    installed_sets = ["selected"]
    stop = False
    pos = 0
    while not stop:
        stop = True
        pos = len(installed_sets)
        for s in installed_sets[pos - 1:]:
            if s not in sets:
                continue
            candidates = [x[len(SETPREFIX):] for x in
                sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
            if candidates:
                stop = False
                installed_sets += candidates
    installed_sets = [x for x in installed_sets
        if x not in root_config.setconfig.active]
    del stop, pos

    # we don't want to unmerge packages that are still listed in
    # user-editable package sets listed in "world" as they would be
    # remerged on the next update of "world" or the relevant package sets.
    unknown_sets = set()
    for cp in range(len(pkgmap)):
        for cpv in pkgmap[cp]["selected"].copy():
            try:
                pkg = _pkg(cpv)
            except KeyError:
                # It could have been uninstalled
                # by a concurrent process.
                continue

            if unmerge_action != "clean" and \
                root_config.root == "/" and \
                portage.match_from_list(
                portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
                msg = ("Not unmerging package %s since there is no valid " + \
                "reason for portage to unmerge itself.") % (pkg.cpv,)
                for line in textwrap.wrap(msg, 75):
                    out.eerror(line)
                # adjust pkgmap so the display output is correct
                pkgmap[cp]["selected"].remove(cpv)
                all_selected.remove(cpv)
                pkgmap[cp]["protected"].add(cpv)
                continue

            parents = []
            for s in installed_sets:
                # skip sets that the user requested to unmerge, and skip world
                # user-selected set, since the package will be removed from
                # that set later on.
                if s in root_config.setconfig.active or s == "selected":
                    continue

                if s not in sets:
                    if s in unknown_sets:
                        continue
                    unknown_sets.add(s)
                    out = portage.output.EOutput()
                    out.eerror(("Unknown set '@%s' in %s%s") % \
                        (s, root_config.root,
                        portage.const.WORLD_SETS_FILE))
                    continue

                # only check instances of EditablePackageSet as other
                # classes are generally used for special purposes and can
                # be ignored here (and are usually generated dynamically,
                # so the user can't do much about them anyway)
                if isinstance(sets[s], EditablePackageSet):

                    # This is derived from a snippet of code in the
                    # depgraph._iter_atoms_for_pkg() method.
                    for atom in sets[s].iterAtomsForPackage(pkg):
                        inst_matches = vartree.dbapi.match(atom)
                        inst_matches.reverse() # descending order
                        higher_slot = None
                        for inst_cpv in inst_matches:
                            try:
                                inst_pkg = _pkg(inst_cpv)
                            except KeyError:
                                # It could have been uninstalled
                                # by a concurrent process.
                                continue

                            if inst_pkg.cp != atom.cp:
                                continue
                            if pkg >= inst_pkg:
                                # This is descending order, and we're not
                                # interested in any versions <= pkg given.
                                break
                            if pkg.slot_atom != inst_pkg.slot_atom:
                                higher_slot = inst_pkg
                                break
                        if higher_slot is None:
                            parents.append(s)
                            break
            if parents:
                print(colorize("WARN",
                    "Package %s is going to be unmerged," % cpv))
                print(colorize("WARN",
                    "but still listed in the following package sets:"))
                print(" %s\n" % ", ".join(parents))

    del installed_sets

    numselected = len(all_selected)
    if not numselected:
        writemsg_level(
            "\n>>> No packages selected for removal by " + \
            unmerge_action + "\n")
        return 0

    # Unmerge order only matters in some cases
    if not ordered:
        # Collapse per-atom entries into one entry per cp, sorted by cp.
        unordered = {}
        for d in pkgmap:
            selected = d["selected"]
            if not selected:
                continue
            cp = portage.cpv_getkey(next(iter(selected)))
            cp_dict = unordered.get(cp)
            if cp_dict is None:
                cp_dict = {}
                unordered[cp] = cp_dict
                for k in d:
                    cp_dict[k] = set()
            for k, v in d.items():
                cp_dict[k].update(v)
        pkgmap = [unordered[cp] for cp in sorted(unordered)]

    for x in range(len(pkgmap)):
        selected = pkgmap[x]["selected"]
        if not selected:
            continue
        for mytype, mylist in pkgmap[x].items():
            if mytype == "selected":
                continue
            mylist.difference_update(all_selected)
        cp = portage.cpv_getkey(next(iter(selected)))
        for y in localtree.dep_match(cp):
            if y not in pkgmap[x]["omitted"] and \
                y not in pkgmap[x]["selected"] and \
                y not in pkgmap[x]["protected"] and \
                y not in all_selected:
                pkgmap[x]["omitted"].add(y)
        if global_unmerge and not pkgmap[x]["selected"]:
            #avoid cluttering the preview printout with stuff that
            #isn't getting unmerged
            continue
        if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and \
            cp in syslist:
            writemsg_level(colorize("BAD","\a\n\n!!! " + \
                "'%s' is part of your system profile.\n" % cp),
                level=logging.WARNING, noiselevel=-1)
            writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
                "be damaging to your system.\n\n"),
                level=logging.WARNING, noiselevel=-1)
            if clean_delay and "--pretend" not in myopts and \
                "--ask" not in myopts:
                countdown(int(settings["EMERGE_WARNING_DELAY"]),
                    colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
        if not quiet:
            writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
        else:
            writemsg_level(bold(cp) + ": ", noiselevel=-1)
        for mytype in ["selected","protected","omitted"]:
            if not quiet:
                writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
            if pkgmap[x][mytype]:
                sorted_pkgs = [portage.catpkgsplit(mypkg)[1:]
                    for mypkg in pkgmap[x][mytype]]
                sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
                for pn, ver, rev in sorted_pkgs:
                    if rev == "r0":
                        myversion = ver
                    else:
                        myversion = ver + "-" + rev
                    if mytype == "selected":
                        writemsg_level(
                            colorize("UNMERGE_WARN", myversion + " "),
                            noiselevel=-1)
                    else:
                        writemsg_level(
                            colorize("GOOD", myversion + " "),
                            noiselevel=-1)
            else:
                writemsg_level("none ", noiselevel=-1)
            if not quiet:
                writemsg_level("\n", noiselevel=-1)
        if quiet:
            writemsg_level("\n", noiselevel=-1)

    writemsg_level("\nAll selected packages: %s\n" %
        " ".join(all_selected), noiselevel=-1)

    writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
        " packages are slated for removal.\n")
    writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
        " and " + colorize("GOOD", "'omitted'") + \
        " packages will not be removed.\n\n")

    if "--pretend" in myopts:
        #we're done... return
        return 0
    if "--ask" in myopts:
        if userquery("Would you like to unmerge these packages?",
            enter_invalid) == "No":
            # enter pretend mode for correct formatting of results
            myopts["--pretend"] = True
            print()
            print("Quitting.")
            print()
            return 0
    #the real unmerging begins, after a short delay....
    if clean_delay and not autoclean:
        countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

    for x in range(len(pkgmap)):
        for y in pkgmap[x]["selected"]:
            writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
            emergelog(xterm_titles, "=== Unmerging... ("+y+")")
            mysplit = y.split("/")
            #unmerge...
            retval = portage.unmerge(mysplit[0], mysplit[1],
                settings["ROOT"], mysettings,
                unmerge_action not in ["clean","prune"],
                vartree=vartree, ldpath_mtimes=ldpath_mtimes,
                scheduler=scheduler)

            if retval != os.EX_OK:
                emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
                if raise_on_error:
                    raise UninstallFailure(retval)
                sys.exit(retval)
            else:
                if clean_world and hasattr(sets["selected"], "cleanPackage")\
                    and hasattr(sets["selected"], "lock"):
                    sets["selected"].lock()
                    if hasattr(sets["selected"], "load"):
                        sets["selected"].load()
                    sets["selected"].cleanPackage(vartree.dbapi, y)
                    sets["selected"].unlock()
                emergelog(xterm_titles, " >>> unmerge success: "+y)

    if clean_world and hasattr(sets["selected"], "remove")\
        and hasattr(sets["selected"], "lock"):
        sets["selected"].lock()
        # load is called inside remove()
        for s in root_config.setconfig.active:
            sets["selected"].remove(SETPREFIX + s)
        sets["selected"].unlock()

    return 1
def repoman_sez(msg):
    """Print a RepoMan quip: a green "RepoMan sez:" prefix followed by msg."""
    prefix = green("RepoMan sez:")
    print(prefix, msg)
    def perform(self, qa_output):
        """Run the full commit sequence: stage VCS adds/removals, build the
        commit message, update copyrights and ChangeLogs, regenerate and
        optionally sign Manifests, and commit everything.

        @param qa_output: QA scan output used to seed a new commit
            message when none was supplied
        """
        myautoadd = self._vcs_autoadd()
        self._vcs_deleted()
        changes = self.get_vcs_changed()
        mynew, mychanged, myremoved, no_expansion, expansion = changes

        # Manifests need to be regenerated after all other commits, so
        # don't commit them now even if they have changed.
        mymanifests = set()
        myupdates = set()
        for f in mychanged + mynew:
            if "Manifest" == os.path.basename(f):
                mymanifests.add(f)
            else:
                myupdates.add(f)
        myupdates.difference_update(myremoved)
        myupdates = list(myupdates)
        mymanifests = list(mymanifests)
        myheaders = []

        commitmessage = self.options.commitmsg
        if self.options.commitmsgfile:
            try:
                f = io.open(
                    _unicode_encode(
                        self.options.commitmsgfile,
                        encoding=_encodings['fs'], errors='strict'),
                    mode='r', encoding=_encodings['content'],
                    errors='replace')
                commitmessage = f.read()
                f.close()
                del f
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    portage.writemsg(
                        "!!! File Not Found:"
                        " --commitmsgfile='%s'\n" %
                        self.options.commitmsgfile)
                else:
                    raise
            # Replace a literal "cat/pkg: " placeholder prefix with the
            # real package prefix.
            if commitmessage[:9].lower() in ("cat/pkg: ", ):
                commitmessage = self.msg_prefix() + commitmessage[9:]

        if not commitmessage or not commitmessage.strip():
            commitmessage = self.get_new_commit_message(qa_output)

        commitmessage = commitmessage.rstrip()

        # Update copyright for new and changed files
        year = time.strftime('%Y', time.gmtime())
        for fn in chain(mynew, mychanged):
            if fn.endswith('.diff') or fn.endswith('.patch'):
                continue
            update_copyright(fn, year, pretend=self.options.pretend)

        myupdates, broken_changelog_manifests = self.changelogs(
            myupdates, mymanifests, myremoved, mychanged, myautoadd,
            mynew, commitmessage)

        # Ensure the message body is separated from any trailing
        # footer tags (e.g. "Signed-off-by:") by a blank line.
        lines = commitmessage.splitlines()
        lastline = lines[-1]
        if len(lines) == 1 or re.match(r'^\S+:\s', lastline) is None:
            commitmessage += '\n'

        commit_footer = self.get_commit_footer()
        commitmessage += commit_footer

        print("* %s files being committed..."
            % green(str(len(myupdates))), end=' ')

        if not self.vcs_settings.needs_keyword_expansion:
            # With some VCS types there's never any keyword expansion, so
            # there's no need to regenerate manifests and all files will be
            # committed in one big commit at the end.
            logging.debug("VCS type doesn't need keyword expansion")
            print()
        elif not self.repo_settings.repo_config.thin_manifest:
            logging.debug("perform: Calling thick_manifest()")
            self.vcs_settings.changes.thick_manifest(
                myupdates, myheaders, no_expansion, expansion)

        logging.info("myupdates: %s", myupdates)
        logging.info("myheaders: %s", myheaders)

        uq = UserQuery(self.options)
        if self.options.ask and uq.query('Commit changes?', True) != 'Yes':
            print("* aborting commit.")
            sys.exit(128 + signal.SIGINT)

        # Handle the case where committed files have keywords which
        # will change and need a priming commit before the Manifest
        # can be committed.
        if (myupdates or myremoved) and myheaders:
            self.priming_commit(myupdates, myremoved, commitmessage)

        # When files are removed and re-added, the cvs server will put
        # /Attic/ inside the $Header path. This code detects the problem
        # and corrects it so that the Manifest will generate correctly.
        # See bug #169500.
        # Use binary mode in order to avoid potential character encoding
        # issues.
        self.vcs_settings.changes.clear_attic(myheaders)

        if self.scanner.repolevel == 1:
            utilities.repoman_sez(
                "\"You're rather crazy... "
                "doing the entire repository.\"\n")

        self.vcs_settings.changes.digest_regen(
            myupdates, myremoved, mymanifests, self.scanner,
            broken_changelog_manifests)

        if self.repo_settings.sign_manifests:
            self.sign_manifest(myupdates, myremoved, mymanifests)

        self.vcs_settings.changes.update_index(mymanifests, myupdates)

        self.add_manifest(mymanifests, myheaders, myupdates, myremoved,
            commitmessage)

        if self.options.quiet:
            return
        print()
        if self.vcs_settings.vcs:
            print("Commit complete.")
        else:
            print(
                "repoman was too scared"
                " by not seeing any familiar version control file"
                " that he forgot to commit anything")
        utilities.repoman_sez(
            "\"If everyone were like you, I'd be out of business!\"\n")
        return
def _unmerge_display(root_config, myopts, unmerge_action,
	unmerge_files, clean_delay=1, ordered=0,
	writemsg_level=portage.util.writemsg_level):
	"""
	Resolve which installed packages an unmerge-style action
	("unmerge", "prune" or "clean") would remove, and print a preview.

	Returns a tuple of (returncode, pkgmap) where returncode is
	os.EX_OK if no errors occur, and 1 otherwise.

	@param root_config: RootConfig providing settings, sets and trees
	@param myopts: emerge option dict (checked for --quiet/--pretend/--ask)
	@param unmerge_action: one of "unmerge", "prune", "clean"
	@param unmerge_files: command-line package arguments (may be empty)
	@param clean_delay: not referenced within this function; presumably
		kept for caller interface compatibility — TODO confirm
	@param ordered: when false, pkgmap entries are merged per-cp and sorted
	@param writemsg_level: output function (injectable for testing)
	"""
	quiet = "--quiet" in myopts
	settings = root_config.settings
	sets = root_config.sets
	vartree = root_config.trees["vartree"]
	candidate_catpkgs = []
	global_unmerge = 0
	out = portage.output.EOutput()
	pkg_cache = {}
	db_keys = list(vartree.dbapi._aux_cache_keys)

	def _pkg(cpv):
		# Memoized construction of installed-package instances; raises
		# KeyError (from aux_get) if cpv vanished concurrently.
		pkg = pkg_cache.get(cpv)
		if pkg is None:
			pkg = Package(built=True, cpv=cpv, installed=True,
				metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
				operation="uninstall", root_config=root_config,
				type_name="installed")
			pkg_cache[cpv] = pkg
		return pkg

	vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
	try:
		# At least the parent needs to exist for the lock file.
		portage.util.ensure_dirs(vdb_path)
	except portage.exception.PortageException:
		pass
	vdb_lock = None
	try:
		if os.access(vdb_path, os.W_OK):
			vartree.dbapi.lock()
			vdb_lock = True

		# Expand the @system set into concrete atoms, remembering which
		# real cp came from which virtual (for later warning messages).
		realsyslist = []
		sys_virt_map = {}
		for x in sets["system"].getAtoms():
			for atom in expand_new_virt(vartree.dbapi, x):
				if not atom.blocker:
					realsyslist.append(atom)
					if atom.cp != x.cp:
						sys_virt_map[atom.cp] = x.cp

		syslist = []
		for x in realsyslist:
			mycp = x.cp
			# Since Gentoo stopped using old-style virtuals in
			# 2011, typically it's possible to avoid getvirtuals()
			# calls entirely. It will not be triggered here by
			# new-style virtuals since those are expanded to
			# non-virtual atoms above by expand_new_virt().
			if mycp.startswith("virtual/") and \
				mycp in settings.getvirtuals():
				providers = []
				for provider in settings.getvirtuals()[mycp]:
					if vartree.dbapi.match(provider):
						providers.append(provider)
				if len(providers) == 1:
					syslist.extend(providers)
			else:
				syslist.append(mycp)
		syslist = frozenset(syslist)

		if not unmerge_files:
			if unmerge_action == "unmerge":
				print()
				print(bold("emerge unmerge") +
					" can only be used with specific package names")
				print()
				return 1, {}
			else:
				global_unmerge = 1

		localtree = vartree
		# process all arguments and add all
		# valid db entries to candidate_catpkgs
		if global_unmerge:
			if not unmerge_files:
				candidate_catpkgs.extend(vartree.dbapi.cp_all())
		else:
			#we've got command-line arguments
			if not unmerge_files:
				print("\nNo packages to unmerge have been provided.\n")
				return 1, {}
			for x in unmerge_files:
				arg_parts = x.split('/')
				if x[0] not in [".","/"] and \
					arg_parts[-1][-7:] != ".ebuild":
					#possible cat/pkg or dep; treat as such
					candidate_catpkgs.append(x)
				elif unmerge_action in ["prune", "clean"]:
					print("\n!!! Prune and clean do not accept individual" + \
						" ebuilds as arguments;\n    skipping.\n")
					continue
				else:
					# it appears that the user is specifying an installed
					# ebuild and we're in "unmerge" mode, so it's ok.
					if not os.path.exists(x):
						print("\n!!! The path '" + x + "' doesn't exist.\n")
						return 1, {}

					absx = os.path.abspath(x)
					sp_absx = absx.split("/")
					if sp_absx[-1][-7:] == ".ebuild":
						del sp_absx[-1]
						absx = "/".join(sp_absx)

					sp_absx_len = len(sp_absx)

					vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)

					sp_vdb = vdb_path.split("/")
					sp_vdb_len = len(sp_vdb)

					if not os.path.exists(absx + "/CONTENTS"):
						print("!!! Not a valid db dir: " + str(absx))
						return 1, {}

					if sp_absx_len <= sp_vdb_len:
						# The Path is shorter... so it can't be inside the vdb.
						print(sp_absx)
						print(absx)
						print("\n!!!", x, "cannot be inside "+ \
							vdb_path+"; aborting.\n")
						return 1, {}

					for idx in range(0, sp_vdb_len):
						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
							print(sp_absx)
							print(absx)
							print("\n!!!", x, "is not inside "+\
								vdb_path+"; aborting.\n")
							return 1, {}

					# Turn the vdb directory path into an "=cat/pkg-ver" atom.
					print("=" + "/".join(sp_absx[sp_vdb_len:]))
					candidate_catpkgs.append(
						"=" + "/".join(sp_absx[sp_vdb_len:]))

		newline = ""
		if (not "--quiet" in myopts):
			newline = "\n"
		if settings["ROOT"] != "/":
			writemsg_level(darkgreen(newline+ \
				">>> Using system located in ROOT tree %s\n" % \
				settings["ROOT"]))

		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
			not ("--quiet" in myopts):
			writemsg_level(darkgreen(newline+\
				">>> These are the packages that would be unmerged:\n"))

		# Preservation of order is required for --depclean and --prune so
		# that dependencies are respected. Use all_selected to eliminate
		# duplicate packages since the same package may be selected by
		# multiple atoms.
		pkgmap = []
		all_selected = set()
		for x in candidate_catpkgs:
			# cycle through all our candidate deps and determine
			# what will and will not get unmerged
			try:
				mymatch = vartree.dbapi.match(x)
			except portage.exception.AmbiguousPackageName as errpkgs:
				print("\n\n!!! The short ebuild name \"" + \
					x + "\" is ambiguous.  Please specify")
				print("!!! one of the following fully-qualified " + \
					"ebuild names instead:\n")
				for i in errpkgs[0]:
					print("    " + green(i))
				print()
				sys.exit(1)

			if not mymatch and x[0] not in "<>=~":
				mymatch = localtree.dep_match(x)
			if not mymatch:
				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
					(x.replace("null/", ""), unmerge_action), noiselevel=-1)
				continue

			# Each candidate atom contributes one protected/selected/omitted
			# bucket to pkgmap.
			pkgmap.append(
				{"protected": set(), "selected": set(), "omitted": set()})
			mykey = len(pkgmap) - 1
			if unmerge_action == "unmerge":
				for y in mymatch:
					if y not in all_selected:
						pkgmap[mykey]["selected"].add(y)
						all_selected.add(y)
			elif unmerge_action == "prune":
				if len(mymatch) == 1:
					continue
				best_version = mymatch[0]
				best_slot = vartree.getslot(best_version)
				best_counter = vartree.dbapi.cpv_counter(best_version)
				for mypkg in mymatch[1:]:
					myslot = vartree.getslot(mypkg)
					mycounter = vartree.dbapi.cpv_counter(mypkg)
					if (myslot == best_slot and mycounter > best_counter) or \
						mypkg == portage.best([mypkg, best_version]):
						if myslot == best_slot:
							if mycounter < best_counter:
								# On slot collision, keep the one with the
								# highest counter since it is the most
								# recently installed.
								continue
						best_version = mypkg
						best_slot = myslot
						best_counter = mycounter
				pkgmap[mykey]["protected"].add(best_version)
				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
					if mypkg != best_version and mypkg not in all_selected)
				all_selected.update(pkgmap[mykey]["selected"])
			else:
				# unmerge_action == "clean"
				slotmap = {}
				for mypkg in mymatch:
					if unmerge_action == "clean":
						myslot = localtree.getslot(mypkg)
					else:
						# since we're pruning, we don't care about slots
						# and put all the pkgs in together
						myslot = 0
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg

				for mypkg in vartree.dbapi.cp_list(
					portage.cpv_getkey(mymatch[0])):
					myslot = vartree.getslot(mypkg)
					if myslot not in slotmap:
						slotmap[myslot] = {}
					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg

				for myslot in slotmap:
					counterkeys = list(slotmap[myslot])
					if not counterkeys:
						continue
					counterkeys.sort()
					# Highest counter in the slot is the newest install: protect it.
					pkgmap[mykey]["protected"].add(
						slotmap[myslot][counterkeys[-1]])
					del counterkeys[-1]

					for counter in counterkeys[:]:
						mypkg = slotmap[myslot][counter]
						if mypkg not in mymatch:
							counterkeys.remove(counter)
							pkgmap[mykey]["protected"].add(
								slotmap[myslot][counter])

					#be pretty and get them in order of merge:
					for ckey in counterkeys:
						mypkg = slotmap[myslot][ckey]
						if mypkg not in all_selected:
							pkgmap[mykey]["selected"].add(mypkg)
							all_selected.add(mypkg)
					# ok, now the last-merged package
					# is protected, and the rest are selected
		numselected = len(all_selected)
		if global_unmerge and not numselected:
			portage.writemsg_stdout(
				"\n>>> No outdated packages were found on your system.\n")
			return 1, {}

		if not numselected:
			portage.writemsg_stdout(
				"\n>>> No packages selected for removal by " + \
				unmerge_action + "\n")
			return 1, {}
	finally:
		if vdb_lock:
			vartree.dbapi.flush_cache()
			vartree.dbapi.unlock()

	# generate a list of package sets that are directly or indirectly listed in "selected",
	# as there is no persistent list of "installed" sets
	installed_sets = ["selected"]
	stop = False
	pos = 0
	while not stop:
		stop = True
		pos = len(installed_sets)
		for s in installed_sets[pos - 1:]:
			if s not in sets:
				continue
			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() \
				if x.startswith(SETPREFIX)]
			if candidates:
				stop = False
				installed_sets += candidates
	installed_sets = [x for x in installed_sets \
		if x not in root_config.setconfig.active]
	del stop, pos

	# we don't want to unmerge packages that are still listed in user-editable package sets
	# listed in "world" as they would be remerged on the next update of "world" or the
	# relevant package sets.
	unknown_sets = set()
	for cp in range(len(pkgmap)):
		for cpv in pkgmap[cp]["selected"].copy():
			try:
				pkg = _pkg(cpv)
			except KeyError:
				# It could have been uninstalled
				# by a concurrent process.
				continue

			if unmerge_action != "clean" and \
				root_config.root == "/":
				# Never let Portage unmerge itself or the Python interpreter
				# currently running it; reclassify such cpvs as protected.
				skip_pkg = False
				if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
					msg = ("Not unmerging package %s since there is no valid reason "
						"for Portage to unmerge itself.") % (pkg.cpv,)
					skip_pkg = True
				elif vartree.dbapi._dblink(cpv).isowner(portage._python_interpreter):
					msg = ("Not unmerging package %s since there is no valid reason "
						"for Portage to unmerge currently used Python interpreter.") % (pkg.cpv,)
					skip_pkg = True
				if skip_pkg:
					for line in textwrap.wrap(msg, 75):
						out.eerror(line)
					# adjust pkgmap so the display output is correct
					pkgmap[cp]["selected"].remove(cpv)
					all_selected.remove(cpv)
					pkgmap[cp]["protected"].add(cpv)
					continue

			parents = []
			for s in installed_sets:
				# skip sets that the user requested to unmerge, and skip world
				# user-selected set, since the package will be removed from
				# that set later on.
				if s in root_config.setconfig.active or s == "selected":
					continue

				if s not in sets:
					if s in unknown_sets:
						continue
					unknown_sets.add(s)
					out = portage.output.EOutput()
					out.eerror(("Unknown set '@%s' in %s%s") % \
						(s, root_config.settings['EROOT'],
						portage.const.WORLD_SETS_FILE))
					continue

				# only check instances of EditablePackageSet as other classes are generally used for
				# special purposes and can be ignored here (and are usually generated dynamically, so the
				# user can't do much about them anyway)
				if isinstance(sets[s], EditablePackageSet):

					# This is derived from a snippet of code in the
					# depgraph._iter_atoms_for_pkg() method.
					for atom in sets[s].iterAtomsForPackage(pkg):
						inst_matches = vartree.dbapi.match(atom)
						inst_matches.reverse() # descending order
						higher_slot = None
						for inst_cpv in inst_matches:
							try:
								inst_pkg = _pkg(inst_cpv)
							except KeyError:
								# It could have been uninstalled
								# by a concurrent process.
								continue

							if inst_pkg.cp != atom.cp:
								continue
							if pkg >= inst_pkg:
								# This is descending order, and we're not
								# interested in any versions <= pkg given.
								break
							if pkg.slot_atom != inst_pkg.slot_atom:
								higher_slot = inst_pkg
								break
						if higher_slot is None:
							parents.append(s)
							break
			if parents:
				print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
				print(colorize("WARN", "but still listed in the following package sets:"))
				print("    %s\n" % ", ".join(parents))

	del installed_sets

	numselected = len(all_selected)
	if not numselected:
		writemsg_level(
			"\n>>> No packages selected for removal by " + \
			unmerge_action + "\n")
		return 1, {}

	# Unmerge order only matters in some cases
	if not ordered:
		unordered = {}
		for d in pkgmap:
			selected = d["selected"]
			if not selected:
				continue
			cp = portage.cpv_getkey(next(iter(selected)))
			cp_dict = unordered.get(cp)
			if cp_dict is None:
				cp_dict = {}
				unordered[cp] = cp_dict
				for k in d:
					cp_dict[k] = set()
			for k, v in d.items():
				cp_dict[k].update(v)
		pkgmap = [unordered[cp] for cp in sorted(unordered)]

	# Display phase: one section per pkgmap entry.
	for x in range(len(pkgmap)):
		selected = pkgmap[x]["selected"]
		if not selected:
			continue
		for mytype, mylist in pkgmap[x].items():
			if mytype == "selected":
				continue
			mylist.difference_update(all_selected)
		cp = portage.cpv_getkey(next(iter(selected)))
		for y in localtree.dep_match(cp):
			if y not in pkgmap[x]["omitted"] and \
				y not in pkgmap[x]["selected"] and \
				y not in pkgmap[x]["protected"] and \
				y not in all_selected:
				pkgmap[x]["omitted"].add(y)
		if global_unmerge and not pkgmap[x]["selected"]:
			#avoid cluttering the preview printout with stuff that isn't getting unmerged
			continue
		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
			virt_cp = sys_virt_map.get(cp)
			if virt_cp is None:
				cp_info = "'%s'" % (cp,)
			else:
				cp_info = "'%s' (%s)" % (cp, virt_cp)
			writemsg_level(colorize("BAD","\n\n!!! " + \
				"%s is part of your system profile.\n" % (cp_info,)),
				level=logging.WARNING, noiselevel=-1)
			writemsg_level(colorize("WARN","!!! Unmerging it may " + \
				"be damaging to your system.\n\n"),
				level=logging.WARNING, noiselevel=-1)
		if not quiet:
			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
		else:
			writemsg_level(bold(cp) + ": ", noiselevel=-1)
		for mytype in ["selected", "protected", "omitted"]:
			if not quiet:
				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
			if pkgmap[x][mytype]:
				sorted_pkgs = []
				for mypkg in pkgmap[x][mytype]:
					try:
						sorted_pkgs.append(mypkg.cpv)
					except AttributeError:
						sorted_pkgs.append(_pkg_str(mypkg))
				sorted_pkgs.sort(key=cpv_sort_key())
				for mypkg in sorted_pkgs:
					if mytype == "selected":
						writemsg_level(
							colorize("UNMERGE_WARN", mypkg.version + " "),
							noiselevel=-1)
					else:
						writemsg_level(
							colorize("GOOD", mypkg.version + " "),
							noiselevel=-1)
			else:
				writemsg_level("none ", noiselevel=-1)
			if not quiet:
				writemsg_level("\n", noiselevel=-1)
		if quiet:
			writemsg_level("\n", noiselevel=-1)

	writemsg_level("\nAll selected packages: %s\n" % \
		" ".join(all_selected), noiselevel=-1)

	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
		" packages are slated for removal.\n")
	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
		" and " + colorize("GOOD", "'omitted'") + \
		" packages will not be removed.\n\n")

	return os.EX_OK, pkgmap
def __init__(self, repo_settings, myreporoot, config_root, options,
	vcs_settings, mydir, env):
	'''Initialize the repository scanner: gather categories, trim the
	portdb metadata cache, load repo-level metadata and instantiate the
	per-run QA check objects.

	@param repo_settings: repository settings object (provides portdb,
		repo_config and repoman_settings)
	@param myreporoot: path of the repo location being scanned, split on
		os.path.sep to derive the scan depth (repolevel)
	@param config_root: configuration root path (stored, not read here)
	@param options: parsed repoman command-line options
	@param vcs_settings: VCS settings object (vcs type, etc.)
	@param mydir: current working directory, used to compute startdir
	@param env: environment dict (stored, not read here)
	'''
	self.repo_settings = repo_settings
	self.config_root = config_root
	self.options = options
	self.vcs_settings = vcs_settings
	self.env = env

	# Repoman sets it's own ACCEPT_KEYWORDS and we don't want it to
	# behave incrementally.
	self.repoman_incrementals = tuple(
		x for x in portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')

	# Union of the 'profiles/categories' files of all port trees.
	self.categories = []
	for path in self.repo_settings.repo_config.eclass_db.porttrees:
		self.categories.extend(portage.util.grabfile(
			os.path.join(path, 'profiles', 'categories')))
	self.repo_settings.repoman_settings.categories = frozenset(
		portage.util.stack_lists([self.categories], incremental=1))
	self.categories = self.repo_settings.repoman_settings.categories

	# Use the last (highest-priority) tree that ships metadata.dtd.
	metadata_dtd = None
	for path in reversed(self.repo_settings.repo_config.eclass_db.porttrees):
		path = os.path.join(path, 'metadata/dtd/metadata.dtd')
		if os.path.exists(path):
			metadata_dtd = path
			break

	self.portdb = repo_settings.portdb
	self.portdb.settings = self.repo_settings.repoman_settings

	# We really only need to cache the metadata that's necessary for visibility
	# filtering. Anything else can be discarded to reduce memory consumption.
	if self.options.mode != "manifest" and self.options.digest != "y":
		# Don't do this when generating manifests, since that uses
		# additional keys if spawn_nofetch is called (RESTRICT and
		# DEFINED_PHASES).
		self.portdb._aux_cache_keys.clear()
		self.portdb._aux_cache_keys.update(
			["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])

	self.reposplit = myreporoot.split(os.path.sep)
	self.repolevel = len(self.reposplit)

	if self.options.mode == 'commit':
		repochecks.commit_check(self.repolevel, self.reposplit)
		repochecks.conflict_check(self.vcs_settings, self.options)

	# Make startdir relative to the canonical repodir, so that we can pass
	# it to digestgen and it won't have to be canonicalized again.
	if self.repolevel == 1:
		startdir = self.repo_settings.repodir
	else:
		startdir = normalize_path(mydir)
		startdir = os.path.join(
			self.repo_settings.repodir,
			*startdir.split(os.sep)[-2 - self.repolevel + 3:])

	# get lists of valid keywords, licenses, and use
	new_data = repo_metadata(self.portdb, self.repo_settings.repoman_settings)
	kwlist, liclist, uselist, profile_list, \
		global_pmaskdict, liclist_deprecated = new_data
	self.repo_metadata = {
		'kwlist': kwlist,
		'liclist': liclist,
		'uselist': uselist,
		'profile_list': profile_list,
		'pmaskdict': global_pmaskdict,
		'lic_deprecated': liclist_deprecated,
	}

	self.repo_settings.repoman_settings['PORTAGE_ARCHLIST'] = \
		' '.join(sorted(kwlist))
	self.repo_settings.repoman_settings.backup_changes('PORTAGE_ARCHLIST')

	self.profiles = setup_profile(profile_list)

	check_profiles(self.profiles,
		self.repo_settings.repoman_settings.archlist())

	scanlist = scan(self.repolevel, self.reposplit, startdir,
		self.categories, self.repo_settings)

	self.dev_keywords = dev_profile_keywords(self.profiles)

	self.qatracker = QATracker()

	if self.options.echangelog is None and \
		self.repo_settings.repo_config.update_changelog:
		self.options.echangelog = 'y'

	if self.vcs_settings.vcs is None:
		self.options.echangelog = 'n'

	self.check = {}
	# The --echangelog option causes automatic ChangeLog generation,
	# which invalidates changelog.ebuildadded and changelog.missing
	# checks.
	# Note: Some don't use ChangeLogs in distributed SCMs.
	# It will be generated on server side from scm log,
	# before package moves to the rsync server.
	# This is needed because they try to avoid merge collisions.
	# Gentoo's Council decided to always use the ChangeLog file.
	# TODO: shouldn't this just be switched on the repo, iso the VCS?
	is_echangelog_enabled = self.options.echangelog in ('y', 'force')
	self.vcs_settings.vcs_is_cvs_or_svn = \
		self.vcs_settings.vcs in ('cvs', 'svn')
	self.check['changelog'] = \
		not is_echangelog_enabled and self.vcs_settings.vcs_is_cvs_or_svn

	if self.options.mode == "manifest":
		pass
	elif self.options.pretend:
		print(green("\nRepoMan does a once-over of the neighborhood..."))
	else:
		print(green("\nRepoMan scours the neighborhood..."))

	self.changed = Changes(self.options)
	# bypass unneeded VCS operations if not needed
	if (self.options.if_modified == "y" or
		self.options.mode not in ("manifest", "manifest-check")):
		self.changed.scan(self.vcs_settings)

	self.have = {
		'pmasked': False,
		'dev_keywords': False,
	}

	# NOTE: match-all caches are not shared due to potential
	# differences between profiles in _get_implicit_iuse.
	self.caches = {
		'arch': {},
		'arch_xmatch': {},
		'shared_xmatch': {
			"cp-list": {}
		},
	}

	self.include_arches = None
	if self.options.include_arches:
		self.include_arches = set()
		self.include_arches.update(
			*[x.split() for x in self.options.include_arches])

	# Disable the "ebuild.notadded" check when not in commit mode and
	# running `svn status` in every package dir will be too expensive.
	self.check['ebuild_notadded'] = not \
		(self.vcs_settings.vcs == "svn"
		and self.repolevel < 3
		and self.options.mode != "commit")

	self.effective_scanlist = scanlist
	if self.options.if_modified == "y":
		self.effective_scanlist = sorted(vcs_files_to_cps(
			chain(self.changed.changed, self.changed.new,
				self.changed.removed),
			self.repolevel, self.reposplit, self.categories))

	self.live_eclasses = portage.const.LIVE_ECLASSES

	# initialize our checks classes here before the big xpkg loop
	self.manifester = Manifests(self.options, self.qatracker,
		self.repo_settings.repoman_settings)
	self.is_ebuild = IsEbuild(self.repo_settings.repoman_settings,
		self.repo_settings, self.portdb, self.qatracker)
	self.filescheck = FileChecks(self.qatracker,
		self.repo_settings.repoman_settings, self.repo_settings,
		self.portdb, self.vcs_settings)
	self.status_check = VCSStatus(self.vcs_settings, self.qatracker)
	self.fetchcheck = FetchChecks(self.qatracker, self.repo_settings,
		self.portdb, self.vcs_settings)
	self.pkgmeta = PkgMetadata(self.options, self.qatracker,
		self.repo_settings.repoman_settings, metadata_dtd=metadata_dtd)
	self.thirdparty = ThirdPartyMirrors(
		self.repo_settings.repoman_settings, self.qatracker)
	self.use_flag_checks = USEFlagChecks(self.qatracker, uselist)
	self.keywordcheck = KeywordChecks(self.qatracker, self.options)
	self.liveeclasscheck = LiveEclassChecks(self.qatracker)
	self.rubyeclasscheck = RubyEclassChecks(self.qatracker)
	self.eapicheck = EAPIChecks(self.qatracker, self.repo_settings)
	self.descriptioncheck = DescriptionChecks(self.qatracker)
	self.licensecheck = LicenseChecks(self.qatracker, liclist,
		liclist_deprecated)
	self.restrictcheck = RestrictChecks(self.qatracker)
def is_masked(ebuild):
	"""Return True when every ebuild that could satisfy *ebuild* is masked.

	A warning is logged whenever a mask is detected.
	"""
	masked = bool(get_masking_status(ebuild))
	if masked:
		warning = "".join([
			' !!! ',
			red('All ebuilds that could satisfy: '),
			green(ebuild),
			red(' have been masked'),
		])
		logger.warning(warning)
	return masked
def output(self):
	"""Outputs the results of the search.

	Reads self.matches (populated by an earlier search pass), resolves
	the best visible ebuild for each match, and writes a formatted
	report to stdout. In verbose mode each hit also shows version,
	installation status, distfile size, homepage, description and
	license.
	"""
	msg = []
	msg.append("\b\b  \n[ Results for search key : " + \
		bold(self.searchkey) + " ]\n")
	msg.append("[ Applications found : " + \
		bold(str(self.mlen)) + " ]\n\n")
	vardb = self.vartree.dbapi
	for mtype in self.matches:
		for match,masked in self.matches[mtype]:
			full_package = None
			if mtype == "pkg":
				catpack = match
				full_package = self.portdb.xmatch(
					"bestmatch-visible", match)
				if not full_package:
					#no match found; we don't want to query description
					masked=1
					full_package = portage.best(
						self.portdb.xmatch("match-all",match))
			elif mtype == "desc":
				full_package = match
				match = portage.cpv_getkey(match)
			elif mtype == "set":
				# Package sets are displayed immediately; buffered text
				# accumulated so far is flushed along with them.
				msg.append(green("*") + "  " + bold(match) + "\n")
				if self.verbose:
					msg.append("      " + darkgreen("Description:") + \
						"   " + \
						self.sdict[match].getMetadata("DESCRIPTION") \
						+ "\n\n")
				writemsg_stdout(''.join(msg), noiselevel=-1)
			if full_package:
				try:
					desc, homepage, license = self.portdb.aux_get(
						full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
				except KeyError:
					msg.append("emerge: search: aux_get() failed, skipping\n")
					continue
				if masked:
					msg.append(green("*") + "  " + \
						white(match) + " " + red("[ Masked ]") + "\n")
				else:
					msg.append(green("*") + "  " + bold(match) + "\n")
				myversion = self.getVersion(full_package, search.VERSION_RELEASE)

				mysum = [0,0]
				file_size_str = None
				mycat = match.split("/")[0]
				mypkg = match.split("/")[1]
				mycpv = match + "-" + myversion
				myebuild = self.portdb.findname(mycpv)
				if myebuild:
					# Compute the total distfile size from the Manifest.
					pkgdir = os.path.dirname(myebuild)
					from portage import manifest
					mf = manifest.Manifest(
						pkgdir, self.settings["DISTDIR"])
					try:
						uri_map = self.portdb.getFetchMap(mycpv)
					except portage.exception.InvalidDependString as e:
						file_size_str = "Unknown (%s)" % (e,)
						del e
					else:
						try:
							mysum[0] = mf.getDistfilesSize(uri_map)
						except KeyError as e:
							file_size_str = "Unknown (missing " + \
								"digest for %s)" % (e,)
							del e

				available = False
				for db in self._dbs:
					if db is not vardb and \
						db.cpv_exists(mycpv):
						available = True
						if not myebuild and hasattr(db, "bintree"):
							# No ebuild in the tree: fall back to the
							# binary package's file size.
							myebuild = db.bintree.getname(mycpv)
							try:
								mysum[0] = os.stat(myebuild).st_size
							except OSError:
								myebuild = None
						break

				if myebuild and file_size_str is None:
					# Format the size in kB with comma thousands separators.
					mystr = str(mysum[0] // 1024)
					mycount = len(mystr)
					while (mycount > 3):
						mycount -= 3
						mystr = mystr[:mycount] + "," + mystr[mycount:]
					file_size_str = mystr + " kB"

				if self.verbose:
					if available:
						msg.append("      %s %s\n" % \
							(darkgreen("Latest version available:"),
							myversion))
					msg.append("      %s\n" % \
						self.getInstallationStatus(mycat+'/'+mypkg))
					if myebuild:
						msg.append("      %s %s\n" % \
							(darkgreen("Size of files:"), file_size_str))
					msg.append("      " + darkgreen("Homepage:") + \
						" " + homepage + "\n")
					msg.append("      " + darkgreen("Description:") \
						+ " " + desc + "\n")
					msg.append("      " + darkgreen("License:") + \
						" " + license + "\n\n")
	writemsg_stdout(''.join(msg), noiselevel=-1)
def emsg(msg, config):
	"""Emit a status message to the user.

	Updates the xterm title when config['showtitles'] is set; suppresses
	console output entirely when config['verbose'] is -1.
	"""
	if config['showtitles']:
		xtermTitle(msg)
	verbosity = config['verbose']
	if verbosity != -1:
		print(green(" *"), msg)
def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
	old_iuse, old_use,
	is_new, feature_flags, reinst_flags):
	"""Build the colorized USE-flag summary string for a merge-list line.

	@param conf: display configuration (reads print_use_string,
		alphabetical, all_flags)
	@param name: variable name to emit, e.g. 'USE' -> 'USE="..."'
	@param cur_iuse: IUSE of the candidate package
	@param iuse_forced: flags that are use-forced/masked; shown in parens
	@param cur_use: flags enabled for the candidate package
	@param old_iuse: IUSE of the installed version (empty if new)
	@param old_use: flags enabled on the installed version
	@param is_new: True when no version is currently installed
	@param feature_flags: flags to wrap in {braces}
	@param reinst_flags: flags that trigger the reinstall; may be falsy
	@rtype: str
	@return: formatted 'name="..." ' string, or "" when nothing to show

	Color legend (as produced below): red = enabled and unchanged/new,
	yellow+% = flag new to IUSE, green+* = toggled since the installed
	version, (yellow -flag%) = flag removed from IUSE.
	"""

	if not conf.print_use_string:
		return ""

	enabled = []
	if conf.alphabetical:
		# NOTE: intentional aliasing — with alphabetical sorting all
		# three lists are the SAME list object, so appends to disabled/
		# removed land in 'enabled' and are emitted in one sorted group.
		disabled = enabled
		removed = enabled
	else:
		disabled = []
		removed = []
	cur_iuse = set(cur_iuse)
	enabled_flags = cur_iuse.intersection(cur_use)
	removed_iuse = set(old_iuse).difference(cur_iuse)
	any_iuse = cur_iuse.union(old_iuse)
	any_iuse = list(any_iuse)
	any_iuse.sort()

	for flag in any_iuse:
		flag_str = None
		isEnabled = False
		reinst_flag = reinst_flags and flag in reinst_flags
		if flag in enabled_flags:
			isEnabled = True
			if is_new or flag in old_use and \
				(conf.all_flags or reinst_flag):
				flag_str = red(flag)
			elif flag not in old_iuse:
				flag_str = yellow(flag) + "%*"
			elif flag not in old_use:
				flag_str = green(flag) + "*"
		elif flag in removed_iuse:
			# Flag vanished from IUSE: show only with --verbose/all_flags
			# or when it caused the reinstall.
			if conf.all_flags or reinst_flag:
				flag_str = yellow("-" + flag) + "%"
				if flag in old_use:
					flag_str += "*"
				flag_str = "(" + flag_str + ")"
				removed.append(flag_str)
			continue
		else:
			if is_new or flag in old_iuse and \
				flag not in old_use and \
				(conf.all_flags or reinst_flag):
				flag_str = blue("-" + flag)
			elif flag not in old_iuse:
				flag_str = yellow("-" + flag)
				if flag not in iuse_forced:
					flag_str += "%"
			elif flag in old_use:
				flag_str = green("-" + flag) + "*"
		if flag_str:
			if flag in feature_flags:
				flag_str = "{" + flag_str + "}"
			elif flag in iuse_forced:
				flag_str = "(" + flag_str + ")"
			if isEnabled:
				enabled.append(flag_str)
			else:
				disabled.append(flag_str)

	if conf.alphabetical:
		ret = " ".join(enabled)
	else:
		ret = " ".join(enabled + disabled + removed)
	if ret:
		ret = '%s="%s" ' % (name, ret)
	return ret
def output(self):
	"""Outputs the results of the search.

	Streams results to stdout as they are produced: either the matches
	previously registered via addCP (when self._results_specified) or a
	fresh lazy search via self._iter_search(). self.mlen is incremented
	per hit and reset to 0 at the end so the method can be called again.
	"""

	class msg:
		# Minimal shim with a list-like append() that writes straight
		# to stdout, so results appear incrementally instead of being
		# buffered in a real list.
		@staticmethod
		def append(msg):
			writemsg_stdout(msg, noiselevel=-1)

	msg.append("\b\b  \n[ Results for search key : " + \
		bold(self.searchkey) + " ]\n")
	vardb = self._vardb
	metadata_keys = set(Package.metadata_keys)
	metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
	metadata_keys = tuple(metadata_keys)

	if self._results_specified:
		# Handle results added via addCP
		addCP_matches = []
		for mytype, matches in self.matches.items():
			for match in matches:
				addCP_matches.append((mytype, match))
		iterator = iter(addCP_matches)

	else:
		# Do a normal search
		iterator = self._iter_search()

	for mtype, match in iterator:
		self.mlen += 1
		masked = False
		full_package = None
		if mtype in ("pkg", "desc"):
			full_package = self._xmatch(
				"bestmatch-visible", match)
			if not full_package:
				# Nothing visible: fall back to the highest masked match.
				masked = True
				full_package = self._xmatch("match-all", match)
				if full_package:
					full_package = full_package[-1]
		elif mtype == "set":
			msg.append(green("*") + "  " + bold(match) + "\n")
			if self.verbose:
				msg.append("      " + darkgreen("Description:") + \
					"   " + \
					self.sdict[match].getMetadata("DESCRIPTION") \
					+ "\n\n")
		if full_package:
			try:
				metadata = dict(zip(metadata_keys,
					self._aux_get(full_package, metadata_keys)))
			except KeyError:
				self._aux_get_error(full_package)
				continue

			desc = metadata["DESCRIPTION"]
			homepage = metadata["HOMEPAGE"]
			license = metadata["LICENSE"]  # pylint: disable=redefined-builtin

			if masked:
				msg.append(green("*") + "  " + \
					bold(match) + " " + red("[ Masked ]") + "\n")
			else:
				msg.append(green("*") + "  " + bold(match) + "\n")
			myversion = self.getVersion(full_package, search.VERSION_RELEASE)

			mysum = [0, 0]
			file_size_str = None
			mycat = match.split("/")[0]
			mypkg = match.split("/")[1]
			mycpv = match + "-" + myversion
			myebuild = self._findname(mycpv)
			if myebuild:
				# Compute distfile size from the repo Manifest, honoring
				# the package's enabled USE for SRC_URI conditionals.
				pkg = Package(built=False, cpv=mycpv,
					installed=False, metadata=metadata,
					root_config=self.root_config, type_name="ebuild")
				pkgdir = os.path.dirname(myebuild)
				mf = self.settings.repositories.get_repo_for_location(
					os.path.dirname(os.path.dirname(pkgdir)))
				mf = mf.load_manifest(
					pkgdir, self.settings["DISTDIR"])
				try:
					uri_map = _parse_uri_map(mycpv, metadata,
						use=pkg.use.enabled)
				except portage.exception.InvalidDependString as e:
					file_size_str = "Unknown (%s)" % (e,)
					del e
				else:
					try:
						mysum[0] = mf.getDistfilesSize(uri_map)
					except KeyError as e:
						file_size_str = "Unknown (missing " + \
							"digest for %s)" % (e,)
						del e

			available = False
			for db in self._dbs:
				if db is not vardb and \
					db.cpv_exists(mycpv):
					available = True
					if not myebuild and hasattr(db, "bintree"):
						# No ebuild available: use the binary package's
						# on-disk size instead.
						myebuild = db.bintree.getname(mycpv)
						try:
							mysum[0] = os.stat(myebuild).st_size
						except OSError:
							myebuild = None
					break

			if myebuild and file_size_str is None:
				file_size_str = localized_size(mysum[0])

			if self.verbose:
				if available:
					msg.append("      %s %s\n" % \
						(darkgreen("Latest version available:"),
						myversion))
				msg.append("      %s\n" % \
					self.getInstallationStatus(mycat+'/'+mypkg))
				if myebuild:
					msg.append("      %s %s\n" % \
						(darkgreen("Size of files:"), file_size_str))
				msg.append("      " + darkgreen("Homepage:") + \
					" " + homepage + "\n")
				msg.append("      " + darkgreen("Description:") \
					+ " " + desc + "\n")
				msg.append("      " + darkgreen("License:") + \
					" " + license + "\n\n")

	msg.append("[ Applications found : " + \
		bold(str(self.mlen)) + " ]\n\n")

	# This method can be called multiple times, so
	# reset the match count for the next call. Don't
	# reset it at the beginning of this method, since
	# that would lose modifications from the addCP
	# method.
	self.mlen = 0
def detect_conflicts(options):
	"""Determine if the checkout has problems like svn conflicts.

	If you want more vcs support here just keep adding if blocks...
	This could be better.

	TODO(antarus): Also this should probably not call sys.exit() as
	repoman is run on >1 packages and one failure should not cause
	subsequent packages to fail.

	Args:
		options: run-time options; only options.pretend is read here
	Returns:
		False when no fatal problems were found
		(calls sys.exit on fatal problems)
	"""
	cmd = "svn status -u 2>&1 | egrep -v '^.  +.*/digest-[^/]+' | head -n-1"
	msg = "Performing a %s with a little magic grep to check for updates." % green(
		"svn status -u")
	logging.info(msg)
	# Use Popen instead of getstatusoutput(), in order to avoid
	# unicode handling problems (see bug #310789).
	args = [BASH_BINARY, "-c", cmd]
	args = [_unicode_encode(x) for x in args]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE,
		stderr=subprocess.STDOUT)
	out = _unicode_decode(proc.communicate()[0])
	proc.wait()
	mylines = out.splitlines()
	myupdates = []
	for line in mylines:
		if not line:
			continue

		# [ ] Unmodified (SVN)	[U] Updates		[P] Patches
		# [M] Modified			[A] Added		[R] Removed / Replaced
		# [D] Deleted
		if line[0] not in " UPMARD":
			# Stray Manifest is fine, we will readd it anyway.
			if line[0] == "?" and line[1:].lstrip() == "Manifest":
				continue
			# Bug fix: this runs `svn status`, so report svn (the old
			# message said "cvs", inherited from the cvs checker).
			logging.error(red(
				"!!! Please fix the following issues reported "
				"from svn: %s" % green("(U,P,M,A,R,D are ok)")))
			logging.error(red(
				"!!! Note: This is a pretend/no-modify pass..."))
			logging.error(out)
			sys.exit(1)
		elif len(line) > 8 and line[8] == "*":
			# Column 9 of `svn status -u` marks out-of-date items; guard
			# the index so short status lines cannot raise IndexError.
			myupdates.append(line[9:].lstrip(" 1234567890"))

	if myupdates:
		logging.info(green("Fetching trivial updates..."))
		if options.pretend:
			logging.info("(svn update " + " ".join(myupdates) + ")")
			retval = os.EX_OK
		else:
			retval = os.system("svn update " + " ".join(myupdates))
		if retval != os.EX_OK:
			logging.fatal("!!! svn exited with an error. Terminating.")
			sys.exit(retval)
	return False
def __init__(self, repo_settings, myreporoot, config_root, options, vcs_settings, mydir, env):
	'''Class __init__

	Wires up repository settings, VCS state, metadata caches and the
	plugin configuration needed for a repoman scan.

	@param repo_settings: repository settings instance
	@param myreporoot: path of the repository root being scanned
	@param config_root: configuration root directory
	@param options: parsed command line options
	@param vcs_settings: VCS settings instance
	@param mydir: directory the scan was started from
	@param env: environment dict passed through to the plugins
	'''
	self.repo_settings = repo_settings
	self.config_root = config_root
	self.options = options
	self.vcs_settings = vcs_settings
	self.env = env

	# Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
	# behave incrementally.
	self.repoman_incrementals = tuple(
		x for x in portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')

	# Collect valid category names from every tree's profiles/categories.
	self.categories = []
	for path in self.repo_settings.repo_config.eclass_db.porttrees:
		self.categories.extend(
			portage.util.grabfile(
				os.path.join(path, 'profiles', 'categories')))
	self.repo_settings.repoman_settings.categories = frozenset(
		portage.util.stack_lists([self.categories], incremental=1))
	self.categories = self.repo_settings.repoman_settings.categories

	self.portdb = repo_settings.portdb
	self.portdb.settings = self.repo_settings.repoman_settings

	# Manifests are (re)generated for explicit digest requests and for
	# the manifest/commit/fix modes.
	digest_only = self.options.mode != 'manifest-check' \
		and self.options.digest == 'y'
	self.generate_manifest = digest_only or self.options.mode in \
		("manifest", 'commit', 'fix')

	# We really only need to cache the metadata that's necessary for visibility
	# filtering. Anything else can be discarded to reduce memory consumption.
	if not self.generate_manifest:
		# Don't do this when generating manifests, since that uses
		# additional keys if spawn_nofetch is called (RESTRICT and
		# DEFINED_PHASES).
		self.portdb._aux_cache_keys.clear()
		self.portdb._aux_cache_keys.update(
			["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])

	self.reposplit = myreporoot.split(os.path.sep)
	self.repolevel = len(self.reposplit)

	if self.options.mode == 'commit':
		repochecks.commit_check(self.repolevel, self.reposplit)
		repochecks.conflict_check(self.vcs_settings, self.options)

	# Make startdir relative to the canonical repodir, so that we can pass
	# it to digestgen and it won't have to be canonicalized again.
	if self.repolevel == 1:
		startdir = self.repo_settings.repodir
	else:
		startdir = normalize_path(mydir)
		startdir = os.path.join(
			self.repo_settings.repodir, *startdir.split(os.sep)[-2 - self.repolevel + 3:])

	# get lists of valid keywords, licenses, and use
	new_data = repo_metadata(self.portdb, self.repo_settings.repoman_settings)
	kwlist, liclist, uselist, profile_list, \
		global_pmaskdict, liclist_deprecated = new_data
	self.repo_metadata = {
		'kwlist': kwlist,
		'liclist': liclist,
		'uselist': uselist,
		'profile_list': profile_list,
		'pmaskdict': global_pmaskdict,
		'lic_deprecated': liclist_deprecated,
	}

	self.repo_settings.repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(
		sorted(kwlist))
	self.repo_settings.repoman_settings.backup_changes('PORTAGE_ARCHLIST')

	profiles = setup_profile(profile_list)

	check_profiles(profiles, self.repo_settings.repoman_settings.archlist())

	scanlist = scan(self.repolevel, self.reposplit, startdir, self.categories, self.repo_settings)

	self.dev_keywords = dev_profile_keywords(profiles)

	self.qatracker = self.vcs_settings.qatracker

	# ChangeLog generation defaults: enable when the repo requests it,
	# force-disable when no VCS is in use.
	if self.options.echangelog is None and self.repo_settings.repo_config.update_changelog:
		self.options.echangelog = 'y'

	if self.vcs_settings.vcs is None:
		self.options.echangelog = 'n'

	# Initialize the ModuleConfig class here
	# TODO Add layout.conf masters repository.yml config to the list to load/stack
	self.moduleconfig = ModuleConfig(
		self.repo_settings.masters_list,
		self.repo_settings.repoman_settings.valid_versions,
		repository_modules=self.options.experimental_repository_modules == 'y')

	checks = {}
	# The --echangelog option causes automatic ChangeLog generation,
	# which invalidates changelog.ebuildadded and changelog.missing
	# checks.
	# Note: Some don't use ChangeLogs in distributed SCMs.
	# It will be generated on server side from scm log,
	# before package moves to the rsync server.
	# This is needed because they try to avoid merge collisions.
	# Gentoo's Council decided to always use the ChangeLog file.
	# TODO: shouldn't this just be switched on the repo, iso the VCS?
	is_echangelog_enabled = self.options.echangelog in ('y', 'force')
	self.vcs_settings.vcs_is_cvs_or_svn = self.vcs_settings.vcs in ('cvs', 'svn')
	checks['changelog'] = not is_echangelog_enabled and self.vcs_settings.vcs_is_cvs_or_svn

	if self.options.mode == "manifest" or self.options.quiet:
		pass
	elif self.options.pretend:
		print(green("\nRepoMan does a once-over of the neighborhood..."))
	else:
		print(green("\nRepoMan scours the neighborhood..."))

	self.changed = self.vcs_settings.changes
	# bypass unneeded VCS operations if not needed
	if (self.options.if_modified == "y" or
		self.options.mode not in ("manifest", "manifest-check")):
		self.changed.scan()

	self.have = {
		'pmasked': False,
		'dev_keywords': False,
	}

	# NOTE: match-all caches are not shared due to potential
	# differences between profiles in _get_implicit_iuse.
	self.caches = {
		'arch': {},
		'arch_xmatch': {},
		'shared_xmatch': {
			"cp-list": {}
		},
	}

	self.include_arches = None
	if self.options.include_arches:
		self.include_arches = set()
		self.include_arches.update(
			*[x.split() for x in self.options.include_arches])

	# Disable the "self.modules['Ebuild'].notadded" check when not in commit mode and
	# running `svn status` in every package dir will be too expensive.
	checks['ebuild_notadded'] = not \
		(self.vcs_settings.vcs == "svn" and self.repolevel < 3 and self.options.mode != "commit")

	# With --if-modified, restrict the scan to packages touched by the
	# working copy's changed/new/removed file sets.
	self.effective_scanlist = scanlist
	if self.options.if_modified == "y":
		self.effective_scanlist = sorted(
			vcs_files_to_cps(
				chain(self.changed.changed, self.changed.new, self.changed.removed),
				self.repo_settings.repodir,
				self.repolevel, self.reposplit, self.categories))

	# Create our kwargs dict here to initialize the plugins with
	self.kwargs = {
		"repo_settings": self.repo_settings,
		"portdb": self.portdb,
		"qatracker": self.qatracker,
		"vcs_settings": self.vcs_settings,
		"options": self.options,
		"metadata_xsd": get_metadata_xsd(self.repo_settings),
		"uselist": uselist,
		"checks": checks,
		"repo_metadata": self.repo_metadata,
		"profiles": profiles,
		"include_arches": self.include_arches,
		"caches": self.caches,
		"repoman_incrementals": self.repoman_incrementals,
		"env": self.env,
		"have": self.have,
		"dev_keywords": self.dev_keywords,
		"linechecks": self.moduleconfig.linechecks,
	}

	# initialize the plugin checks here
	self.modules = {}
	self._ext_futures = {}
	self.pkg_level_futures = None