def _expand_parent_colon(self, parentsFile, parentPath, repo_loc, repositories): colon = parentPath.find(":") if colon == -1: return parentPath if colon == 0: if repo_loc is None: raise ParseError( _("Parent '%s' not found: '%s'") % \ (parentPath, parentsFile)) else: parentPath = normalize_path(os.path.join( repo_loc, 'profiles', parentPath[colon+1:])) else: p_repo_name = parentPath[:colon] try: p_repo_loc = repositories.get_location_for_name(p_repo_name) except KeyError: raise ParseError( _("Parent '%s' not found: '%s'") % \ (parentPath, parentsFile)) else: parentPath = normalize_path(os.path.join( p_repo_loc, 'profiles', parentPath[colon+1:])) return parentPath
def make_herd_base(filename):
	"""
	Parse a herds.xml file and build a HerdBase from it.

	Collects, for every <herd> element that has a <name>, the set of
	maintainer <email> addresses, plus one flat set of all addresses.

	@raises ParseError: if the XML is malformed
	@raises PermissionDenied: if the file is not readable
	@raises FileNotFound: if the file does not exist
	"""
	herd_map = {}
	every_email = set()

	try:
		tree = xml.etree.ElementTree.parse(
			_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'),
			parser=xml.etree.ElementTree.XMLParser(
			target=_HerdsTreeBuilder()))
	except ExpatError as e:
		raise ParseError("metadata.xml: " + str(e))
	except EnvironmentError as e:
		func_call = "open('%s')" % filename
		if e.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		if e.errno == errno.ENOENT:
			raise FileNotFound(filename)
		raise

	for herd_node in tree.findall('herd'):
		name_node = herd_node.find('name')
		if name_node is None:
			# A herd without a <name> cannot be keyed; skip it.
			continue
		herd_name = name_node.text.strip()

		emails = set()
		for maintainer in herd_node.findall('maintainer'):
			email_node = maintainer.find('email')
			if email_node is None:
				continue
			address = email_node.text.strip()
			emails.add(address)
			every_email.add(address)

		herd_map[herd_name] = emails

	return HerdBase(herd_map, every_email)
def _addProfile(self, currentPath, repositories, known_repos):
	"""
	Recursively register the profile at currentPath and all of its
	parents (as listed in the profile's 'parent' file), appending each
	visited profile to self.profiles and self.profiles_complex.

	@param currentPath: filesystem path of the profile directory
	@param repositories: repository set, used to resolve 'repo:path'
		parent references via self._expand_parent_colon()
	@param known_repos: iterable of (location, layout_data) pairs used
		to find which repository contains this profile
	@raises ParseError: for unsupported EAPI, empty/missing parent
		files, or unresolvable parent references
	"""
	current_abs_path = os.path.abspath(currentPath)
	allow_directories = True
	allow_parent_colon = True
	repo_loc = None
	compat_mode = False

	# Read the profile's optional 'eapi' file; a missing file is not an
	# error and leaves the default of "0".
	eapi_file = os.path.join(currentPath, "eapi")
	eapi = "0"
	f = None
	try:
		f = io.open(_unicode_encode(eapi_file,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		eapi = f.readline().strip()
	except IOError:
		pass
	else:
		if not eapi_is_supported(eapi):
			raise ParseError(_(
				"Profile contains unsupported "
				"EAPI '%s': '%s'") % \
				(eapi, os.path.realpath(eapi_file),))
	finally:
		if f is not None:
			f.close()

	intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
	if intersecting_repos:
		# protect against nested repositories.  Insane configuration, but the longest
		# path will be the correct one.
		repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
		# Directory-style profile entries are allowed either by the EAPI
		# itself or by an explicit profile-formats setting in layout.conf.
		allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
			any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
		compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
			layout_data['profile-formats'] == ('portage-1-compat',)
		allow_parent_colon = any(x in _allow_parent_colon
			for x in layout_data['profile-formats'])

	if compat_mode:
		# Warn when portage-1 directories are present but the repository
		# has not declared the portage-1 profile format.
		offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
		offenders = sorted(x for x in offenders
			if os.path.isdir(os.path.join(currentPath, x)))
		if offenders:
			warnings.warn(_("Profile '%(profile_path)s' in repository "
				"'%(repo_name)s' is implicitly using 'portage-1' profile format, but "
				"the repository profiles are not marked as that format. This will break "
				"in the future. Please either convert the following paths "
				"to files, or add\nprofile-formats = portage-1\nto the "
				"repositories layout.conf. Files: '%(files)s'\n")
				% dict(profile_path=currentPath, repo_name=repo_loc,
					files=', '.join(offenders)))

	parentsFile = os.path.join(currentPath, "parent")
	if os.path.exists(parentsFile):
		parents = grabfile(parentsFile)
		if not parents:
			raise ParseError(
				_("Empty parent file: '%s'") % parentsFile)
		for parentPath in parents:
			abs_parent = parentPath[:1] == os.sep
			if not abs_parent and allow_parent_colon:
				parentPath = self._expand_parent_colon(parentsFile,
					parentPath, repo_loc, repositories)

			# NOTE: This os.path.join() call is intended to ignore
			# currentPath if parentPath is already absolute.
			parentPath = normalize_path(os.path.join(
				currentPath, parentPath))

			if abs_parent or repo_loc is None or \
				not parentPath.startswith(repo_loc):
				# It seems that this parent may point outside
				# of the current repo, so realpath it.
				parentPath = os.path.realpath(parentPath)

			if os.path.exists(parentPath):
				self._addProfile(parentPath, repositories, known_repos)
			else:
				raise ParseError(
					_("Parent '%s' not found: '%s'") % \
					(parentPath, parentsFile))

	# Parents are appended above (depth-first), so ancestors precede
	# this profile in self.profiles.
	self.profiles.append(currentPath)
	self.profiles_complex.append(
		_profile_node(currentPath, allow_directories))
def _parse_color_map(config_root='/', onerror=None):
	"""
	Parse /etc/portage/color.map and update the module-level 'codes'
	and '_styles' dicts in place (nothing is returned).

	@param config_root: directory prepended to COLOR_MAP_FILE
	@param onerror: an optional callback to handle any ParseError that would
		otherwise be raised
	@type onerror: callable
	@raises FileNotFound: if the color.map file does not exist
	@raises PermissionDenied: if the color.map file is not readable
	@raises ParseError: on malformed lines, when no onerror callback
		is provided
	"""
	global codes, _styles
	myfile = os.path.join(config_root, COLOR_MAP_FILE)
	ansi_code_pattern = re.compile("^[0-9;]*m$")
	quotes = '\'"'
	def strip_quotes(token):
		# Remove one matching pair of surrounding quotes, if present.
		if token[0] in quotes and token[0] == token[-1]:
			token = token[1:-1]
		return token

	try:
		with io.open(_unicode_encode(myfile,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace') as f:
			lines = f.readlines()
		for lineno, line in enumerate(lines):
			# Strip an optional trailing comment.  split() is used here
			# instead of slicing with line.find("#"), which returned -1
			# for comment-free lines and thereby silently chopped the
			# last character of a final line lacking a trailing newline.
			line = line.split("#", 1)[0].strip()

			if len(line) == 0:
				continue

			split_line = line.split("=")
			if len(split_line) != 2:
				# BUG FIX: an unconditional "raise e" here previously made
				# the onerror handling below unreachable, inconsistent with
				# the other two error sites in this function and with the
				# documented onerror contract.
				e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
					(myfile, lineno))
				if onerror:
					onerror(e)
				else:
					raise e
				continue

			k = strip_quotes(split_line[0].strip())
			v = strip_quotes(split_line[1].strip())
			if k not in _styles and k not in codes:
				e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
					(myfile, lineno, k))
				if onerror:
					onerror(e)
				else:
					raise e
				continue
			if ansi_code_pattern.match(v):
				# Raw ANSI code (e.g. "01;34m"): store with escape prefix.
				if k in _styles:
					_styles[k] = (esc_seq + v, )
				elif k in codes:
					codes[k] = esc_seq + v
			else:
				# Symbolic value: one or more references to other entries.
				# Style entries keep symbolic names; code entries resolve
				# to the referenced escape sequences.
				code_list = []
				for x in v.split():
					if x in codes:
						if k in _styles:
							code_list.append(x)
						elif k in codes:
							code_list.append(codes[x])
					else:
						e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
							(myfile, lineno, x))
						if onerror:
							onerror(e)
						else:
							raise e
				if k in _styles:
					_styles[k] = tuple(code_list)
				elif k in codes:
					codes[k] = "".join(code_list)
	except (IOError, OSError) as e:
		if e.errno == errno.ENOENT:
			raise FileNotFound(myfile)
		elif e.errno == errno.EACCES:
			raise PermissionDenied(myfile)
		raise
def _addProfile(self, currentPath, repositories, known_repos):
	"""
	Recursively register the profile at currentPath and all of its
	parents (listed in the profile's 'parent' file), appending each
	visited profile to self.profiles and self.profiles_complex.

	@param currentPath: filesystem path of the profile directory
	@param repositories: repository set, used to resolve 'repo:path'
		parent references via self._expand_parent_colon()
	@param known_repos: iterable of (location, layout_data) pairs used
		to find which repository contains this profile
	@raises ParseError: for unsupported EAPI, empty parent files, or
		unresolvable parent references
	"""
	current_abs_path = os.path.abspath(currentPath)
	allow_directories = True
	allow_parent_colon = True
	repo_loc = None
	compat_mode = False
	current_formats = ()
	eapi = None

	intersecting_repos = [x for x in known_repos
		if current_abs_path.startswith(x[0])]
	if intersecting_repos:
		# Handle nested repositories. The longest path
		# will be the correct one.
		repo_loc, layout_data = max(intersecting_repos,
			key=lambda x: len(x[0]))
		# layout.conf may provide a default EAPI for profiles that have
		# no 'eapi' file of their own.
		eapi = layout_data.get("profile_eapi_when_unspecified")

	eapi_file = os.path.join(currentPath, "eapi")
	eapi = eapi or "0"
	f = None
	try:
		f = io.open(_unicode_encode(eapi_file,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		# A present 'eapi' file overrides the layout.conf default above.
		eapi = f.readline().strip()
	except IOError:
		pass
	else:
		if not eapi_is_supported(eapi):
			raise ParseError(_(
				"Profile contains unsupported "
				"EAPI '%s': '%s'") % \
				(eapi, os.path.realpath(eapi_file),))
	finally:
		if f is not None:
			f.close()

	if intersecting_repos:
		# Directory-style profile entries are allowed either by the EAPI
		# itself or by an explicit profile-formats setting in layout.conf.
		allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
			any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
		compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
			layout_data['profile-formats'] == ('portage-1-compat',)
		allow_parent_colon = any(x in _allow_parent_colon
			for x in layout_data['profile-formats'])
		current_formats = tuple(layout_data['profile-formats'])

	if compat_mode:
		# Warn when portage-1 directories are present but the repository
		# has not declared the portage-1 profile format.
		offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
		offenders = sorted(x for x in offenders
			if os.path.isdir(os.path.join(currentPath, x)))
		if offenders:
			warnings.warn(_(
				"\nThe selected profile is implicitly using the 'portage-1' format:\n"
				"\tprofile = %(profile_path)s\n"
				"But this repository is not using that format:\n"
				"\trepo = %(repo_name)s\n"
				"This will break in the future. Please convert these dirs to files:\n"
				"\t%(files)s\n"
				"Or, add this line to the repository's layout.conf:\n"
				"\tprofile-formats = portage-1")
				% dict(profile_path=currentPath, repo_name=repo_loc,
					files='\n\t'.join(offenders)))

	parentsFile = os.path.join(currentPath, "parent")
	if exists_raise_eaccess(parentsFile):
		parents = grabfile(parentsFile)
		if not parents:
			raise ParseError(
				_("Empty parent file: '%s'") % parentsFile)
		for parentPath in parents:
			abs_parent = parentPath[:1] == os.sep
			if not abs_parent and allow_parent_colon:
				parentPath = self._expand_parent_colon(parentsFile,
					parentPath, repo_loc, repositories)

			# NOTE: This os.path.join() call is intended to ignore
			# currentPath if parentPath is already absolute.
			parentPath = normalize_path(os.path.join(
				currentPath, parentPath))

			if abs_parent or repo_loc is None or \
				not parentPath.startswith(repo_loc):
				# It seems that this parent may point outside
				# of the current repo, so realpath it.
				parentPath = os.path.realpath(parentPath)

			if exists_raise_eaccess(parentPath):
				self._addProfile(parentPath, repositories, known_repos)
			else:
				raise ParseError(
					_("Parent '%s' not found: '%s'") % \
					(parentPath, parentsFile))

	# Parents are appended above (depth-first), so ancestors precede
	# this profile in self.profiles.
	self.profiles.append(currentPath)
	self.profiles_complex.append(
		_profile_node(currentPath, allow_directories, False,
			current_formats, eapi, 'build-id' in current_formats))
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""
	In order to solve bug #141118, recursively expand new-style virtuals so
	as to collapse one or more levels of indirection, generating an expanded
	search space. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual. When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match ).

	@param mysplit: a parsed dependency structure (list of Atoms, "||"
		markers and nested lists)
	@return: a new dependency structure with virtuals expanded
	@raises ParseError: for invalid tokens or failed recursive dep_check
	"""
	newsplit = []
	mytrees = trees[myroot]
	portdb = mytrees["porttree"].dbapi
	pkg_use_enabled = mytrees.get("pkg_use_enabled")
	atom_graph = mytrees.get("atom_graph")
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	graph_parent = None
	eapi = None
	if parent is not None:
		# Prefer the virtual parent (set during recursion below) when
		# attributing graph edges and the EAPI.
		if virt_parent is not None:
			graph_parent = virt_parent
			eapi = virt_parent[0].metadata['EAPI']
		else:
			graph_parent = parent
			eapi = parent.metadata["EAPI"]
	repoman = not mysettings.local_config
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	myvirtuals = mysettings.getvirtuals()
	pprovideddict = mysettings.pprovideddict
	myuse = kwargs["myuse"]
	for x in mysplit:
		if x == "||":
			newsplit.append(x)
			continue
		elif isinstance(x, list):
			# Nested group: recurse with the same settings.
			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs))
			continue

		if not isinstance(x, Atom):
			raise ParseError(_("invalid token: '%s'") % x)

		if repoman:
			x = x._eval_qa_conditionals(use_mask, use_force)

		mykey = x.cp
		if not mykey.startswith("virtual/"):
			# Non-virtual atoms pass through untouched.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue
		mychoices = myvirtuals.get(mykey, [])
		if x.blocker:
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		if repoman or not hasattr(portdb, 'match_pkgs') or \
			pkg_use_enabled is None:
			# Limited expansion mode (repoman, or a dbapi without
			# match_pkgs support): only old-style mapping is applied.
			if portdb.cp_list(x.cp):
				newsplit.append(x)
			else:
				# TODO: Add PROVIDE check for repoman.
				a = []
				for y in mychoices:
					a.append(Atom(x.replace(x.cp, y.cp, 1)))
				if not a:
					newsplit.append(x)
				elif len(a) == 1:
					newsplit.append(a[0])
				else:
					newsplit.append(['||'] + a)
			continue

		pkgs = []
		# Ignore USE deps here, since otherwise we might not
		# get any matches. Choices with correct USE settings
		# will be preferred in dep_zapdeps().
		matches = portdb.match_pkgs(x.without_use)
		# Use descending order to prefer higher versions.
		matches.reverse()
		for pkg in matches:
			# only use new-style matches
			if pkg.cp.startswith("virtual/"):
				pkgs.append(pkg)
		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual. Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such. The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
			continue

		a = []
		for pkg in pkgs:
			# Build an exact-version atom for the matched virtual, keeping
			# the original atom's USE deps, and expand its RDEPEND.
			virt_atom = '=' + pkg.cpv
			if x.use:
				virt_atom += str(x.use)
			virt_atom = Atom(virt_atom)
			# According to GLEP 37, RDEPEND is the only dependency
			# type that is valid for new-style virtuals. Repoman
			# should enforce this.
			depstring = pkg.metadata['RDEPEND']
			pkg_kwargs = kwargs.copy()
			pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
			if edebug:
				writemsg_level(_("Virtual Parent: %s\n") \
					% (pkg,), noiselevel=-1, level=logging.DEBUG)
				writemsg_level(_("Virtual Depstring: %s\n") \
					% (depstring,), noiselevel=-1, level=logging.DEBUG)

			# Set EAPI used for validation in dep_check() recursion.
			mytrees["virt_parent"] = (pkg, virt_atom)

			try:
				mycheck = dep_check(depstring, mydbapi, mysettings,
					myroot=myroot, trees=trees, **pkg_kwargs)
			finally:
				# Restore previous EAPI after recursion.
				if virt_parent is not None:
					mytrees["virt_parent"] = virt_parent
				else:
					del mytrees["virt_parent"]

			if not mycheck[0]:
				raise ParseError("%s: %s '%s'" % \
					(pkg, mycheck[1], depstring))

			# pull in the new-style virtual
			mycheck[1].append(virt_atom)
			a.append(mycheck[1])
			if atom_graph is not None:
				atom_graph.add(virt_atom, graph_parent)
		# Plain old-style virtuals. New-style virtuals are preferred.
		if not pkgs:
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				matches = portdb.match(new_atom)
				# portdb is an instance of depgraph._dep_check_composite_db, so
				# USE conditionals are already evaluated.
				if matches and mykey in \
					portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a and mychoices:
			# Check for a virtual package.provided match.
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				if match_from_list(new_atom,
					pprovideddict.get(new_atom.cp, [])):
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add(new_atom, graph_parent)

		if not a:
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add(x, graph_parent)
		elif len(a) == 1:
			newsplit.append(a[0])
		else:
			newsplit.append(['||'] + a)

	return newsplit
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
	trees=None, use_mask=None, use_force=None, **kwargs):
	"""
	In order to solve bug #141118, recursively expand new-style virtuals so
	as to collapse one or more levels of indirection, generating an expanded
	search space. In dep_zapdeps, new-style virtuals will be assigned
	zero cost regardless of whether or not they are currently installed. Virtual
	blockers are supported but only when the virtual expands to a single
	atom because it wouldn't necessarily make sense to block all the components
	of a compound virtual. When more than one new-style virtual is matched,
	the matches are sorted from highest to lowest versions and the atom is
	expanded to || ( highest match ... lowest match ).

	The result is normalized in the same way as use_reduce, having a top-level
	conjuction, and no redundant nested lists.

	@param mysplit: a normalized dependency structure (list of Atoms,
		"||" markers and nested lists)
	@return: a new, normalized dependency structure with virtuals expanded
	@raises ParseError: for invalid tokens or failed recursive dep_check
	"""
	newsplit = []
	mytrees = trees[myroot]
	portdb = mytrees["porttree"].dbapi
	pkg_use_enabled = mytrees.get("pkg_use_enabled")
	# Atoms are stored in the graph as (atom, id(atom)) tuples
	# since each atom is considered to be a unique entity. For
	# example, atoms that appear identical may behave differently
	# in USE matching, depending on their unevaluated form. Also,
	# specially generated virtual atoms may appear identical while
	# having different _orig_atom attributes.
	atom_graph = mytrees.get("atom_graph")
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	graph_parent = None
	if parent is not None:
		if virt_parent is not None:
			graph_parent = virt_parent
			parent = virt_parent
		else:
			graph_parent = parent
	repoman = not mysettings.local_config
	if kwargs["use_binaries"]:
		portdb = trees[myroot]["bintree"].dbapi
	pprovideddict = mysettings.pprovideddict
	myuse = kwargs["myuse"]
	# A disjunction at the top means each expanded child is appended
	# as an alternative rather than merged into a conjunction.
	is_disjunction = mysplit and mysplit[0] == '||'
	for x in mysplit:
		if x == "||":
			newsplit.append(x)
			continue
		elif isinstance(x, list):
			# Normalization invariants: children of a disjunction are
			# conjunctions and vice versa; empty groups are impossible.
			assert x, 'Normalization error, empty conjunction found in %s' % (
				mysplit,)
			if is_disjunction:
				assert x[0] != '||', \
					'Normalization error, nested disjunction found in %s' % (mysplit,)
			else:
				assert x[0] == '||', \
					'Normalization error, nested conjunction found in %s' % (mysplit,)
			x_exp = _expand_new_virtuals(x, edebug, mydbapi,
				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
				use_force=use_force, **kwargs)
			if is_disjunction:
				if len(x_exp) == 1:
					x = x_exp[0]
					if isinstance(x, list):
						# Due to normalization, a conjunction must not be
						# nested directly in another conjunction, so this
						# must be a disjunction.
						assert x and x[0] == '||', \
							'Normalization error, nested conjunction found in %s' % (x_exp,)
						newsplit.extend(x[1:])
					else:
						newsplit.append(x)
				else:
					newsplit.append(x_exp)
			else:
				newsplit.extend(x_exp)
			continue

		if not isinstance(x, Atom):
			raise ParseError(_("invalid token: '%s'") % x)

		if repoman:
			x = x._eval_qa_conditionals(use_mask, use_force)

		mykey = x.cp
		if not mykey.startswith("virtual/"):
			# Non-virtual atoms pass through untouched.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
			continue

		if x.blocker:
			# Virtual blockers are no longer expanded here since
			# the un-expanded virtual atom is more useful for
			# maintaining a cache of blocker atoms.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
			continue

		if repoman or not hasattr(portdb, 'match_pkgs') or \
			pkg_use_enabled is None:
			# Limited expansion mode (repoman, or a dbapi without
			# match_pkgs support): only old-style mapping is applied.
			if portdb.cp_list(x.cp):
				newsplit.append(x)
			else:
				a = []
				myvartree = mytrees.get("vartree")
				if myvartree is not None:
					mysettings._populate_treeVirtuals_if_needed(myvartree)
				mychoices = mysettings.getvirtuals().get(mykey, [])
				for y in mychoices:
					a.append(Atom(x.replace(x.cp, y.cp, 1)))
				if not a:
					newsplit.append(x)
				elif is_disjunction:
					newsplit.extend(a)
				elif len(a) == 1:
					newsplit.append(a[0])
				else:
					newsplit.append(['||'] + a)
			continue

		pkgs = []
		# Ignore USE deps here, since otherwise we might not
		# get any matches. Choices with correct USE settings
		# will be preferred in dep_zapdeps().
		matches = portdb.match_pkgs(x.without_use)
		# Use descending order to prefer higher versions.
		matches.reverse()
		for pkg in matches:
			# only use new-style matches
			if pkg.cp.startswith("virtual/"):
				pkgs.append(pkg)

		mychoices = []
		if not pkgs and not portdb.cp_list(x.cp):
			# Fall back to old-style virtual mappings only when there are
			# no new-style matches and no real package with this cp.
			myvartree = mytrees.get("vartree")
			if myvartree is not None:
				mysettings._populate_treeVirtuals_if_needed(myvartree)
			mychoices = mysettings.getvirtuals().get(mykey, [])

		if not (pkgs or mychoices):
			# This one couldn't be expanded as a new-style virtual. Old-style
			# virtuals have already been expanded by dep_virtual, so this one
			# is unavailable and dep_zapdeps will identify it as such. The
			# atom is not eliminated here since it may still represent a
			# dependency that needs to be satisfied.
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
			continue

		a = []
		for pkg in pkgs:
			# Build an exact-version atom for the matched virtual and
			# expand its RDEPEND.
			virt_atom = '=' + pkg.cpv
			if x.unevaluated_atom.use:
				virt_atom += str(x.unevaluated_atom.use)
				virt_atom = Atom(virt_atom)
				if parent is None:
					# Evaluate USE conditionals against the caller's USE
					# settings when there is no parent package.
					if myuse is None:
						virt_atom = virt_atom.evaluate_conditionals(
							mysettings.get("PORTAGE_USE", "").split())
					else:
						virt_atom = virt_atom.evaluate_conditionals(myuse)
				else:
					virt_atom = virt_atom.evaluate_conditionals(
						pkg_use_enabled(parent))
			else:
				virt_atom = Atom(virt_atom)

			# Allow the depgraph to map this atom back to the
			# original, in order to avoid distortion in places
			# like display or conflict resolution code.
			virt_atom.__dict__['_orig_atom'] = x

			# According to GLEP 37, RDEPEND is the only dependency
			# type that is valid for new-style virtuals. Repoman
			# should enforce this.
			depstring = pkg._metadata['RDEPEND']
			pkg_kwargs = kwargs.copy()
			pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
			if edebug:
				writemsg_level(_("Virtual Parent: %s\n") \
					% (pkg,), noiselevel=-1, level=logging.DEBUG)
				writemsg_level(_("Virtual Depstring: %s\n") \
					% (depstring,), noiselevel=-1, level=logging.DEBUG)

			# Set EAPI used for validation in dep_check() recursion.
			mytrees["virt_parent"] = pkg

			try:
				mycheck = dep_check(depstring, mydbapi, mysettings,
					myroot=myroot, trees=trees, **pkg_kwargs)
			finally:
				# Restore previous EAPI after recursion.
				if virt_parent is not None:
					mytrees["virt_parent"] = virt_parent
				else:
					del mytrees["virt_parent"]

			if not mycheck[0]:
				raise ParseError("%s: %s '%s'" % \
					(pkg, mycheck[1], depstring))

			# Replace the original atom "x" with "virt_atom" which refers
			# to the specific version of the virtual whose deps we're
			# expanding. The virt_atom._orig_atom attribute is used
			# by depgraph to map virt_atom back to the original atom.
			# We specifically exclude the original atom "x" from the
			# the expanded output here, since otherwise it could trigger
			# incorrect dep_zapdeps behavior (see bug #597752).
			mycheck[1].append(virt_atom)
			a.append(mycheck[1])

			if atom_graph is not None:
				virt_atom_node = (virt_atom, id(virt_atom))
				atom_graph.add(virt_atom_node, graph_parent)
				atom_graph.add(pkg, virt_atom_node)
				atom_graph.add((x, id(x)), graph_parent)

		if not a and mychoices:
			# Check for a virtual package.provided match.
			for y in mychoices:
				new_atom = Atom(x.replace(x.cp, y.cp, 1))
				if match_from_list(new_atom,
					pprovideddict.get(new_atom.cp, [])):
					a.append(new_atom)
					if atom_graph is not None:
						atom_graph.add((new_atom, id(new_atom)), graph_parent)

		if not a:
			newsplit.append(x)
			if atom_graph is not None:
				atom_graph.add((x, id(x)), graph_parent)
		elif is_disjunction:
			newsplit.extend(a)
		elif len(a) == 1:
			newsplit.extend(a[0])
		else:
			newsplit.append(['||'] + a)

	# For consistency with related functions like use_reduce, always
	# normalize the result to have a top-level conjunction.
	if is_disjunction:
		newsplit = [newsplit]

	return newsplit
def _populate(self, getbinpkgs=0):
	"""
	Populate the binary package database: scan the local PKGDIR,
	reconcile it with the on-disk Packages index, and (when getbinpkgs
	is true) fetch and merge each remote PORTAGE_BINHOST index.

	@param getbinpkgs: when true, also load remote binhost indices
	@return: 0 if pkgdir is missing and getbinpkgs is false; otherwise
		None (self.populated is set to 1 on completion)
	"""
	if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
		return 0

	# Clear all caches in case populate is called multiple times
	# as may be the case when _global_updates calls populate()
	# prior to performing package moves since it only wants to
	# operate on local packages (getbinpkgs=0).
	self._remotepkgs = None
	self.dbapi.clear()
	_instance_key = self.dbapi._instance_key
	# NOTE(review): 'if True:' looks like a remnant that preserves the
	# indentation of an earlier conditional block.
	if True:
		pkg_paths = {}
		self._pkg_paths = pkg_paths
		dir_files = {}
		# Map each directory (relative to pkgdir) to its file names.
		for parent, dir_names, file_names in os.walk(self.pkgdir):
			relative_parent = parent[len(self.pkgdir)+1:]
			dir_files[relative_parent] = file_names

		pkgindex = self._load_pkgindex()
		if not self._pkgindex_version_supported(pkgindex):
			pkgindex = self._new_pkgindex()
		header = pkgindex.header
		metadata = {}
		basename_index = {}
		# Index existing Packages entries by basename so stat data can
		# be validated without re-reading each package's xpak.
		for d in pkgindex.packages:
			cpv = _pkg_str(d["CPV"], metadata=d,
				settings=self.settings)
			d["CPV"] = cpv
			metadata[_instance_key(cpv)] = d
			path = d.get("PATH")
			if not path:
				path = cpv + ".tbz2"
			basename = os.path.basename(path)
			basename_index.setdefault(basename, []).append(d)

		update_pkgindex = False
		for mydir, file_names in dir_files.items():
			try:
				mydir = _unicode_decode(mydir,
					encoding=_encodings["fs"], errors="strict")
			except UnicodeDecodeError:
				continue
			for myfile in file_names:
				try:
					myfile = _unicode_decode(myfile,
						encoding=_encodings["fs"], errors="strict")
				except UnicodeDecodeError:
					continue
				if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
					continue
				mypath = os.path.join(mydir, myfile)
				full_path = os.path.join(self.pkgdir, mypath)
				s = os.lstat(full_path)

				if not stat.S_ISREG(s.st_mode):
					continue

				# Validate data from the package index and try to avoid
				# reading the xpak if possible.
				possibilities = basename_index.get(myfile)
				if possibilities:
					match = None
					for d in possibilities:
						# A cached entry matches only when mtime, size
						# and all required index keys agree.
						try:
							if long(d["_mtime_"]) != s[stat.ST_MTIME]:
								continue
						except (KeyError, ValueError):
							continue
						try:
							if long(d["SIZE"]) != long(s.st_size):
								continue
						except (KeyError, ValueError):
							continue
						if not self._pkgindex_keys.difference(d):
							match = d
							break
					if match:
						mycpv = match["CPV"]
						instance_key = _instance_key(mycpv)
						pkg_paths[instance_key] = mypath
						# update the path if the package has been moved
						oldpath = d.get("PATH")
						if oldpath and oldpath != mypath:
							update_pkgindex = True
						# Omit PATH if it is the default path for
						# the current Packages format version.
						if mypath != mycpv + ".tbz2":
							d["PATH"] = mypath
							if not oldpath:
								update_pkgindex = True
						else:
							d.pop("PATH", None)
							if oldpath:
								update_pkgindex = True
						self.dbapi.cpv_inject(mycpv)
						continue
				if not os.access(full_path, os.R_OK):
					writemsg(_("!!! Permission denied to read " \
						"binary package: '%s'\n") % full_path,
						noiselevel=-1)
					self.invalids.append(myfile[:-5])
					continue
				# Cache miss: read metadata directly from the package.
				pkg_metadata = self._read_metadata(full_path, s,
					keys=chain(self.dbapi._aux_cache_keys,
					("PF", "CATEGORY")))
				mycat = pkg_metadata.get("CATEGORY", "")
				mypf = pkg_metadata.get("PF", "")
				slot = pkg_metadata.get("SLOT", "")
				mypkg = myfile[:-5]
				if not mycat or not mypf or not slot:
					#old-style or corrupt package
					writemsg(_("\n!!! Invalid binary package: '%s'\n") % \
						full_path, noiselevel=-1)
					missing_keys = []
					if not mycat:
						missing_keys.append("CATEGORY")
					if not mypf:
						missing_keys.append("PF")
					if not slot:
						missing_keys.append("SLOT")
					msg = []
					if missing_keys:
						missing_keys.sort()
						msg.append(_("Missing metadata key(s): %s.") % \
							", ".join(missing_keys))
					msg.append(_(" This binary package is not " \
						"recoverable and should be deleted."))
					for line in textwrap.wrap("".join(msg), 72):
						writemsg("!!! %s\n" % line, noiselevel=-1)
					self.invalids.append(mypkg)
					continue

				# Validate the file name against PF/BUILD_ID: .xpak files
				# are multi-instance ("PF-BUILDID.xpak"), .tbz2 are not.
				multi_instance = False
				invalid_name = False
				build_id = None
				if myfile.endswith(".xpak"):
					multi_instance = True
					build_id = self._parse_build_id(myfile)
					if build_id < 1:
						invalid_name = True
					elif myfile != "%s-%s.xpak" % (
						mypf, build_id):
						invalid_name = True
					else:
						mypkg = mypkg[:-len(str(build_id))-1]
				elif myfile != mypf + ".tbz2":
					invalid_name = True

				if invalid_name:
					writemsg(_("\n!!! Binary package name is "
						"invalid: '%s'\n") % full_path,
						noiselevel=-1)
					continue

				if pkg_metadata.get("BUILD_ID"):
					try:
						build_id = long(pkg_metadata["BUILD_ID"])
					except ValueError:
						writemsg(_("!!! Binary package has "
							"invalid BUILD_ID: '%s'\n") %
							full_path, noiselevel=-1)
						continue
				else:
					build_id = None

				if multi_instance:
					# Multi-instance packages must live in their
					# category/package directory.
					name_split = catpkgsplit("%s/%s" %
						(mycat, mypf))
					if (name_split is None or
						tuple(catsplit(mydir)) != name_split[:2]):
						continue
				elif mycat != mydir and mydir != "All":
					continue
				if mypkg != mypf.strip():
					continue
				mycpv = mycat + "/" + mypkg
				if not self.dbapi._category_re.match(mycat):
					writemsg(_("!!! Binary package has an " \
						"unrecognized category: '%s'\n") % full_path,
						noiselevel=-1)
					writemsg(_("!!! '%s' has a category that is not" \
						" listed in %setc/portage/categories\n") % \
						(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
						noiselevel=-1)
					continue
				if build_id is not None:
					pkg_metadata["BUILD_ID"] = _unicode(build_id)
				pkg_metadata["SIZE"] = _unicode(s.st_size)
				# Discard items used only for validation above.
				pkg_metadata.pop("CATEGORY")
				pkg_metadata.pop("PF")
				mycpv = _pkg_str(mycpv,
					metadata=self.dbapi._aux_cache_slot_dict(
					pkg_metadata))
				pkg_paths[_instance_key(mycpv)] = mypath
				self.dbapi.cpv_inject(mycpv)
				update_pkgindex = True
				d = metadata.get(_instance_key(mycpv),
					pkgindex._pkg_slot_dict())
				# Discard any stale cached entry for this instance.
				if d:
					try:
						if long(d["_mtime_"]) != s[stat.ST_MTIME]:
							d.clear()
					except (KeyError, ValueError):
						d.clear()
				if d:
					try:
						if long(d["SIZE"]) != long(s.st_size):
							d.clear()
					except (KeyError, ValueError):
						d.clear()

				for k in self._pkgindex_allowed_pkg_keys:
					v = pkg_metadata.get(k)
					if v is not None:
						d[k] = v
				d["CPV"] = mycpv

				try:
					self._eval_use_flags(mycpv, d)
				except portage.exception.InvalidDependString:
					writemsg(_("!!! Invalid binary package: '%s'\n") % \
						self.getname(mycpv), noiselevel=-1)
					self.dbapi.cpv_remove(mycpv)
					del pkg_paths[_instance_key(mycpv)]

				# record location if it's non-default
				if mypath != mycpv + ".tbz2":
					d["PATH"] = mypath
				else:
					d.pop("PATH", None)
				metadata[_instance_key(mycpv)] = d

		# Drop index entries whose package files no longer exist.
		for instance_key in list(metadata):
			if instance_key not in pkg_paths:
				del metadata[instance_key]

		# Do not bother to write the Packages index if $PKGDIR/All/ exists
		# since it will provide no benefit due to the need to read CATEGORY
		# from xpak.
		if update_pkgindex and os.access(self.pkgdir, os.W_OK):
			del pkgindex.packages[:]
			pkgindex.packages.extend(iter(metadata.values()))
			self._update_pkgindex_header(pkgindex.header)
			self._pkgindex_write(pkgindex)

	if getbinpkgs and not self.settings.get("PORTAGE_BINHOST"):
		writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
			noiselevel=-1)

	if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
		self.populated=1
		return
	self._remotepkgs = {}
	for base_url in self.settings["PORTAGE_BINHOST"].split():
		parsed_url = urlparse(base_url)
		host = parsed_url.netloc
		port = parsed_url.port
		user = None
		passwd = None
		user_passwd = ""
		# Split optional user[:password]@ credentials out of the netloc.
		if "@" in host:
			user, host = host.split("@", 1)
			user_passwd = user + "@"
			if ":" in user:
				user, passwd = user.split(":", 1)
		port_args = []
		if port is not None:
			port_str = ":%s" % (port,)
			if host.endswith(port_str):
				host = host[:-len(port_str)]
		# Location of the locally cached copy of this binhost's index.
		pkgindex_file = os.path.join(self.settings["EROOT"],
			CACHE_PATH, "binhost", host,
			parsed_url.path.lstrip("/"), "Packages")
		pkgindex = self._new_pkgindex()
		try:
			f = io.open(_unicode_encode(pkgindex_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace')
			try:
				pkgindex.read(f)
			finally:
				f.close()
		except EnvironmentError as e:
			if e.errno != errno.ENOENT:
				raise
		local_timestamp = pkgindex.header.get("TIMESTAMP", None)
		try:
			download_timestamp = \
				float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
		except ValueError:
			download_timestamp = 0
		remote_timestamp = None
		rmt_idx = self._new_pkgindex()
		proc = None
		tmp_filename = None
		try:
			# urlparse.urljoin() only works correctly with recognized
			# protocols and requires the base url to have a trailing
			# slash, so join manually...
			url = base_url.rstrip("/") + "/Packages"
			f = None

			# Honor the index's TTL header: skip the download entirely
			# while the cached copy is still fresh.
			try:
				ttl = float(pkgindex.header.get("TTL", 0))
			except ValueError:
				pass
			else:
				if download_timestamp and ttl and \
					download_timestamp + ttl > time.time():
					raise UseCachedCopyOfRemoteIndex()

			# Don't use urlopen for https, since it doesn't support
			# certificate/hostname verification (bug #469888).
			if parsed_url.scheme not in ('https',):
				try:
					f = _urlopen(url, if_modified_since=local_timestamp)
					if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
						remote_timestamp = f.headers.get('timestamp')
				except IOError as err:
					if hasattr(err, 'code') and err.code == 304:
						# not modified (since local_timestamp)
						raise UseCachedCopyOfRemoteIndex()

					if parsed_url.scheme in ('ftp', 'http', 'https'):
						# This protocol is supposedly supported by urlopen,
						# so apparently there's a problem with the url
						# or a bug in urlopen.
						if self.settings.get("PORTAGE_DEBUG", "0") != "0":
							traceback.print_exc()

						raise
				except ValueError:
					raise ParseError("Invalid Portage BINHOST value '%s'"
						% url.lstrip())

			if f is None:
				# Fall back to ssh or FETCHCOMMAND for schemes urlopen
				# did not handle.
				path = parsed_url.path.rstrip("/") + "/Packages"

				if parsed_url.scheme == 'ssh':
					# Use a pipe so that we can terminate the download
					# early if we detect that the TIMESTAMP header
					# matches that of the cached Packages file.
					ssh_args = ['ssh']
					if port is not None:
						ssh_args.append("-p%s" % (port,))
					# NOTE: shlex evaluates embedded quotes
					ssh_args.extend(portage.util.shlex_split(
						self.settings.get("PORTAGE_SSH_OPTS", "")))
					ssh_args.append(user_passwd + host)
					ssh_args.append('--')
					ssh_args.append('cat')
					ssh_args.append(path)

					proc = subprocess.Popen(ssh_args,
						stdout=subprocess.PIPE)
					f = proc.stdout
				else:
					setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
					fcmd = self.settings.get(setting)
					if not fcmd:
						fcmd = self.settings.get('FETCHCOMMAND')
						if not fcmd:
							raise EnvironmentError("FETCHCOMMAND is unset")

					fd, tmp_filename = tempfile.mkstemp()
					tmp_dirname, tmp_basename = os.path.split(tmp_filename)
					os.close(fd)

					fcmd_vars = {
						"DISTDIR": tmp_dirname,
						"FILE": tmp_basename,
						"URI": url
					}

					for k in ("PORTAGE_SSH_OPTS",):
						v = self.settings.get(k)
						if v is not None:
							fcmd_vars[k] = v

					success = portage.getbinpkg.file_get(
						fcmd=fcmd, fcmd_vars=fcmd_vars)
					if not success:
						raise EnvironmentError("%s failed" % (setting,))
					f = open(tmp_filename, 'rb')

			f_dec = codecs.iterdecode(f,
				_encodings['repo.content'], errors='replace')
			try:
				rmt_idx.readHeader(f_dec)
				if not remote_timestamp: # in case it had not been read from HTTP header
					remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
				if not remote_timestamp:
					# no timestamp in the header, something's wrong
					pkgindex = None
					writemsg(_("\n\n!!! Binhost package index " \
						" has no TIMESTAMP field.\n"), noiselevel=-1)
				else:
					if not self._pkgindex_version_supported(rmt_idx):
						writemsg(_("\n\n!!! Binhost package index version" \
							" is not supported: '%s'\n") % \
							rmt_idx.header.get("VERSION"), noiselevel=-1)
						pkgindex = None
					elif local_timestamp != remote_timestamp:
						# Only read the body when the remote index has
						# actually changed.
						rmt_idx.readBody(f_dec)
						pkgindex = rmt_idx
			finally:
				# Timeout after 5 seconds, in case close() blocks
				# indefinitely (see bug #350139).
				try:
					try:
						AlarmSignal.register(5)
						f.close()
					finally:
						AlarmSignal.unregister()
				except AlarmSignal:
					writemsg("\n\n!!! %s\n" % \
						_("Timed out while closing connection to binhost"),
						noiselevel=-1)
		except UseCachedCopyOfRemoteIndex:
			writemsg_stdout("\n")
			writemsg_stdout(
				colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
				"\n")
			rmt_idx = pkgindex
		except EnvironmentError as e:
			writemsg(_("\n\n!!! Error fetching binhost package" \
				" info from '%s'\n") % _hide_url_passwd(base_url))
			# With Python 2, the EnvironmentError message may
			# contain bytes or unicode, so use _unicode to ensure
			# safety with all locales (bug #532784).
			try:
				error_msg = _unicode(e)
			except UnicodeDecodeError as uerror:
				error_msg = _unicode(uerror.object,
					encoding='utf_8', errors='replace')
			writemsg("!!! %s\n\n" % error_msg)
			del e
			pkgindex = None
		if proc is not None:
			if proc.poll() is None:
				proc.kill()
				proc.wait()
			proc = None
		if tmp_filename is not None:
			try:
				os.unlink(tmp_filename)
			except OSError:
				pass
		if pkgindex is rmt_idx:
			pkgindex.modified = False # don't update the header
			pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
			try:
				ensure_dirs(os.path.dirname(pkgindex_file))
				f = atomic_ofstream(pkgindex_file)
				pkgindex.write(f)
				f.close()
			except (IOError, PortageException):
				if os.access(os.path.dirname(pkgindex_file), os.W_OK):
					raise
				# The current user doesn't have permission to cache the
				# file, but that's alright.
		if pkgindex:
			remote_base_uri = pkgindex.header.get("URI", base_url)
			for d in pkgindex.packages:
				cpv = _pkg_str(d["CPV"], metadata=d,
					settings=self.settings)
				instance_key = _instance_key(cpv)
				# Local package instances override remote instances
				# with the same instance_key.
				if instance_key in metadata:
					continue
				d["CPV"] = cpv
				d["BASE_URI"] = remote_base_uri
				d["PKGINDEX_URI"] = url
				self._remotepkgs[instance_key] = d
				metadata[instance_key] = d
				self.dbapi.cpv_inject(cpv)

			self._remote_has_index = True

	self.populated=1
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
    """Parse a bash-like key=value configuration file into a dict.

    @param mycfg: path of the config file to read
    @param tolerant: when true, syntax errors are logged instead of raised
        and parsing stops early (or skips the offending entry)
    @param allow_sourcing: honor 'source' statements inside the file
    @param expand: enable variable substitution; may also be a dict of
        pre-existing definitions that is consulted and updated while
        substituting
    @rtype: dict or None
    @return: the parsed variables, or None when the file does not exist
    @raises ParseError: on syntax errors when not tolerant
    @raises PermissionDenied: when the file cannot be opened for reading
    """
    if isinstance(expand, dict):
        # Caller supplied existing definitions to substitute from.
        substitutions = expand
        expand = True
    else:
        substitutions = {}

    parsed = {}
    f = None
    try:
        # NOTE: shlex doesn't support unicode objects with Python 2
        # (produces spurious \0 characters).
        if sys.hexversion < 0x3000000:
            f = open(_unicode_encode(mycfg,
                encoding=_encodings['fs'], errors='strict'), 'rb')
        else:
            f = open(_unicode_encode(mycfg,
                encoding=_encodings['fs'], errors='strict'), mode='r',
                encoding=_encodings['content'], errors='replace')
        content = f.read()
    except IOError as e:
        if e.errno == PermissionDenied.errno:
            raise PermissionDenied(mycfg)
        if e.errno != errno.ENOENT:
            writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
            if e.errno not in (errno.EISDIR,):
                raise
        return None
    finally:
        if f is not None:
            f.close()

    # Avoid a silent shlex error triggered by a source statement at the
    # end of the file without a trailing newline.
    if content and not content.endswith('\n'):
        content += '\n'

    # dos-style line endings prevent people from sourcing the file with
    # bash, so warn about them.
    if '\r' in content:
        writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
            "in config file: '%s'") + "\n") % mycfg, noiselevel=-1)

    lex = None
    try:
        lexer_class = _tolerant_shlex if tolerant else shlex.shlex
        # The default shlex.sourcehook() implementation only joins
        # relative paths when the infile attribute is properly set.
        lex = lexer_class(content, infile=mycfg, posix=True)
        lex.wordchars = string.digits + string.ascii_letters + \
            "~!@#$%*_\:;?,./-+{}"
        lex.quotes = "\"'"
        if allow_sourcing:
            lex.source = "source"

        def report(msg):
            # Strict mode treats a syntax problem as fatal; tolerant
            # mode logs it and lets the caller stop or skip the entry.
            if not tolerant:
                raise ParseError(msg)
            writemsg("%s\n" % msg, noiselevel=-1)

        while True:
            key = lex.get_token()
            if key == "export":
                key = lex.get_token()
            if key is None:
                # Normal end of file.
                break

            equ = lex.get_token()
            if equ == '':
                report(lex.error_leader() + _("Unexpected EOF"))
                return parsed
            if equ != '=':
                report(lex.error_leader() +
                    _("Invalid token '%s' (not '=')") % (equ,))
                return parsed

            val = lex.get_token()
            if val is None:
                report(lex.error_leader() +
                    _("Unexpected end of config file: variable '%s'") % (key,))
                return parsed

            key = _unicode_decode(key)
            val = _unicode_decode(val)

            if _invalid_var_name_re.search(key) is not None:
                report(lex.error_leader() +
                    _("Invalid variable name '%s'") % (key,))
                continue

            if expand:
                # Pass the bound error_leader method so varexpand can
                # prefix its own diagnostics with the current position.
                parsed[key] = varexpand(val, mydict=substitutions,
                    error_leader=lex.error_leader)
                substitutions[key] = parsed[key]
            else:
                parsed[key] = val

    except SystemExit:
        raise
    except Exception as e:
        if isinstance(e, ParseError) or lex is None:
            raise
        msg = _unicode_decode("%s%s") % (lex.error_leader(), e)
        writemsg("%s\n" % msg, noiselevel=-1)
        raise

    return parsed
def _populate_remote(self, getbinpkg_refresh=True):
    """
    Populate self._remotepkgs by fetching (or reusing a cached copy of)
    the Packages index from every URL listed in PORTAGE_BINHOST, and
    inject each remote package's CPV into self.dbapi.

    @param getbinpkg_refresh: when False and a cached index with a
        TIMESTAMP header exists, the cached copy is used without
        contacting the binhost at all
    @raises ParseError: when a BINHOST URL is not a valid URL
    """
    self._remote_has_index = False
    self._remotepkgs = {}
    for base_url in self.settings["PORTAGE_BINHOST"].split():
        parsed_url = urlparse(base_url)
        host = parsed_url.netloc
        port = parsed_url.port
        user = None
        passwd = None
        user_passwd = ""
        # Split optional user[:password]@ credentials out of the netloc.
        if "@" in host:
            user, host = host.split("@", 1)
            user_passwd = user + "@"
            if ":" in user:
                user, passwd = user.split(":", 1)
        # Strip a trailing :port from the host, since the cache path
        # below is keyed on the bare host name.
        if port is not None:
            port_str = ":%s" % (port,)
            if host.endswith(port_str):
                host = host[:-len(port_str)]
        pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
            host, parsed_url.path.lstrip("/"), "Packages")
        pkgindex = self._new_pkgindex()
        # Load any previously cached copy of this binhost's index;
        # a missing cache file (ENOENT) is not an error.
        try:
            f = io.open(_unicode_encode(pkgindex_file,
                encoding=_encodings['fs'], errors='strict'),
                mode='r', encoding=_encodings['repo.content'],
                errors='replace')
            try:
                pkgindex.read(f)
            finally:
                f.close()
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise
        local_timestamp = pkgindex.header.get("TIMESTAMP", None)
        try:
            download_timestamp = \
                float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
        except ValueError:
            download_timestamp = 0
        remote_timestamp = None
        rmt_idx = self._new_pkgindex()
        proc = None
        tmp_filename = None
        try:
            # urlparse.urljoin() only works correctly with recognized
            # protocols and requires the base url to have a trailing
            # slash, so join manually...
            url = base_url.rstrip("/") + "/Packages"
            f = None
            if not getbinpkg_refresh and local_timestamp:
                raise UseCachedCopyOfRemoteIndex()
            # Honor the TTL header of the cached index: skip the
            # download entirely while the cached copy is still fresh.
            try:
                ttl = float(pkgindex.header.get("TTL", 0))
            except ValueError:
                pass
            else:
                if download_timestamp and ttl and \
                    download_timestamp + ttl > time.time():
                    raise UseCachedCopyOfRemoteIndex()
            # Don't use urlopen for https, unless
            # PEP 476 is supported (bug #469888).
            if parsed_url.scheme not in ('https',) or _have_pep_476():
                try:
                    f = _urlopen(url, if_modified_since=local_timestamp)
                    if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
                        remote_timestamp = f.headers.get('timestamp')
                except IOError as err:
                    if hasattr(err, 'code') and err.code == 304:
                        # not modified (since local_timestamp)
                        raise UseCachedCopyOfRemoteIndex()
                    if parsed_url.scheme in ('ftp', 'http', 'https'):
                        # This protocol is supposedly supported by urlopen,
                        # so apparently there's a problem with the url
                        # or a bug in urlopen.
                        if self.settings.get("PORTAGE_DEBUG", "0") != "0":
                            traceback.print_exc()
                        raise
                except ValueError:
                    raise ParseError("Invalid Portage BINHOST value '%s'"
                        % url.lstrip())
            if f is None:
                # urlopen did not handle this scheme; fall back to ssh
                # or the configured FETCHCOMMAND.
                path = parsed_url.path.rstrip("/") + "/Packages"
                if parsed_url.scheme == 'ssh':
                    # Use a pipe so that we can terminate the download
                    # early if we detect that the TIMESTAMP header
                    # matches that of the cached Packages file.
                    ssh_args = ['ssh']
                    if port is not None:
                        ssh_args.append("-p%s" % (port,))
                    # NOTE: shlex evaluates embedded quotes
                    ssh_args.extend(portage.util.shlex_split(
                        self.settings.get("PORTAGE_SSH_OPTS", "")))
                    ssh_args.append(user_passwd + host)
                    ssh_args.append('--')
                    ssh_args.append('cat')
                    ssh_args.append(path)
                    proc = subprocess.Popen(ssh_args,
                        stdout=subprocess.PIPE)
                    f = proc.stdout
                else:
                    setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
                    fcmd = self.settings.get(setting)
                    if not fcmd:
                        fcmd = self.settings.get('FETCHCOMMAND')
                        if not fcmd:
                            raise EnvironmentError("FETCHCOMMAND is unset")
                    # Download into a temporary file, then read it back.
                    fd, tmp_filename = tempfile.mkstemp()
                    tmp_dirname, tmp_basename = os.path.split(tmp_filename)
                    os.close(fd)
                    fcmd_vars = {
                        "DISTDIR": tmp_dirname,
                        "FILE": tmp_basename,
                        "URI": url
                    }
                    for k in ("PORTAGE_SSH_OPTS",):
                        v = self.settings.get(k)
                        if v is not None:
                            fcmd_vars[k] = v
                    success = portage.getbinpkg.file_get(
                        fcmd=fcmd, fcmd_vars=fcmd_vars)
                    if not success:
                        raise EnvironmentError("%s failed" % (setting,))
                    f = open(tmp_filename, 'rb')
            f_dec = codecs.iterdecode(f,
                _encodings['repo.content'], errors='replace')
            try:
                rmt_idx.readHeader(f_dec)
                if not remote_timestamp: # in case it had not been read from HTTP header
                    remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
                if not remote_timestamp:
                    # no timestamp in the header, something's wrong
                    pkgindex = None
                    writemsg(_("\n\n!!! Binhost package index " \
                    " has no TIMESTAMP field.\n"), noiselevel=-1)
                else:
                    if not self._pkgindex_version_supported(rmt_idx):
                        writemsg(_("\n\n!!! Binhost package index version" \
                        " is not supported: '%s'\n") % \
                        rmt_idx.header.get("VERSION"), noiselevel=-1)
                        pkgindex = None
                    elif local_timestamp != remote_timestamp:
                        # Only read the full body when the remote index
                        # differs from the cached one.
                        rmt_idx.readBody(f_dec)
                        pkgindex = rmt_idx
            finally:
                # Timeout after 5 seconds, in case close() blocks
                # indefinitely (see bug #350139).
                try:
                    try:
                        AlarmSignal.register(5)
                        f.close()
                    finally:
                        AlarmSignal.unregister()
                except AlarmSignal:
                    writemsg("\n\n!!! %s\n" % \
                        _("Timed out while closing connection to binhost"),
                        noiselevel=-1)
        except UseCachedCopyOfRemoteIndex:
            writemsg_stdout("\n")
            writemsg_stdout(
                colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
                "\n")
            # Reuse the cached index; the "pkgindex is rmt_idx" check
            # below then skips rewriting the cache file.
            rmt_idx = pkgindex
        except EnvironmentError as e:
            # This includes URLError which is raised for SSL
            # certificate errors when PEP 476 is supported.
            writemsg(_("\n\n!!! Error fetching binhost package" \
                " info from '%s'\n") % _hide_url_passwd(base_url))
            # With Python 2, the EnvironmentError message may
            # contain bytes or unicode, so use _unicode to ensure
            # safety with all locales (bug #532784).
            try:
                error_msg = _unicode(e)
            except UnicodeDecodeError as uerror:
                error_msg = _unicode(uerror.object,
                    encoding='utf_8', errors='replace')
            writemsg("!!! %s\n\n" % error_msg)
            del e
            pkgindex = None
        if proc is not None:
            # Terminate a still-running ssh download, if any.
            if proc.poll() is None:
                proc.kill()
                proc.wait()
            proc = None
        if tmp_filename is not None:
            try:
                os.unlink(tmp_filename)
            except OSError:
                pass
        if pkgindex is rmt_idx:
            # A fresh index was downloaded; persist it to the local cache.
            pkgindex.modified = False # don't update the header
            pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
            try:
                ensure_dirs(os.path.dirname(pkgindex_file))
                f = atomic_ofstream(pkgindex_file)
                pkgindex.write(f)
                f.close()
            except (IOError, PortageException):
                if os.access(os.path.dirname(pkgindex_file), os.W_OK):
                    raise
                # The current user doesn't have permission to cache the
                # file, but that's alright.
        if pkgindex:
            remote_base_uri = pkgindex.header.get("URI", base_url)
            for d in pkgindex.packages:
                cpv = _pkg_str(d["CPV"], metadata=d,
                    settings=self.settings)
                # Local package instances override remote instances
                # with the same instance_key.
                if self.dbapi.cpv_exists(cpv):
                    continue
                d["CPV"] = cpv
                d["BASE_URI"] = remote_base_uri
                d["PKGINDEX_URI"] = url
                self._remotepkgs[self.dbapi._instance_key(cpv)] = d
                self.dbapi.cpv_inject(cpv)
            self._remote_has_index = True
def _addProfile(self, currentPath, repositories, known_repos, previous_repos):
    """
    Recursively register a profile directory — and, via its 'parent'
    file, all of its ancestors — in self.profiles and
    self.profiles_complex (parents are appended before the current node).

    @param currentPath: filesystem path of the profile directory
    @param repositories: repository configuration, used by
        _expand_parent_colon to resolve 'repo-name:path' parent entries
    @param known_repos: iterable of (location, layout_data) pairs for all
        known repositories
    @param previous_repos: the intersecting repos of the profile node
        that inherited this one; used to decide whether a deprecation
        warning is relevant for this node
    @raises ParseError: on an unsupported EAPI, an empty parent file, or
        a parent entry that does not resolve to an existing path
    """
    current_abs_path = os.path.abspath(currentPath)
    allow_directories = True
    allow_parent_colon = True
    repo_loc = None
    compat_mode = False
    current_formats = ()
    eapi = None

    intersecting_repos = tuple(x for x in known_repos
        if current_abs_path.startswith(x[0]))
    if intersecting_repos:
        # Handle nested repositories. The longest path
        # will be the correct one.
        repo_loc, layout_data = max(intersecting_repos,
            key=lambda x: len(x[0]))
        eapi = layout_data.get("profile_eapi_when_unspecified")

    # A profile-local 'eapi' file overrides the repository-level default.
    eapi_file = os.path.join(currentPath, "eapi")
    eapi = eapi or "0"
    f = None
    try:
        f = io.open(
            _unicode_encode(eapi_file, encoding=_encodings["fs"],
                errors="strict"),
            mode="r",
            encoding=_encodings["content"],
            errors="replace",
        )
        eapi = f.readline().strip()
    except IOError:
        # No eapi file; keep the default determined above.
        pass
    else:
        if not eapi_is_supported(eapi):
            raise ParseError(
                _("Profile contains unsupported " "EAPI '%s': '%s'")
                % (
                    eapi,
                    os.path.realpath(eapi_file),
                ))
    finally:
        if f is not None:
            f.close()

    if intersecting_repos:
        # Directory-style profile entries are allowed either by the
        # profile's EAPI or by the repository's declared profile-formats.
        allow_directories = (
            eapi_allows_directories_on_profile_level_and_repository_level(
                eapi)
            or any(x in _portage1_profiles_allow_directories
                for x in layout_data["profile-formats"]))
        compat_mode = (
            not eapi_allows_directories_on_profile_level_and_repository_level(
                eapi)
            and layout_data["profile-formats"] == ("portage-1-compat", ))
        allow_parent_colon = any(x in _allow_parent_colon
            for x in layout_data["profile-formats"])
        current_formats = tuple(layout_data["profile-formats"])

    # According to PMS, a deprecated profile warning is not inherited. Since
    # the current profile node may have been inherited by a user profile
    # node, the deprecation warning may be relevant even if it is not a
    # top-level profile node. Therefore, consider the deprecated warning
    # to be irrelevant when the current profile node belongs to the same
    # repo as the previous profile node.
    show_deprecated_warning = tuple(x[0] for x in previous_repos) != tuple(
        x[0] for x in intersecting_repos)

    if compat_mode:
        # Warn about directory-style entries that imply the 'portage-1'
        # format when the repository has not declared that format.
        offenders = _PORTAGE1_DIRECTORIES.intersection(
            os.listdir(currentPath))
        offenders = sorted(x for x in offenders
            if os.path.isdir(os.path.join(currentPath, x)))
        if offenders:
            warnings.warn(
                _("\nThe selected profile is implicitly using the 'portage-1' format:\n"
                "\tprofile = %(profile_path)s\n"
                "But this repository is not using that format:\n"
                "\trepo = %(repo_name)s\n"
                "This will break in the future. Please convert these dirs to files:\n"
                "\t%(files)s\n"
                "Or, add this line to the repository's layout.conf:\n"
                "\tprofile-formats = portage-1")
                % dict(
                    profile_path=currentPath,
                    repo_name=repo_loc,
                    files="\n\t".join(offenders),
                ))

    parentsFile = os.path.join(currentPath, "parent")
    if exists_raise_eaccess(parentsFile):
        parents = grabfile(parentsFile)
        if not parents:
            raise ParseError(_("Empty parent file: '%s'") % parentsFile)
        for parentPath in parents:
            abs_parent = parentPath[:1] == os.sep
            if not abs_parent and allow_parent_colon:
                parentPath = self._expand_parent_colon(
                    parentsFile, parentPath, repo_loc, repositories)

            # NOTE: This os.path.join() call is intended to ignore
            # currentPath if parentPath is already absolute.
            parentPath = normalize_path(
                os.path.join(currentPath, parentPath))

            if (abs_parent or repo_loc is None
                    or not parentPath.startswith(repo_loc)):
                # It seems that this parent may point outside
                # of the current repo, so realpath it.
                parentPath = os.path.realpath(parentPath)

            if exists_raise_eaccess(parentPath):
                # Recurse: parent profiles are appended before this node.
                self._addProfile(parentPath, repositories, known_repos,
                    intersecting_repos)
            else:
                raise ParseError(
                    _("Parent '%s' not found: '%s'")
                    % (parentPath, parentsFile))

    self.profiles.append(currentPath)
    self.profiles_complex.append(
        _profile_node(
            currentPath,
            allow_directories,
            False,
            current_formats,
            eapi,
            "build-id" in current_formats,
            show_deprecated_warning=show_deprecated_warning,
        ))