def _addProfile(self, currentPath):
		parentsFile = os.path.join(currentPath, "parent")
		eapi_file = os.path.join(currentPath, "eapi")
		try:
			# use a context manager so the file handle is not leaked
			with codecs.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'],
				errors='replace') as f:
				eapi = f.readline().strip()
		except IOError:
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		if os.path.exists(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))
				if os.path.exists(parentPath):
					self._addProfile(parentPath)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))
		self.profiles.append(currentPath)
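
Every example on this page centers on portage.util.grabfile, which reads a file (or, with recursive=True, a directory of files) and returns the useful lines as a list. A minimal sketch of that contract, assuming the usual Portage behavior of dropping comments and blank lines (an approximation for illustration, not the real implementation):

import os

def grabfile_sketch(path, recursive=False):
    """Approximate portage.util.grabfile: return non-blank,
    non-comment lines with surrounding whitespace stripped."""
    lines = []
    if recursive and os.path.isdir(path):
        for name in sorted(os.listdir(path)):
            lines.extend(grabfile_sketch(os.path.join(path, name),
                                         recursive=True))
        return lines
    try:
        with open(path, encoding="utf-8", errors="replace") as f:
            for raw in f:
                line = raw.split("#", 1)[0].strip()  # drop comments
                if line:
                    lines.append(line)
    except (FileNotFoundError, IsADirectoryError):
        return []  # missing files yield an empty list
    return lines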
Example #2
def load_unpack_dependencies_configuration(repositories):
	repo_dict = {}
	for repo in repositories.repos_with_profiles():
		for eapi in _supported_eapis:
			if eapi_has_automatic_unpack_dependencies(eapi):
				file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
				lines = grabfile(file_name, recursive=True)
				for line in lines:
					elements = line.split()
					suffix = elements[0].lower()
					if len(elements) == 1:
						writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
					depend = " ".join(elements[1:])
					try:
						use_reduce(depend, eapi=eapi)
					except InvalidDependString as e:
						writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
					else:
						repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	ret = {}
	for repo in repositories.repos_with_profiles():
		for repo_name in [x.name for x in repo.masters] + [repo.name]:
			for eapi in repo_dict.get(repo_name, {}):
				for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
					ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	return ret
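
The parser above implies the on-disk format: each line of profiles/unpack_dependencies/<EAPI> starts with an archive suffix, followed by the dependency string needed to unpack that suffix. A hypothetical file (the package names are illustrative, not taken from any real profile):

rar app-arch/unrar
zip app-arch/unzip
7z app-arch/p7zip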
Example #3
 def _parse_file_to_tuple(self,
                          file_name,
                          recursive=True,
                          eapi_filter=None):
     ret = []
     lines = grabfile(file_name, recursive=recursive)
     eapi = read_corresponding_eapi_file(file_name)
     if eapi_filter is not None and not eapi_filter(eapi):
         if lines:
             writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
                      (eapi, os.path.basename(file_name), file_name),
                      noiselevel=-1)
         return ()
     useflag_re = _get_useflag_re(eapi)
     for prefixed_useflag in lines:
         if prefixed_useflag[:1] == "-":
             useflag = prefixed_useflag[1:]
         else:
             useflag = prefixed_useflag
         if useflag_re.match(useflag) is None:
             writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
                      (file_name, prefixed_useflag),
                      noiselevel=-1)
         else:
             ret.append(prefixed_useflag)
     return tuple(ret)
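
A quick illustration of the contract: a leading '-' is preserved in the returned tuple but stripped before validation, and entries whose flag fails the EAPI's USE-flag regex are dropped with a warning. The regex below is an EAPI-0-style assumption standing in for _get_useflag_re(eapi):

import re

useflag_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9+_@-]*$")  # assumed pattern

ret = []
for prefixed_useflag in ["alsa", "-doc", "bad flag"]:
    useflag = prefixed_useflag[1:] if prefixed_useflag.startswith("-") else prefixed_useflag
    if useflag_re.match(useflag) is not None:
        ret.append(prefixed_useflag)

print(tuple(ret))  # ('alsa', '-doc')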
Example #4
	def getUnreadItems(self, repoid, update=False):
		"""
		Determine if there are unread relevant items in news.repoid.unread.
		If there are unread items return their number.
		If update is specified, updateNewsItems( repoid ) will be called to
		check for new items.
		"""

		if update:
			self.updateItems(repoid)

		unread_filename = self._unread_filename(repoid)
		unread_lock = None
		try:
			unread_lock = lockfile(unread_filename, wantnewlockfile=1)
		except (InvalidLocation, OperationNotPermitted, PermissionDenied):
			pass
		try:
			try:
				return len(grabfile(unread_filename))
			except PermissionDenied:
				return 0
		finally:
			if unread_lock:
				unlockfile(unread_lock)
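
A hedged usage sketch (the manager variable and the repo id are assumptions from context, not from the source):

# Hypothetical: `manager` is an instance of the news manager class above.
count = manager.getUnreadItems("gentoo", update=True)
if count:
    print("%d unread news item(s)" % count)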
Example #5
	def __init__(self, filename, greedy=False, dbapi=None):
		super(StaticFileSet, self).__init__(allow_repo=True)
		self._filename = filename
		self._mtime = None
		self.description = "Package set loaded from file %s" % self._filename
		self.loader = ItemFileLoader(self._filename, self._validate)
		if greedy and not dbapi:
			self.errors.append(_("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename)
			greedy = False
		self.greedy = greedy
		self.dbapi = dbapi

		metadata = grabfile(self._filename + ".metadata")
		key = None
		value = []
		for line in metadata:
			line = line.strip()
			if len(line) == 0 and key != None:
				setattr(self, key, " ".join(value))
				key = None
			elif line[-1] == ":" and key == None:
				key = line[:-1].lower()
				value = []
			elif key != None:
				value.append(line)
			else:
				pass
		else:
			# the loop has no break, so this else clause always runs and
			# flushes the final key/value pair
			if key != None:
				setattr(self, key, " ".join(value))
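
The loop above implies a simple sectioned format for the optional <setfile>.metadata companion file: a line ending in ':' names a key, and subsequent lines are joined into its value. A hypothetical metadata file with a single key:

description:
Packages hand-picked for my workstation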
Example #6
    def getUnreadItems(self, repoid, update=False):
        """
        Determine if there are unread relevant items in news.repoid.unread.
        If there are unread items return their number.
        If update is specified, updateNewsItems( repoid ) will be called to
        check for new items.
        """

        if update:
            self.updateItems(repoid)

        unread_filename = self._unread_filename(repoid)
        unread_lock = None
        try:
            unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        except (InvalidLocation, OperationNotPermitted, PermissionDenied,
                ReadOnlyFileSystem):
            pass
        try:
            try:
                return len(grabfile(unread_filename))
            except PermissionDenied:
                return 0
        finally:
            if unread_lock:
                unlockfile(unread_lock)
Example #7
    def __init__(self, filename, greedy=False, dbapi=None):
        super(StaticFileSet, self).__init__(allow_repo=True)
        self._filename = filename
        self._mtime = None
        self.description = "Package set loaded from file %s" % self._filename
        self.loader = ItemFileLoader(self._filename, self._validate)
        if greedy and not dbapi:
            self.errors.append(
                _("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename
            )
            greedy = False
        self.greedy = greedy
        self.dbapi = dbapi

        metadata = grabfile(self._filename + ".metadata")
        key = None
        value = []
        for line in metadata:
            line = line.strip()
            if len(line) == 0 and key != None:
                setattr(self, key, " ".join(value))
                key = None
            elif line[-1] == ":" and key == None:
                key = line[:-1].lower()
                value = []
            elif key != None:
                value.append(line)
            else:
                pass
        else:
            # the loop has no break, so this else clause always runs and
            # flushes the final key/value pair
            if key != None:
                setattr(self, key, " ".join(value))
Example #8
 def _parse_repository_packageusealiases(self, repositories):
     ret = {}
     for repo in repositories.repos_with_profiles():
         file_name = os.path.join(repo.location, "profiles",
                                  "package.use.aliases")
         eapi = read_corresponding_eapi_file(file_name, default=repo.eapi)
         useflag_re = _get_useflag_re(eapi)
         lines = grabfile(file_name, recursive=True)
         file_dict = {}
         for line in lines:
             elements = line.split()
             atom = elements[0]
             try:
                 atom = Atom(atom, eapi=eapi)
             except InvalidAtom:
                 writemsg(
                     _("--- Invalid atom in '%s': '%s'\n") %
                     (file_name, atom))
                 continue
             if len(elements) == 1:
                 writemsg(
                     _("--- Missing real USE flag for '%s' in '%s'\n") %
                     (atom, file_name),
                     noiselevel=-1,
                 )
                 continue
             real_flag = elements[1]
             if useflag_re.match(real_flag) is None:
                 writemsg(
                     _("--- Invalid real USE flag for '%s' in '%s': '%s'\n")
                     % (atom, file_name, real_flag),
                     noiselevel=-1,
                 )
             else:
                 for alias in elements[2:]:
                     if useflag_re.match(alias) is None:
                         writemsg(
                             _("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n"
                               ) % (real_flag, atom, file_name, alias),
                             noiselevel=-1,
                         )
                     else:
                         # Duplicated USE flag aliases in entries for different atoms
                         # matching the same package version are detected in getUseAliases().
                         if any(alias in v for k, v in file_dict.get(
                                 atom.cp, {}).get(atom, {}).items()
                                if k != real_flag):
                             writemsg(
                                 _("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n"
                                   ) % (atom, file_name, alias),
                                 noiselevel=-1,
                             )
                         else:
                             file_dict.setdefault(atom.cp, {}).setdefault(
                                 atom, {}).setdefault(real_flag,
                                                      []).append(alias)
         ret[repo.name] = file_dict
     return ret
Example #9
def get_applied_glsas(settings):
    """
    Return a list of applied or injected GLSA IDs

    @type settings: portage.config
    @param settings: portage config instance
    @rtype: list
    @return: list of GLSA IDs
    """
    return grabfile(os.path.join(settings["EROOT"], CACHE_PATH, "glsa"))
Example #10
def get_applied_glsas(settings):
	"""
	Return a list of applied or injected GLSA IDs
	
	@type	settings: portage.config
	@param	settings: portage config instance
	@rtype:		list
	@return:	list of glsa IDs
	"""
	return grabfile(os.path.join(settings["EROOT"], PRIVATE_PATH, "glsa_injected"))
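
Both GLSA helpers read one ID per line via grabfile. A hedged usage sketch, assuming the function above is in scope and the global portage.settings config is available:

import portage

applied = get_applied_glsas(portage.settings)  # e.g. ['201412-08', ...] (hypothetical IDs)
print("%d GLSA(s) applied or injected" % len(applied))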
Example #11
    def glob(self):
        if None not in self.cache:
            kws = set()
            for r in self.dbapi.porttrees:
                kws.update(grabfile(os.path.join(r, 'profiles', 'arch.list')))
            kws.update(['~%s' % x for x in kws], ('*', '**', '~*'))

            # cache the expanded keyword set (arch keywords, their ~variants,
            # and the special globs) under the None key
            self.cache[None] = frozenset(kws)

        return self.cache[None]
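
A worked example of the expansion above, with hypothetical arch.list contents: every keyword gains a '~'-prefixed variant, plus the '*', '**' and '~*' globs.

kws = {"amd64", "x86"}                                # as if read from arch.list
kws.update(["~%s" % x for x in kws], ("*", "**", "~*"))
print(sorted(kws))
# ['*', '**', 'amd64', 'x86', '~*', '~amd64', '~x86']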
Example #12
 def _parse_file_to_tuple(self,
                          file_name,
                          recursive=True,
                          eapi_filter=None,
                          eapi=None,
                          eapi_default="0"):
     """
     @param file_name: input file name
     @type file_name: str
     @param recursive: triggers recursion if the input file is a
             directory
     @type recursive: bool
     @param eapi_filter: a function that accepts a single eapi
             argument, and returns true if the current file type
             is supported by the given EAPI
     @type eapi_filter: callable
     @param eapi: the EAPI of the current profile node, which allows
             a call to read_corresponding_eapi_file to be skipped
     @type eapi: str
     @param eapi_default: the default EAPI which applies if the
             current profile node does not define a local EAPI
     @type eapi_default: str
     @rtype: tuple
     @return: collection of USE flags
     """
     ret = []
     lines = grabfile(file_name, recursive=recursive)
     if eapi is None:
         eapi = read_corresponding_eapi_file(file_name,
                                             default=eapi_default)
     if eapi_filter is not None and not eapi_filter(eapi):
         if lines:
             writemsg(
                 _("--- EAPI '%s' does not support '%s': '%s'\n") %
                 (eapi, os.path.basename(file_name), file_name),
                 noiselevel=-1,
             )
         return ()
     useflag_re = _get_useflag_re(eapi)
     for prefixed_useflag in lines:
         if prefixed_useflag[:1] == "-":
             useflag = prefixed_useflag[1:]
         else:
             useflag = prefixed_useflag
         if useflag_re.match(useflag) is None:
             writemsg(
                 _("--- Invalid USE flag in '%s': '%s'\n") %
                 (file_name, prefixed_useflag),
                 noiselevel=-1,
             )
         else:
             ret.append(prefixed_useflag)
     return tuple(ret)
Example #13
 def _parse_repository_packageusealiases(self, repositories):
     ret = {}
     for repo in repositories.repos_with_profiles():
         file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
         eapi = read_corresponding_eapi_file(file_name)
         useflag_re = _get_useflag_re(eapi)
         lines = grabfile(file_name, recursive=True)
         file_dict = {}
         for line in lines:
             elements = line.split()
             atom = elements[0]
             try:
                 atom = Atom(atom, eapi=eapi)
             except InvalidAtom:
                 writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
                 continue
             if len(elements) == 1:
                 writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
                 continue
             real_flag = elements[1]
             if useflag_re.match(real_flag) is None:
                 writemsg(
                     _("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag),
                     noiselevel=-1,
                 )
             else:
                 for alias in elements[2:]:
                     if useflag_re.match(alias) is None:
                         writemsg(
                             _("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n")
                             % (real_flag, atom, file_name, alias),
                             noiselevel=-1,
                         )
                     else:
                         # Duplicated USE flag aliases in entries for different atoms
                         # matching the same package version are detected in getUseAliases().
                         if any(
                             alias in v
                             for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items()
                             if k != real_flag
                         ):
                             writemsg(
                                 _("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n")
                                 % (atom, file_name, alias),
                                 noiselevel=-1,
                             )
                         else:
                             file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(
                                 alias
                             )
         ret[repo.name] = file_dict
     return ret
Example #14
def old_tree_timestamp_warn(portdir, settings):
    unixtime = time.time()
    default_warnsync = 30

    timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
    try:
        lastsync = grabfile(timestamp_file)
    except PortageException:
        return False

    if not lastsync:
        return False

    lastsync = lastsync[0].split()
    if not lastsync:
        return False

    try:
        lastsync = int(lastsync[0])
    except ValueError:
        return False

    var_name = "PORTAGE_SYNC_STALE"
    try:
        warnsync = float(settings.get(var_name, default_warnsync))
    except ValueError:
        writemsg_level(
            "!!! %s contains non-numeric value: %s\n" %
            (var_name, settings[var_name]),
            level=logging.ERROR,
            noiselevel=-1,
        )
        return False

    if warnsync <= 0:
        return False

    if (unixtime - 86400 * warnsync) > lastsync:
        out = EOutput()
        if have_english_locale():
            out.ewarn("Last emerge --sync was %s ago." %
                      whenago(unixtime - lastsync))
        else:
            out.ewarn(
                _("Last emerge --sync was %s.") %
                _unicode_decode(time.strftime("%c", time.localtime(lastsync))))
        return True
    return False
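
The staleness test reduces to simple arithmetic: warn when the last sync is more than PORTAGE_SYNC_STALE days (86400 seconds each) in the past. A self-contained illustration with made-up numbers:

import time

warnsync = 30                              # days; PORTAGE_SYNC_STALE default
lastsync = time.time() - 45 * 86400        # pretend the last sync was 45 days ago
print((time.time() - 86400 * warnsync) > lastsync)  # True -> the warning fires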
Example #15
	def _parse_file_to_tuple(self, file_name, recursive=True):
		ret = []
		lines = grabfile(file_name, recursive=recursive)
		eapi = read_corresponding_eapi_file(file_name)
		useflag_re = _get_useflag_re(eapi)
		for prefixed_useflag in lines:
			if prefixed_useflag[:1] == "-":
				useflag = prefixed_useflag[1:]
			else:
				useflag = prefixed_useflag
			if useflag_re.match(useflag) is None:
				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
					(file_name, prefixed_useflag), noiselevel=-1)
			else:
				ret.append(prefixed_useflag)
		return tuple(ret)
Example #16
def old_tree_timestamp_warn(portdir, settings):
	unixtime = time.time()
	default_warnsync = 30

	timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
	try:
		lastsync = grabfile(timestamp_file)
	except PortageException:
		return False

	if not lastsync:
		return False

	lastsync = lastsync[0].split()
	if not lastsync:
		return False

	try:
		lastsync = int(lastsync[0])
	except ValueError:
		return False

	var_name = 'PORTAGE_SYNC_STALE'
	try:
		warnsync = float(settings.get(var_name, default_warnsync))
	except ValueError:
		writemsg_level("!!! %s contains non-numeric value: %s\n" % \
			(var_name, settings[var_name]),
			level=logging.ERROR, noiselevel=-1)
		return False

	if warnsync <= 0:
		return False

	if (unixtime - 86400 * warnsync) > lastsync:
		if have_english_locale():
			writemsg_stdout(">>> Last emerge --sync was %s ago\n" % \
				whenago(unixtime - lastsync), noiselevel=-1)
		else:
			writemsg_stdout(">>> %s\n" % \
				_("Last emerge --sync was %s") % \
				time.strftime('%c', time.localtime(lastsync)),
				noiselevel=-1)
		return True
	return False
Example #17
	def _parse_file_to_tuple(self, file_name, recursive=True,
		eapi_filter=None, eapi=None, eapi_default="0"):
		"""
		@param file_name: input file name
		@type file_name: str
		@param recursive: triggers recursion if the input file is a
			directory
		@type recursive: bool
		@param eapi_filter: a function that accepts a single eapi
			argument, and returns true if the current file type
			is supported by the given EAPI
		@type eapi_filter: callable
		@param eapi: the EAPI of the current profile node, which allows
			a call to read_corresponding_eapi_file to be skipped
		@type eapi: str
		@param eapi_default: the default EAPI which applies if the
			current profile node does not define a local EAPI
		@type eapi_default: str
		@rtype: tuple
		@return: collection of USE flags
		"""
		ret = []
		lines = grabfile(file_name, recursive=recursive)
		if eapi is None:
			eapi = read_corresponding_eapi_file(
				file_name, default=eapi_default)
		if eapi_filter is not None and not eapi_filter(eapi):
			if lines:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ()
		useflag_re = _get_useflag_re(eapi)
		for prefixed_useflag in lines:
			if prefixed_useflag[:1] == "-":
				useflag = prefixed_useflag[1:]
			else:
				useflag = prefixed_useflag
			if useflag_re.match(useflag) is None:
				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
					(file_name, prefixed_useflag), noiselevel=-1)
			else:
				ret.append(prefixed_useflag)
		return tuple(ret)
Example #18
	def _parse_file_to_tuple(self, file_name, recursive=True, eapi_filter=None):
		ret = []
		lines = grabfile(file_name, recursive=recursive)
		eapi = read_corresponding_eapi_file(file_name)
		if eapi_filter is not None and not eapi_filter(eapi):
			if lines:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ()
		useflag_re = _get_useflag_re(eapi)
		for prefixed_useflag in lines:
			if prefixed_useflag[:1] == "-":
				useflag = prefixed_useflag[1:]
			else:
				useflag = prefixed_useflag
			if useflag_re.match(useflag) is None:
				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
					(file_name, prefixed_useflag), noiselevel=-1)
			else:
				ret.append(prefixed_useflag)
		return tuple(ret)
Example #19
def load_unpack_dependencies_configuration(repositories):
    repo_dict = {}
    for repo in repositories.repos_with_profiles():
        for eapi in _supported_eapis:
            if eapi_has_automatic_unpack_dependencies(eapi):
                file_name = os.path.join(repo.location, "profiles",
                                         "unpack_dependencies", eapi)
                lines = grabfile(file_name, recursive=True)
                for line in lines:
                    elements = line.split()
                    suffix = elements[0].lower()
                    if len(elements) == 1:
                        writemsg(
                            _("--- Missing unpack dependencies for '%s' suffix in '%s'\n"
                              ) % (suffix, file_name))
                    depend = " ".join(elements[1:])
                    try:
                        use_reduce(depend, eapi=eapi)
                    except InvalidDependString as e:
                        writemsg(
                            _("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n")
                            % (suffix, file_name, e))
                    else:
                        repo_dict.setdefault(repo.name, {}).setdefault(
                            eapi, {})[suffix] = depend

    ret = {}
    for repo in repositories.repos_with_profiles():
        for repo_name in [x.name for x in repo.masters] + [repo.name]:
            for eapi in repo_dict.get(repo_name, {}):
                for suffix, depend in (repo_dict.get(repo_name,
                                                     {}).get(eapi,
                                                             {}).items()):
                    ret.setdefault(repo.name,
                                   {}).setdefault(eapi, {})[suffix] = depend

    return ret
Example #20
	def rebuild(self, exclude_pkgs=None, include_file=None,
		preserve_paths=None):
		"""
		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.

		@param exclude_pkgs: A set of packages that should be excluded from
			the LinkageMap, since they are being unmerged and their NEEDED
			entries are therefore irrelevant and would only serve to corrupt
			the LinkageMap.
		@type exclude_pkgs: set
		@param include_file: The path of a file containing NEEDED entries for
			a package which does not exist in the vardbapi yet because it is
			currently being merged.
		@type include_file: String
		@param preserve_paths: Libraries preserved by a package instance that
			is currently being merged. They need to be explicitly passed to the
			LinkageMap, since they are not registered in the
			PreservedLibsRegistry yet.
		@type preserve_paths: set
		"""

		os = _os_merge
		root = self._root
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
		libs = self._libs
		obj_properties = self._obj_properties

		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			for line in grabfile(include_file):
				lines.append((None, include_file, line))

		aux_keys = [self._needed_aux_key]
		can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
		if can_lock:
			self._dbapi.lock()
		try:
			for cpv in self._dbapi.cpv_all():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					continue
				needed_file = self._dbapi.getpath(cpv,
					filename=self._needed_aux_key)
				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
					lines.append((cpv, needed_file, line))
		finally:
			if can_lock:
				self._dbapi.unlock()

		# have to call scanelf for preserved libs here as they aren't 
		# registered in NEEDED.ELF.2 files
		plibs = {}
		if preserve_paths is not None:
			plibs.update((x, None) for x in preserve_paths)
		if self._dbapi._plib_registry and \
			self._dbapi._plib_registry.hasEntries():
			for cpv, items in \
				self._dbapi._plib_registry.getPreservedLibs().items():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					# These preserved libs will either be unmerged,
					# rendering them irrelevant, or they will be
					# preserved in the replacement package and are
					# already represented via the preserve_paths
					# parameter.
					continue
				plibs.update((x, cpv) for x in items)
		if plibs:
			args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
			args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
				for x in plibs)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					l = l[3:].rstrip("\n")
					if not l:
						continue
					fields = l.split(";")
					if len(fields) < 5:
						writemsg_level(_("\nWrong number of fields " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
						continue
					fields[1] = fields[1][root_len:]
					owner = plibs.pop(fields[1], None)
					lines.append((owner, "scanelf", ";".join(fields)))
				proc.wait()
				proc.stdout.close()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x, cpv in plibs.items():
				lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))

		# Share identical frozenset instances when available,
		# in order to conserve memory.
		frozensets = {}

		for owner, location, l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			if '\0' in l:
				# os.stat() will raise "TypeError: must be encoded string
				# without NULL bytes, not str" in this case.
				writemsg_level(_("\nLine contains null byte(s) " \
					"in %s: %s\n\n") % (location, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			try:
				entry = NeededEntry.parse(location, l)
			except InvalidData as e:
				writemsg_level("\n%s\n\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				continue

			# If NEEDED.ELF.2 contains the new multilib category field,
			# then use that for categorization. Otherwise, if a mapping
			# exists, map e_machine (entry.arch) to an approximate
			# multilib category. If all else fails, use e_machine, just
			# as older versions of portage did.
			arch = entry.multilib_category
			if arch is None:
				arch = _approx_multilib_categories.get(
					entry.arch, entry.arch)

			obj = entry.filename
			soname = entry.soname
			expand = {"ORIGIN": os.path.dirname(entry.filename)}
			path = frozenset(normalize_path(
				varexpand(x, expand, error_leader=lambda: "%s: " % location))
				for x in entry.runpaths)
			path = frozensets.setdefault(path, path)
			needed = frozenset(entry.needed)

			needed = frozensets.setdefault(needed, needed)

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = self._obj_properties_class(
					arch, needed, path, soname, [], owner)
				obj_properties[obj_key] = myprops
			# All object paths are added into the obj_properties tuple.
			myprops.alt_paths.append(obj)

			# Don't index the same file more than once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[soname] = soname_map
				soname_map.providers.append(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[needed_soname] = soname_map
				soname_map.consumers.append(obj_key)

		for arch, sonames in libs.items():
			for soname_node in sonames.values():
				soname_node.providers = tuple(set(soname_node.providers))
				soname_node.consumers = tuple(set(soname_node.consumers))
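
For reference, the scanelf format string used above ("%a;%F;%S;%r;%n") produces semicolon-separated fields per object: arch (e_machine), file path, SONAME, RPATH/RUNPATH, and the DT_NEEDED list. A hypothetical output line and how it splits:

line = "EM_X86_64;/usr/lib64/libfoo.so.1;libfoo.so.1;;libc.so.6"  # hypothetical
arch, filename, soname, runpath, needed = line.split(";")
print(needed.split(","))  # ['libc.so.6']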
Example #21
	def updateItems(self, repoid):
		"""
		Figure out which news items from NEWS_PATH are both unread and relevant to
		the user (according to the GLEP 42 standards of relevancy).  Then add these
		items into the news.repoid.unread file.
		"""

		# Ensure that the unread path exists and is writable.

		try:
			ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
				mode=self._dir_mode, mask=self._mode_mask)
		except (OperationNotPermitted, PermissionDenied):
			return

		if not os.access(self.unread_path, os.W_OK):
			return

		news_dir = self._news_dir(repoid)
		try:
			news = _os.listdir(_unicode_encode(news_dir,
				encoding=_encodings['fs'], errors='strict'))
		except OSError:
			return

		skip_filename = self._skip_filename(repoid)
		unread_filename = self._unread_filename(repoid)
		unread_lock = lockfile(unread_filename, wantnewlockfile=1)
		try:
			try:
				unread = set(grabfile(unread_filename))
				unread_orig = unread.copy()
				skip = set(grabfile(skip_filename))
				skip_orig = skip.copy()
			except PermissionDenied:
				return

			for itemid in news:
				try:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='replace')
					writemsg_level(
						_("!!! Invalid encoding in news item name: '%s'\n") % \
						itemid, level=logging.ERROR, noiselevel=-1)
					continue

				if itemid in skip:
					continue
				filename = os.path.join(news_dir, itemid,
					itemid + "." + self.language_id + ".txt")
				if not os.path.isfile(filename):
					continue
				item = NewsItem(filename, itemid)
				if not item.isValid():
					continue
				if item.isRelevant(profile=self._profile_path,
					config=self.config, vardb=self.vdb):
					unread.add(item.name)
					skip.add(item.name)

			if unread != unread_orig:
				write_atomic(unread_filename,
					"".join("%s\n" % x for x in sorted(unread)))
				apply_secpass_permissions(unread_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

			if skip != skip_orig:
				write_atomic(skip_filename,
					"".join("%s\n" % x for x in sorted(skip)))
				apply_secpass_permissions(skip_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

		finally:
			unlockfile(unread_lock)
Example #22
	def rebuild(self, exclude_pkgs=None, include_file=None,
		preserve_paths=None):
		"""
		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.

		@param exclude_pkgs: A set of packages that should be excluded from
			the LinkageMap, since they are being unmerged and their NEEDED
			entries are therefore irrelevant and would only serve to corrupt
			the LinkageMap.
		@type exclude_pkgs: set
		@param include_file: The path of a file containing NEEDED entries for
			a package which does not exist in the vardbapi yet because it is
			currently being merged.
		@type include_file: String
		@param preserve_paths: Libraries preserved by a package instance that
			is currently being merged. They need to be explicitly passed to the
			LinkageMap, since they are not registered in the
			PreservedLibsRegistry yet.
		@type preserve_paths: set
		"""

		os = _os_merge
		root = self._root
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._dbapi.settings['EROOT'],
			env=self._dbapi.settings))
		libs = self._libs
		obj_properties = self._obj_properties

		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			for line in grabfile(include_file):
				lines.append((None, include_file, line))

		aux_keys = [self._needed_aux_key]
		can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
		if can_lock:
			self._dbapi.lock()
		try:
			for cpv in self._dbapi.cpv_all():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					continue
				needed_file = self._dbapi.getpath(cpv,
					filename=self._needed_aux_key)
				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
					lines.append((cpv, needed_file, line))
		finally:
			if can_lock:
				self._dbapi.unlock()

		# have to call scanelf for preserved libs here as they aren't 
		# registered in NEEDED.ELF.2 files
		plibs = {}
		if preserve_paths is not None:
			plibs.update((x, None) for x in preserve_paths)
		if self._dbapi._plib_registry and \
			self._dbapi._plib_registry.hasEntries():
			for cpv, items in \
				self._dbapi._plib_registry.getPreservedLibs().items():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					# These preserved libs will either be unmerged,
					# rendering them irrelevant, or they will be
					# preserved in the replacement package and are
					# already represented via the preserve_paths
					# parameter.
					continue
				plibs.update((x, cpv) for x in items)
		if plibs:
			args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
			args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
				for x in plibs)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					l = l[3:].rstrip("\n")
					if not l:
						continue
					try:
						entry = NeededEntry.parse("scanelf", l)
					except InvalidData as e:
						writemsg_level("\n%s\n\n" % (e,),
							level=logging.ERROR, noiselevel=-1)
						continue
					try:
						with open(_unicode_encode(entry.filename,
							encoding=_encodings['fs'],
							errors='strict'), 'rb') as f:
							elf_header = ELFHeader.read(f)
					except EnvironmentError as e:
						if e.errno != errno.ENOENT:
							raise
						# File removed concurrently.
						continue
					entry.multilib_category = compute_multilib_category(elf_header)
					entry.filename = entry.filename[root_len:]
					owner = plibs.pop(entry.filename, None)
					lines.append((owner, "scanelf", _unicode(entry)))
				proc.wait()
				proc.stdout.close()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x, cpv in plibs.items():
				lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))

		# Share identical frozenset instances when available,
		# in order to conserve memory.
		frozensets = {}

		for owner, location, l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			if '\0' in l:
				# os.stat() will raise "TypeError: must be encoded string
				# without NULL bytes, not str" in this case.
				writemsg_level(_("\nLine contains null byte(s) " \
					"in %s: %s\n\n") % (location, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			try:
				entry = NeededEntry.parse(location, l)
			except InvalidData as e:
				writemsg_level("\n%s\n\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				continue

			# If NEEDED.ELF.2 contains the new multilib category field,
			# then use that for categorization. Otherwise, if a mapping
			# exists, map e_machine (entry.arch) to an approximate
			# multilib category. If all else fails, use e_machine, just
			# as older versions of portage did.
			arch = entry.multilib_category
			if arch is None:
				arch = _approx_multilib_categories.get(
					entry.arch, entry.arch)

			obj = entry.filename
			soname = entry.soname
			expand = {"ORIGIN": os.path.dirname(entry.filename)}
			path = frozenset(normalize_path(
				varexpand(x, expand, error_leader=lambda: "%s: " % location))
				for x in entry.runpaths)
			path = frozensets.setdefault(path, path)
			needed = frozenset(entry.needed)

			needed = frozensets.setdefault(needed, needed)

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = self._obj_properties_class(
					arch, needed, path, soname, [], owner)
				obj_properties[obj_key] = myprops
			# All object paths are added into the obj_properties tuple.
			myprops.alt_paths.append(obj)

			# Don't index the same file more than once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[soname] = soname_map
				soname_map.providers.append(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[needed_soname] = soname_map
				soname_map.consumers.append(obj_key)

		for arch, sonames in libs.items():
			for soname_node in sonames.values():
				soname_node.providers = tuple(set(soname_node.providers))
				soname_node.consumers = tuple(set(soname_node.consumers))
Example #23
	def _parse_profile_files_to_list(self, file_name, locations):
		return tuple(
			tuple(grabfile(os.path.join(x, file_name), recursive=1))
			for x in locations)
Example #24
def _global_updates(trees, prev_mtimes):
	"""
	Perform new global updates if they exist in $PORTDIR/profiles/updates/.
	This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
	then the user should instead use emaint --fix movebin and/or moveinst.

	@param trees: A dictionary containing portage trees.
	@type trees: dict
	@param prev_mtimes: A dictionary containing mtimes of files located in
		$PORTDIR/profiles/updates/.
	@type prev_mtimes: dict
	@rtype: None or List
	@return: None if there were no updates, otherwise a list of update commands
		that have been performed.
	"""
	# only do this if we're root and not running repoman/ebuild digest

	if secpass < 2 or \
		"SANDBOX_ACTIVE" in os.environ or \
		len(trees) != 1:
		return 0
	root = "/"
	mysettings = trees["/"]["vartree"].settings
	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")

	try:
		if mysettings["PORTAGE_CALLER"] == "fixpackages":
			update_data = grab_updates(updpath)
		else:
			update_data = grab_updates(updpath, prev_mtimes)
	except DirectoryNotFound:
		writemsg(_("--- 'profiles/updates' is empty or "
			"not available. Empty portage tree?\n"), noiselevel=1)
		return 0
	myupd = None
	if len(update_data) > 0:
		do_upgrade_packagesmessage = 0
		myupd = []
		timestamps = {}
		for mykey, mystat, mycontent in update_data:
			writemsg_stdout("\n\n")
			writemsg_stdout(colorize("GOOD",
				_("Performing Global Updates: "))+bold(mykey)+"\n")
			writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
			writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
				"%s='/var/db update'  %s='/var/db move'\n"
				"  %s='/var/db SLOT move'  %s='binary move'  "
				"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
				(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
			valid_updates, errors = parse_updates(mycontent)
			myupd.extend(valid_updates)
			writemsg_stdout(len(valid_updates) * "." + "\n")
			if len(errors) == 0:
				# Update our internal mtime since we
				# processed all of our directives.
				timestamps[mykey] = mystat[stat.ST_MTIME]
			else:
				for msg in errors:
					writemsg("%s\n" % msg, noiselevel=-1)

		world_file = os.path.join(root, WORLD_FILE)
		world_list = grabfile(world_file)
		world_modified = False
		for update_cmd in myupd:
			for pos, atom in enumerate(world_list):
				new_atom = update_dbentry(update_cmd, atom)
				if atom != new_atom:
					world_list[pos] = new_atom
					world_modified = True
		if world_modified:
			world_list.sort()
			write_atomic(world_file,
				"".join("%s\n" % (x,) for x in world_list))

		update_config_files("/",
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split(),
			myupd)

		vardb = trees["/"]["vartree"].dbapi
		bindb = trees["/"]["bintree"].dbapi
		if not os.access(bindb.bintree.pkgdir, os.W_OK):
			bindb = None
		else:
			# Call binarytree.populate(), since we want to make sure it's
			# only populated with local packages here (getbinpkgs=0).
			bindb.bintree.populate()
		for update_cmd in myupd:
			if update_cmd[0] == "move":
				moves = vardb.move_ent(update_cmd)
				if moves:
					writemsg_stdout(moves * "@")
				if bindb:
					moves = bindb.move_ent(update_cmd)
					if moves:
						writemsg_stdout(moves * "%")
			elif update_cmd[0] == "slotmove":
				moves = vardb.move_slot_ent(update_cmd)
				if moves:
					writemsg_stdout(moves * "s")
				if bindb:
					moves = bindb.move_slot_ent(update_cmd)
					if moves:
						writemsg_stdout(moves * "S")

		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if len(timestamps) > 0:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.items():
				prev_mtimes[mykey] = mtime

		# We gotta do the brute force updates for these now.
		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
		"fixpackages" in mysettings.features:
			def onUpdate(maxval, curval):
				if curval > 0:
					writemsg_stdout("#")
			vardb.update_ents(myupd, onUpdate=onUpdate)
			if bindb:
				def onUpdate(maxval, curval):
					if curval > 0:
						writemsg_stdout("*")
				bindb.update_ents(myupd, onUpdate=onUpdate)
		else:
			do_upgrade_packagesmessage = 1

		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		# what follows.
		print()
		print()

		if do_upgrade_packagesmessage and bindb and \
			bindb.cpv_all():
			writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
			writemsg_stdout(bold(_("Note: This can take a very long time.")))
			writemsg_stdout("\n")
	if myupd:
		return myupd
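
The update commands consumed here come from profiles/updates files, where each line is a 'move' or 'slotmove' directive per Gentoo conventions. A hypothetical excerpt (package names are illustrative):

move x11-libs/foo x11-libs/libfoo
slotmove =dev-libs/bar-1* 0 1.0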
Example #25
	def _addProfile(self, currentPath, repositories, known_repos):
		current_abs_path = os.path.abspath(currentPath)
		allow_directories = True
		allow_parent_colon = True
		repo_loc = None
		compat_mode = False

		eapi_file = os.path.join(currentPath, "eapi")
		eapi = "0"
		f = None
		try:
			f = io.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			eapi = f.readline().strip()
		except IOError:
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		finally:
			if f is not None:
				f.close()

		intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
		if intersecting_repos:
			# protect against nested repositories.  Insane configuration, but the longest
			# path will be the correct one.
			repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
			allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
				any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
			compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
				layout_data['profile-formats'] == ('portage-1-compat',)
			allow_parent_colon = any(x in _allow_parent_colon
				for x in layout_data['profile-formats'])

		if compat_mode:
			offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
			offenders = sorted(x for x in offenders
				if os.path.isdir(os.path.join(currentPath, x)))
			if offenders:
				warnings.warn(_("Profile '%(profile_path)s' in repository "
					"'%(repo_name)s' is implicitly using 'portage-1' profile format, but "
					"the repository profiles are not marked as that format.  This will break "
					"in the future.  Please either convert the following paths "
					"to files, or add\nprofile-formats = portage-1\nto the "
					"repositories layout.conf.  Files: '%(files)s'\n")
					% dict(profile_path=currentPath, repo_name=repo_loc,
						files=', '.join(offenders)))

		parentsFile = os.path.join(currentPath, "parent")
		if os.path.exists(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				abs_parent = parentPath[:1] == os.sep
				if not abs_parent and allow_parent_colon:
					parentPath = self._expand_parent_colon(parentsFile,
						parentPath, repo_loc, repositories)

				# NOTE: This os.path.join() call is intended to ignore
				# currentPath if parentPath is already absolute.
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))

				if abs_parent or repo_loc is None or \
					not parentPath.startswith(repo_loc):
					# It seems that this parent may point outside
					# of the current repo, so realpath it.
					parentPath = os.path.realpath(parentPath)

				if os.path.exists(parentPath):
					self._addProfile(parentPath, repositories, known_repos)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))

		self.profiles.append(currentPath)
		self.profiles_complex.append(
			_profile_node(currentPath, allow_directories))
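
The parent file read above contains one profile path per line, resolved relative to the current node unless absolute; when allow_parent_colon is in effect, a 'repo:path' entry is expanded against the named repository. A hypothetical parent file:

..
gentoo:targets/desktop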
Example #26
    def _addProfile(self, currentPath, repositories, known_repos,
                    previous_repos):
        current_abs_path = os.path.abspath(currentPath)
        allow_directories = True
        allow_parent_colon = True
        repo_loc = None
        compat_mode = False
        current_formats = ()
        eapi = None

        intersecting_repos = tuple(x for x in known_repos
                                   if current_abs_path.startswith(x[0]))
        if intersecting_repos:
            # Handle nested repositories. The longest path
            # will be the correct one.
            repo_loc, layout_data = max(intersecting_repos,
                                        key=lambda x: len(x[0]))
            eapi = layout_data.get("profile_eapi_when_unspecified")

        eapi_file = os.path.join(currentPath, "eapi")
        eapi = eapi or "0"
        f = None
        try:
            f = io.open(
                _unicode_encode(eapi_file,
                                encoding=_encodings["fs"],
                                errors="strict"),
                mode="r",
                encoding=_encodings["content"],
                errors="replace",
            )
            eapi = f.readline().strip()
        except IOError:
            pass
        else:
            if not eapi_is_supported(eapi):
                raise ParseError(
                    _("Profile contains unsupported "
                      "EAPI '%s': '%s'") % (
                          eapi,
                          os.path.realpath(eapi_file),
                      ))
        finally:
            if f is not None:
                f.close()

        if intersecting_repos:
            allow_directories = (
                eapi_allows_directories_on_profile_level_and_repository_level(
                    eapi) or any(x in _portage1_profiles_allow_directories
                                 for x in layout_data["profile-formats"]))
            compat_mode = (
                not eapi_allows_directories_on_profile_level_and_repository_level(
                    eapi)
                and layout_data["profile-formats"] == ("portage-1-compat", ))
            allow_parent_colon = any(x in _allow_parent_colon
                                     for x in layout_data["profile-formats"])
            current_formats = tuple(layout_data["profile-formats"])

        # According to PMS, a deprecated profile warning is not inherited. Since
        # the current profile node may have been inherited by a user profile
        # node, the deprecation warning may be relevant even if it is not a
        # top-level profile node. Therefore, consider the deprecated warning
        # to be irrelevant when the current profile node belongs to the same
        # repo as the previous profile node.
        show_deprecated_warning = tuple(x[0] for x in previous_repos) != tuple(
            x[0] for x in intersecting_repos)

        if compat_mode:
            offenders = _PORTAGE1_DIRECTORIES.intersection(
                os.listdir(currentPath))
            offenders = sorted(x for x in offenders
                               if os.path.isdir(os.path.join(currentPath, x)))
            if offenders:
                warnings.warn(
                    _("\nThe selected profile is implicitly using the 'portage-1' format:\n"
                      "\tprofile = %(profile_path)s\n"
                      "But this repository is not using that format:\n"
                      "\trepo = %(repo_name)s\n"
                      "This will break in the future.  Please convert these dirs to files:\n"
                      "\t%(files)s\n"
                      "Or, add this line to the repository's layout.conf:\n"
                      "\tprofile-formats = portage-1") % dict(
                          profile_path=currentPath,
                          repo_name=repo_loc,
                          files="\n\t".join(offenders),
                      ))

        parentsFile = os.path.join(currentPath, "parent")
        if exists_raise_eaccess(parentsFile):
            parents = grabfile(parentsFile)
            if not parents:
                raise ParseError(_("Empty parent file: '%s'") % parentsFile)
            for parentPath in parents:
                abs_parent = parentPath[:1] == os.sep
                if not abs_parent and allow_parent_colon:
                    parentPath = self._expand_parent_colon(
                        parentsFile, parentPath, repo_loc, repositories)

                # NOTE: This os.path.join() call is intended to ignore
                # currentPath if parentPath is already absolute.
                parentPath = normalize_path(
                    os.path.join(currentPath, parentPath))

                if (abs_parent or repo_loc is None
                        or not parentPath.startswith(repo_loc)):
                    # It seems that this parent may point outside
                    # of the current repo, so realpath it.
                    parentPath = os.path.realpath(parentPath)

                if exists_raise_eaccess(parentPath):
                    self._addProfile(parentPath, repositories, known_repos,
                                     intersecting_repos)
                else:
                    raise ParseError(
                        _("Parent '%s' not found: '%s'") %
                        (parentPath, parentsFile))

        self.profiles.append(currentPath)
        self.profiles_complex.append(
            _profile_node(
                currentPath,
                allow_directories,
                False,
                current_formats,
                eapi,
                "build-id" in current_formats,
                show_deprecated_warning=show_deprecated_warning,
            ))
Example #27
def ExtractKernelVersion(base_dir):
    """
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@return:
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)

	"""
    lines = []
    pathname = os.path.join(base_dir, 'Makefile')
    try:
        f = io.open(_unicode_encode(pathname,
                                    encoding=_encodings['fs'],
                                    errors='strict'),
                    mode='r',
                    encoding=_encodings['content'],
                    errors='replace')
    except OSError as details:
        return (None, str(details))
    except IOError as details:
        return (None, str(details))

    try:
        for i in range(4):
            lines.append(f.readline())
    except OSError as details:
        return (None, str(details))
    except IOError as details:
        return (None, str(details))
    finally:
        f.close()

    lines = [l.strip() for l in lines]

    version = ''

    #XXX: The following code relies on the ordering of vars within the Makefile
    for line in lines:
        # split on the '=' then remove annoying whitespace
        items = line.split("=")
        items = [i.strip() for i in items]
        if items[0] == 'VERSION' or \
         items[0] == 'PATCHLEVEL':
            version += items[1]
            version += "."
        elif items[0] == 'SUBLEVEL':
            version += items[1]
        elif items[0] == 'EXTRAVERSION' and \
         items[-1] != items[0]:
            version += items[1]

    # Grab a list of files named localversion* and sort them
    localversions = os.listdir(base_dir)
    for x in range(len(localversions) - 1, -1, -1):
        if localversions[x][:12] != "localversion":
            del localversions[x]
    localversions.sort()

    # Append the contents of each to the version string, stripping ALL whitespace
    for lv in localversions:
        version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())

    # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
    loader = KeyValuePairFileLoader(os.path.join(base_dir, ".config"), None)
    kernelconfig, loader_errors = loader.load()
    if loader_errors:
        for file_path, file_errors in loader_errors.items():
            for error_str in file_errors:
                writemsg_level("%s: %s\n" % (file_path, error_str),
                               level=logging.ERROR,
                               noiselevel=-1)

    if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
        version += "".join(shlex_split(kernelconfig["CONFIG_LOCALVERSION"]))

    return (version, None)
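
A hedged usage sketch of the helper above (the path is the conventional sources location; the printout is illustrative):

version, error = ExtractKernelVersion("/usr/src/linux")
if error is None:
    print("Kernel sources are version %s" % version)
else:
    print("Could not determine kernel version: %s" % error)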
Example #28
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
	root = trees._running_eroot
	mysettings = trees[root]["vartree"].settings
	portdb = trees[root]["porttree"].dbapi
	vardb = trees[root]["vartree"].dbapi
	bindb = trees[root]["bintree"].dbapi

	world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
	world_list = grabfile(world_file)
	world_modified = False
	world_warnings = set()
	updpath_map = {}
	# Maps repo_name to list of updates. If a given repo has no updates
	# directory, it will be omitted. If a repo has an updates directory
	# but none need to be applied (according to timestamp logic), the
	# value in the dict will be an empty list.
	repo_map = {}
	timestamps = {}

	retupd = False
	update_notice_printed = False
	for repo_name in portdb.getRepositories():
		repo = portdb.getRepositoryPath(repo_name)
		updpath = os.path.join(repo, "profiles", "updates")
		if not os.path.isdir(updpath):
			continue

		if updpath in updpath_map:
			repo_map[repo_name] = updpath_map[updpath]
			continue

		try:
			if if_mtime_changed:
				update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
			else:
				update_data = grab_updates(updpath)
		except DirectoryNotFound:
			continue
		myupd = []
		updpath_map[updpath] = myupd
		repo_map[repo_name] = myupd
		if len(update_data) > 0:
			for mykey, mystat, mycontent in update_data:
				if not update_notice_printed:
					update_notice_printed = True
					writemsg_stdout("\n")
					if quiet:
						writemsg_stdout(colorize("GOOD",
							_("Performing Global Updates\n")))
						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
					else:
						writemsg_stdout(colorize("GOOD",
							_("Performing Global Updates:\n")))
						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
						writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
							"%s='/var/db update'  %s='/var/db move'\n"
							"  %s='/var/db SLOT move'  %s='binary move'  "
							"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
							(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
				valid_updates, errors = parse_updates(mycontent)
				myupd.extend(valid_updates)
				if not quiet:
					writemsg_stdout(bold(mykey))
					writemsg_stdout(len(valid_updates) * "." + "\n")
				if len(errors) == 0:
					# Update our internal mtime since we
					# processed all of our directives.
					timestamps[mykey] = mystat[stat.ST_MTIME]
				else:
					for msg in errors:
						writemsg("%s\n" % msg, noiselevel=-1)
			if myupd:
				retupd = True

	if retupd:
		if os.access(bindb.bintree.pkgdir, os.W_OK):
			# Call binarytree.populate(), since we want to make sure it's
			# only populated with local packages here (getbinpkgs=0).
			bindb.bintree.populate()
		else:
			bindb = None

	master_repo = portdb.getRepositoryName(portdb.porttree_root)
	if master_repo in repo_map:
		repo_map['DEFAULT'] = repo_map[master_repo]

	for repo_name, myupd in repo_map.items():
			if repo_name == 'DEFAULT':
				continue
			if not myupd:
				continue

			def repo_match(repository):
				return repository == repo_name or \
					(repo_name == master_repo and repository not in repo_map)

			def _world_repo_match(atoma, atomb):
				"""
				Check whether to perform a world change from atoma to atomb.
				If best vardb match for atoma comes from the same repository
				as the update file, allow that. Additionally, if portdb still
				can find a match for old atom name, warn about that.
				"""
				matches = vardb.match(atoma)
				if not matches:
					matches = vardb.match(atomb)
				if matches and \
					repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
					if portdb.match(atoma):
						world_warnings.add((atoma, atomb))
					return True
				else:
					return False

			for update_cmd in myupd:
				for pos, atom in enumerate(world_list):
					new_atom = update_dbentry(update_cmd, atom)
					if atom != new_atom:
						if _world_repo_match(atom, new_atom):
							world_list[pos] = new_atom
							world_modified = True

			for update_cmd in myupd:
				if update_cmd[0] == "move":
					moves = vardb.move_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "@")
					if bindb:
						moves = bindb.move_ent(update_cmd, repo_match=repo_match)
						if moves:
							writemsg_stdout(moves * "%")
				elif update_cmd[0] == "slotmove":
					moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "s")
					if bindb:
						moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
						if moves:
							writemsg_stdout(moves * "S")

	if world_modified:
		world_list.sort()
		write_atomic(world_file,
			"".join("%s\n" % (x,) for x in world_list))
		if world_warnings:
			# XXX: print warning that we've updated world entries
			# and the old name still matches something (from an overlay)?
			pass

	if retupd:

			def _config_repo_match(repo_name, atoma, atomb):
				"""
				Check whether to perform a world change from atoma to atomb.
				If best vardb match for atoma comes from the same repository
				as the update file, allow that. Additionally, if portdb still
				can find a match for old atom name, warn about that.
				"""
				matches = vardb.match(atoma)
				if not matches:
					matches = vardb.match(atomb)
					if not matches:
						return False
				repository = vardb.aux_get(best(matches), ['repository'])[0]
				return repository == repo_name or \
					(repo_name == master_repo and repository not in repo_map)

			update_config_files(root,
				shlex_split(mysettings.get("CONFIG_PROTECT", "")),
				shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
				repo_map, match_callback=_config_repo_match)

			# The above global updates proceed quickly, so they
			# are considered a single mtimedb transaction.
			if timestamps:
				# We do not update the mtime in the mtimedb
				# until after _all_ of the above updates have
				# been processed because the mtimedb will
				# automatically commit when killed by ctrl C.
				for mykey, mtime in timestamps.items():
					prev_mtimes[mykey] = mtime

			do_upgrade_packagesmessage = False
			# We gotta do the brute force updates for these now.
			if True:
				def onUpdate(maxval, curval):
					if curval > 0:
						writemsg_stdout("#")
				if quiet:
					onUpdate = None
				vardb.update_ents(repo_map, onUpdate=onUpdate)
				if bindb:
					def onUpdate(maxval, curval):
						if curval > 0:
							writemsg_stdout("*")
					if quiet:
						onUpdate = None
					bindb.update_ents(repo_map, onUpdate=onUpdate)
			else:
				do_upgrade_packagesmessage = 1

			# Update progress above is indicated by characters written to stdout so
			# we print a couple new lines here to separate the progress output from
			# what follows.
			writemsg_stdout("\n\n")

			if do_upgrade_packagesmessage and bindb and \
				bindb.cpv_all():
				writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
				writemsg_stdout(bold(_("Note: This can take a very long time.")))
				writemsg_stdout("\n")

	return retupd
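For context, each file under a repository's profiles/updates directory carries one directive per line, and parse_updates() turns that text into the command lists dispatched on above. A small sketch with invented atoms:

# Hypothetical update-file content; the package names are made up.
sample = (
    "move net-misc/oldname net-misc/newname\n"
    "slotmove =dev-libs/foo-1* 0 1\n"
)
valid_updates, errors = parse_updates(sample)
for update_cmd in valid_updates:
    # Each command starts with "move" or "slotmove", which is what the
    # dispatch on update_cmd[0] above relies on.
    assert update_cmd[0] in ("move", "slotmove")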
Example #29
    def _parse_profile_files_to_list(self, file_name, locations):
        return tuple(
            tuple(grabfile(os.path.join(x, file_name), recursive=1))
            for x in locations)
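The helper above returns one inner tuple of grabfile() lines per location, preserving profile stacking order. A sketch, assuming it is called from inside the owning configuration class (the paths are invented):

# Hypothetical call; one tuple of lines per profile directory, in order.
masks = self._parse_profile_files_to_list(
    "package.mask",
    ("/var/db/repos/gentoo/profiles/base",
     "/var/db/repos/gentoo/profiles/default/linux/amd64"))
# e.g. masks == (("sys-apps/foo", ...), ("=dev-libs/bar-1.0", ...))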
Example #30
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@return:
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)

	"""
	lines = []
	pathname = os.path.join(base_dir, 'Makefile')
	try:
		f = io.open(_unicode_encode(pathname,
			encoding=_encodings['fs'], errors='strict'), mode='r',
			encoding=_encodings['content'], errors='replace')
	except OSError as details:
		return (None, str(details))
	except IOError as details:
		return (None, str(details))

	try:
		for i in range(4):
			lines.append(f.readline())
	except OSError as details:
		return (None, str(details))
	except IOError as details:
		return (None, str(details))
	finally:
		f.close()

	lines = [l.strip() for l in lines]

	version = ''

	#XXX: The following code relies on the ordering of vars within the Makefile
	for line in lines:
		# split on the '=' then remove annoying whitespace
		items = line.split("=")
		items = [i.strip() for i in items]
		if items[0] == 'VERSION' or \
			items[0] == 'PATCHLEVEL':
			version += items[1]
			version += "."
		elif items[0] == 'SUBLEVEL':
			version += items[1]
		elif items[0] == 'EXTRAVERSION' and \
			items[-1] != items[0]:
			version += items[1]

	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	for x in range(len(localversions) - 1, -1, -1):
		if localversions[x][:12] != "localversion":
			del localversions[x]
	localversions.sort()

	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())

	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())

	return (version, None)
Example #31
    def updateItems(self, repoid):
        """
        Figure out which news items from NEWS_PATH are both unread and relevant to
        the user (according to the GLEP 42 standards of relevancy).  Then add these
        items into the news.repoid.unread file.
        """

        # Ensure that the unread path exists and is writable.

        try:
            ensure_dirs(self.unread_path,
                        uid=self._uid,
                        gid=self._gid,
                        mode=self._dir_mode,
                        mask=self._mode_mask)
        except (OperationNotPermitted, PermissionDenied):
            return

        if not os.access(self.unread_path, os.W_OK):
            return

        news_dir = self._news_dir(repoid)
        try:
            news = _os.listdir(
                _unicode_encode(news_dir,
                                encoding=_encodings['fs'],
                                errors='strict'))
        except OSError:
            return

        skip_filename = self._skip_filename(repoid)
        unread_filename = self._unread_filename(repoid)
        unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        try:
            try:
                unread = set(grabfile(unread_filename))
                unread_orig = unread.copy()
                skip = set(grabfile(skip_filename))
                skip_orig = skip.copy()
            except PermissionDenied:
                return

            for itemid in news:
                try:
                    itemid = _unicode_decode(itemid,
                                             encoding=_encodings['fs'],
                                             errors='strict')
                except UnicodeDecodeError:
                    itemid = _unicode_decode(itemid,
                                             encoding=_encodings['fs'],
                                             errors='replace')
                    writemsg_level(
                     _("!!! Invalid encoding in news item name: '%s'\n") % \
                     itemid, level=logging.ERROR, noiselevel=-1)
                    continue

                if itemid in skip:
                    continue
                filename = os.path.join(
                    news_dir, itemid, itemid + "." + self.language_id + ".txt")
                if not os.path.isfile(filename):
                    continue
                item = NewsItem(filename, itemid)
                if not item.isValid():
                    continue
                if item.isRelevant(profile=self._profile_path,
                                   config=self.config,
                                   vardb=self.vdb):
                    unread.add(item.name)
                    skip.add(item.name)

            if unread != unread_orig:
                write_atomic(unread_filename,
                             "".join("%s\n" % x for x in sorted(unread)))
                apply_secpass_permissions(unread_filename,
                                          uid=self._uid,
                                          gid=self._gid,
                                          mode=self._file_mode,
                                          mask=self._mode_mask)

            if skip != skip_orig:
                write_atomic(skip_filename,
                             "".join("%s\n" % x for x in sorted(skip)))
                apply_secpass_permissions(skip_filename,
                                          uid=self._uid,
                                          gid=self._gid,
                                          mode=self._file_mode,
                                          mask=self._mode_mask)

        finally:
            unlockfile(unread_lock)
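The unread and skip files handled above are plain one-item-per-line lists, so the round-trip is just grabfile() plus write_atomic(). A sketch (the path follows GLEP 42's /var/lib/gentoo/news/ convention, and the item name is invented):

# Hypothetical manual round-trip mirroring what updateItems() does.
unread_filename = "/var/lib/gentoo/news/news-gentoo.unread"
unread = set(grabfile(unread_filename))
unread.add("2023-01-01-example-news-item")  # invented item id
write_atomic(unread_filename,
             "".join("%s\n" % x for x in sorted(unread)))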
Example #32
	def rebuild(self, exclude_pkgs=None, include_file=None):
		"""
		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.
		"""

		os = _os_merge
		root = self._eroot
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._eroot))
		libs = self._libs
		obj_properties = self._obj_properties

		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			lines += grabfile(include_file)

		aux_keys = [self._needed_aux_key]
		for cpv in self._dbapi.cpv_all():
			if exclude_pkgs is not None and cpv in exclude_pkgs:
				continue
			lines += self._dbapi.aux_get(cpv, aux_keys)[0].split('\n')
		# Cache NEEDED.* files to avoid doing excessive IO for every rebuild.
		self._dbapi.flush_cache()

		# We have to call scanelf for preserved libs here, as they aren't
		# registered in NEEDED.ELF.2 files
		plibs = set()
		if self._dbapi._plib_registry and self._dbapi._plib_registry.getPreservedLibs():
			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
			for items in self._dbapi._plib_registry.getPreservedLibs().values():
				plibs.update(items)
				args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
					for x in items)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					l = l[3:].rstrip("\n")
					if not l:
						continue
					fields = l.split(";")
					if len(fields) < 5:
						writemsg_level(_("\nWrong number of fields " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
						continue
					fields[1] = fields[1][root_len:]
					plibs.discard(fields[1])
					lines.append(";".join(fields))
				proc.wait()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x in plibs:
				lines.append(";".join(['', x, '', '', '']))

		for l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			fields = l.split(";")
			if len(fields) < 5:
				writemsg_level(_("\nWrong number of fields " \
					"in %s: %s\n\n") % (self._needed_aux_key, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			arch = fields[0]
			obj = fields[1]
			soname = fields[2]
			path = set([normalize_path(x) \
				for x in filter(None, fields[3].replace(
				"${ORIGIN}", os.path.dirname(obj)).replace(
				"$ORIGIN", os.path.dirname(obj)).split(":"))])
			needed = [x for x in fields[4].split(",") if x]

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = (arch, needed, path, soname, set())
				obj_properties[obj_key] = myprops
			# All object paths are added into the obj_properties tuple.
			myprops[4].add(obj)

			# Don't index the same file more than once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=set(), consumers=set())
					arch_map[soname] = soname_map
				soname_map.providers.add(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=set(), consumers=set())
					arch_map[needed_soname] = soname_map
				soname_map.consumers.add(obj_key)
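Both the scanelf format string ("%a;%F;%S;%r;%n") and the NEEDED.ELF.2 entries parsed above share the same five-field, semicolon-separated layout. A sketch with an invented object:

# arch;object;soname;runpath;comma-separated needed sonames
line = "x86_64;/usr/bin/foo;;/usr/lib64:${ORIGIN}/../lib64;libc.so.6,libz.so.1"
arch, obj, soname, rpath, needed = line.split(";")
assert needed.split(",") == ["libc.so.6", "libz.so.1"]
# An empty soname field (typical for executables) is why the loop above
# only registers providers under "if soname:".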
Example #33
	def _addProfile(self, currentPath, repositories, known_repos):
		current_abs_path = os.path.abspath(currentPath)
		allow_directories = True
		allow_parent_colon = True
		repo_loc = None
		compat_mode = False
		current_formats = ()
		eapi = None

		intersecting_repos = [x for x in known_repos
			if current_abs_path.startswith(x[0])]
		if intersecting_repos:
			# Handle nested repositories. The longest path
			# will be the correct one.
			repo_loc, layout_data = max(intersecting_repos,
				key=lambda x:len(x[0]))
			eapi = layout_data.get("profile_eapi_when_unspecified")

		eapi_file = os.path.join(currentPath, "eapi")
		eapi = eapi or "0"
		f = None
		try:
			f = io.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			eapi = f.readline().strip()
		except IOError:
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		finally:
			if f is not None:
				f.close()

		if intersecting_repos:
			allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
				any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
			compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
				layout_data['profile-formats'] == ('portage-1-compat',)
			allow_parent_colon = any(x in _allow_parent_colon
				for x in layout_data['profile-formats'])
			current_formats = tuple(layout_data['profile-formats'])

		if compat_mode:
			offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
			offenders = sorted(x for x in offenders
				if os.path.isdir(os.path.join(currentPath, x)))
			if offenders:
				warnings.warn(_(
					"\nThe selected profile is implicitly using the 'portage-1' format:\n"
					"\tprofile = %(profile_path)s\n"
					"But this repository is not using that format:\n"
					"\trepo = %(repo_name)s\n"
					"This will break in the future.  Please convert these dirs to files:\n"
					"\t%(files)s\n"
					"Or, add this line to the repository's layout.conf:\n"
					"\tprofile-formats = portage-1")
					% dict(profile_path=currentPath, repo_name=repo_loc,
						files='\n\t'.join(offenders)))

		parentsFile = os.path.join(currentPath, "parent")
		if exists_raise_eaccess(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				abs_parent = parentPath[:1] == os.sep
				if not abs_parent and allow_parent_colon:
					parentPath = self._expand_parent_colon(parentsFile,
						parentPath, repo_loc, repositories)

				# NOTE: This os.path.join() call is intended to ignore
				# currentPath if parentPath is already absolute.
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))

				if abs_parent or repo_loc is None or \
					not parentPath.startswith(repo_loc):
					# It seems that this parent may point outside
					# of the current repo, so realpath it.
					parentPath = os.path.realpath(parentPath)

				if exists_raise_eaccess(parentPath):
					self._addProfile(parentPath, repositories, known_repos)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))

		self.profiles.append(currentPath)
		self.profiles_complex.append(
			_profile_node(currentPath, allow_directories, False,
				current_formats, eapi))
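When allow_parent_colon is in effect, the parent file may name profiles in other repositories; _expand_parent_colon() resolves the "repo:path" form. A sketch of the accepted entries (repository and profile names are invented):

# Hypothetical "parent" file contents:
#
#   ../base                      relative to the current profile directory
#   gentoo:default/linux/amd64   "repo:path", resolved against the named
#                                repository's profiles/ directory
#   :targets/desktop             empty repository name, resolved against
#                                the current repository's profiles/ directory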
Example #34
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
    root = trees._running_eroot
    mysettings = trees[root]["vartree"].settings
    portdb = trees[root]["porttree"].dbapi
    vardb = trees[root]["vartree"].dbapi
    bindb = trees[root]["bintree"].dbapi

    world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
    world_list = grabfile(world_file)
    world_modified = False
    world_warnings = set()
    updpath_map = {}
    # Maps repo_name to list of updates. If a given repo has no updates
    # directory, it will be omitted. If a repo has an updates directory
    # but none need to be applied (according to timestamp logic), the
    # value in the dict will be an empty list.
    repo_map = {}
    timestamps = {}

    retupd = False
    update_notice_printed = False
    for repo_name in portdb.getRepositories():
        repo = portdb.getRepositoryPath(repo_name)
        updpath = os.path.join(repo, "profiles", "updates")
        if not os.path.isdir(updpath):
            continue

        if updpath in updpath_map:
            repo_map[repo_name] = updpath_map[updpath]
            continue

        try:
            if if_mtime_changed:
                update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
            else:
                update_data = grab_updates(updpath)
        except DirectoryNotFound:
            continue
        myupd = []
        updpath_map[updpath] = myupd
        repo_map[repo_name] = myupd
        if len(update_data) > 0:
            for mykey, mystat, mycontent in update_data:
                if not update_notice_printed:
                    update_notice_printed = True
                    writemsg_stdout("\n")
                    writemsg_stdout(
                        colorize("GOOD", _("Performing Global Updates\n")))
                    writemsg_stdout(
                        _("(Could take a couple of minutes if you have a lot of binary packages.)\n"
                          ))
                    if not quiet:
                        writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
                         "%s='/var/db update'  %s='/var/db move'\n"
                         "  %s='/var/db SLOT move'  %s='binary move'  "
                         "%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
                         (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
                valid_updates, errors = parse_updates(mycontent)
                myupd.extend(valid_updates)
                if not quiet:
                    writemsg_stdout(bold(mykey))
                    writemsg_stdout(len(valid_updates) * "." + "\n")
                if len(errors) == 0:
                    # Update our internal mtime since we
                    # processed all of our directives.
                    timestamps[mykey] = mystat[stat.ST_MTIME]
                else:
                    for msg in errors:
                        writemsg("%s\n" % msg, noiselevel=-1)
            if myupd:
                retupd = True

    if retupd:
        if os.access(bindb.bintree.pkgdir, os.W_OK):
            # Call binarytree.populate(), since we want to make sure it's
            # only populated with local packages here (getbinpkgs=0).
            bindb.bintree.populate()
        else:
            bindb = None

    master_repo = portdb.repositories.mainRepo()
    if master_repo is not None:
        master_repo = master_repo.name
    if master_repo in repo_map:
        repo_map['DEFAULT'] = repo_map[master_repo]

    for repo_name, myupd in repo_map.items():
        if repo_name == 'DEFAULT':
            continue
        if not myupd:
            continue

        def repo_match(repository):
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        def _world_repo_match(atoma, atomb):
            """
            Check whether to perform a world change from atoma to atomb.
            If best vardb match for atoma comes from the same repository
            as the update file, allow that. Additionally, if portdb still
            can find a match for old atom name, warn about that.
            """
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
            if matches and \
             repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
                if portdb.match(atoma):
                    world_warnings.add((atoma, atomb))
                return True
            else:
                return False

        for update_cmd in myupd:
            for pos, atom in enumerate(world_list):
                new_atom = update_dbentry(update_cmd, atom)
                if atom != new_atom:
                    if _world_repo_match(atom, new_atom):
                        world_list[pos] = new_atom
                        world_modified = True

        for update_cmd in myupd:
            if update_cmd[0] == "move":
                moves = vardb.move_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "@")
                if bindb:
                    moves = bindb.move_ent(update_cmd, repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "%")
            elif update_cmd[0] == "slotmove":
                moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "s")
                if bindb:
                    moves = bindb.move_slot_ent(update_cmd,
                                                repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "S")

    if world_modified:
        world_list.sort()
        write_atomic(world_file, "".join("%s\n" % (x, ) for x in world_list))
        if world_warnings:
            # XXX: print warning that we've updated world entries
            # and the old name still matches something (from an overlay)?
            pass

    if retupd:

        def _config_repo_match(repo_name, atoma, atomb):
            """
            Check whether to perform a world change from atoma to atomb.
            If best vardb match for atoma comes from the same repository
            as the update file, allow that. Additionally, if portdb still
            can find a match for old atom name, warn about that.
            """
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
                if not matches:
                    return False
            repository = vardb.aux_get(best(matches), ['repository'])[0]
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        update_config_files(root,
                            shlex_split(mysettings.get("CONFIG_PROTECT", "")),
                            shlex_split(
                                mysettings.get("CONFIG_PROTECT_MASK", "")),
                            repo_map,
                            match_callback=_config_repo_match,
                            case_insensitive="case-insensitive-fs"
                            in mysettings.features)

        # The above global updates proceed quickly, so they
        # are considered a single mtimedb transaction.
        if timestamps:
            # We do not update the mtime in the mtimedb
            # until after _all_ of the above updates have
            # been processed because the mtimedb will
            # automatically commit when killed by ctrl C.
            for mykey, mtime in timestamps.items():
                prev_mtimes[mykey] = mtime

        do_upgrade_packagesmessage = False
        # We gotta do the brute force updates for these now.
        if True:

            def onUpdate(_maxval, curval):
                if curval > 0:
                    writemsg_stdout("#")

            if quiet:
                onUpdate = None
            vardb.update_ents(repo_map, onUpdate=onUpdate)
            if bindb:

                def onUpdate(_maxval, curval):
                    if curval > 0:
                        writemsg_stdout("*")

                if quiet:
                    onUpdate = None
                bindb.update_ents(repo_map, onUpdate=onUpdate)
        else:
            do_upgrade_packagesmessage = 1

        # Update progress above is indicated by characters written to stdout so
        # we print a couple new lines here to separate the progress output from
        # what follows.
        writemsg_stdout("\n\n")

        if do_upgrade_packagesmessage and bindb and \
         bindb.cpv_all():
            writemsg_stdout(
                _(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"
                  ))
            writemsg_stdout(bold(_("Note: This can take a very long time.")))
            writemsg_stdout("\n")

    return retupd
Example #35
    def _addProfile(self, currentPath, repositories, known_repos):
        current_abs_path = os.path.abspath(currentPath)
        allow_directories = True
        allow_parent_colon = True
        repo_loc = None
        compat_mode = False
        current_formats = ()
        eapi = None

        intersecting_repos = [
            x for x in known_repos if current_abs_path.startswith(x[0])
        ]
        if intersecting_repos:
            # Handle nested repositories. The longest path
            # will be the correct one.
            repo_loc, layout_data = max(intersecting_repos,
                                        key=lambda x: len(x[0]))
            eapi = layout_data.get("profile_eapi_when_unspecified")

        eapi_file = os.path.join(currentPath, "eapi")
        eapi = eapi or "0"
        f = None
        try:
            f = io.open(_unicode_encode(eapi_file,
                                        encoding=_encodings['fs'],
                                        errors='strict'),
                        mode='r',
                        encoding=_encodings['content'],
                        errors='replace')
            eapi = f.readline().strip()
        except IOError:
            pass
        else:
            if not eapi_is_supported(eapi):
                raise ParseError(_(
                 "Profile contains unsupported "
                 "EAPI '%s': '%s'") % \
                 (eapi, os.path.realpath(eapi_file),))
        finally:
            if f is not None:
                f.close()

        if intersecting_repos:
            allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
             any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
            compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
             layout_data['profile-formats'] == ('portage-1-compat',)
            allow_parent_colon = any(x in _allow_parent_colon
                                     for x in layout_data['profile-formats'])
            current_formats = tuple(layout_data['profile-formats'])

        if compat_mode:
            offenders = _PORTAGE1_DIRECTORIES.intersection(
                os.listdir(currentPath))
            offenders = sorted(x for x in offenders
                               if os.path.isdir(os.path.join(currentPath, x)))
            if offenders:
                warnings.warn(
                    _("\nThe selected profile is implicitly using the 'portage-1' format:\n"
                      "\tprofile = %(profile_path)s\n"
                      "But this repository is not using that format:\n"
                      "\trepo = %(repo_name)s\n"
                      "This will break in the future.  Please convert these dirs to files:\n"
                      "\t%(files)s\n"
                      "Or, add this line to the repository's layout.conf:\n"
                      "\tprofile-formats = portage-1") %
                    dict(profile_path=currentPath,
                         repo_name=repo_loc,
                         files='\n\t'.join(offenders)))

        parentsFile = os.path.join(currentPath, "parent")
        if exists_raise_eaccess(parentsFile):
            parents = grabfile(parentsFile)
            if not parents:
                raise ParseError(_("Empty parent file: '%s'") % parentsFile)
            for parentPath in parents:
                abs_parent = parentPath[:1] == os.sep
                if not abs_parent and allow_parent_colon:
                    parentPath = self._expand_parent_colon(
                        parentsFile, parentPath, repo_loc, repositories)

                # NOTE: This os.path.join() call is intended to ignore
                # currentPath if parentPath is already absolute.
                parentPath = normalize_path(
                    os.path.join(currentPath, parentPath))

                if abs_parent or repo_loc is None or \
                 not parentPath.startswith(repo_loc):
                    # It seems that this parent may point outside
                    # of the current repo, so realpath it.
                    parentPath = os.path.realpath(parentPath)

                if exists_raise_eaccess(parentPath):
                    self._addProfile(parentPath, repositories, known_repos)
                else:
                    raise ParseError(
                     _("Parent '%s' not found: '%s'") %  \
                     (parentPath, parentsFile))

        self.profiles.append(currentPath)
        self.profiles_complex.append(
            _profile_node(currentPath, allow_directories, False,
                          current_formats, eapi,
                          'build-id' in current_formats))
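The _addProfile() variants above differ only in how many trailing fields they pass to _profile_node; the first five positional fields are the same everywhere. A sketch of consuming them (the variable names are descriptive guesses from the call sites, not the namedtuple's verified attribute names):

# Hypothetical consumer of self.profiles_complex; names are assumptions.
for node in self.profiles_complex:
    location, portage1_dirs, user_config, formats, eapi = node[:5]
    print("%s (EAPI %s, formats: %s)" % (location, eapi, ", ".join(formats)))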