Example #1
    def __init__(
        self,
        allowed_pkg_keys=None,
        default_header_data=None,
        default_pkg_data=None,
        inherited_keys=None,
        translated_keys=None,
    ):

        self._pkg_slot_dict = None
        if allowed_pkg_keys is not None:
            self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)

        self._default_header_data = default_header_data
        self._default_pkg_data = default_pkg_data
        self._inherited_keys = inherited_keys
        self._write_translation_map = {}
        self._read_translation_map = {}
        if translated_keys:
            self._write_translation_map.update(translated_keys)
            self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
        self.header = {}
        if self._default_header_data:
            self.header.update(self._default_header_data)
        self.packages = []
        self.modified = True
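
Every example in this section builds on slot_dict_class. For reference, here is a minimal, self-contained stand-in that captures the contract these callers rely on (a fixed key set, attribute access, dict-style iteration); it is a sketch of the behavior, not portage.cache.mappings.slot_dict_class itself.

def slot_dict_class(keys, prefix="_val_"):
    # Build a class whose instances accept only the given keys as
    # attributes, mirroring how the examples use the real helper.
    class SlotDict(object):
        __slots__ = tuple(prefix + k for k in keys)

        def values(self):
            # Yield whichever attributes have been assigned so far.
            for slot in self.__slots__:
                try:
                    yield getattr(self, slot)
                except AttributeError:
                    pass

    return SlotDict

# Usage mirroring FifoIpcDaemon below: prefix="" makes slot names
# match the keys directly.
files_dict = slot_dict_class(("pipe_in",), prefix="")
files = files_dict()
files.pipe_in = 42
assert list(files.values()) == [42]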
Example #2
class FifoIpcDaemon(AbstractPollTask):

    __slots__ = ("input_fifo", "output_fifo",) + \
     ("_files", "_reg_id",)

    _file_names = ("pipe_in", )
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):
        self._files = self._files_dict()
        input_fd = os.open(self.input_fifo, os.O_RDONLY | os.O_NONBLOCK)
        self._files.pipe_in = os.fdopen(input_fd, 'rb')

        self._reg_id = self.scheduler.register(self._files.pipe_in.fileno(),
                                               self._registered_events,
                                               self._input_handler)

        self._registered = True

    def isAlive(self):
        return self._registered

    def cancel(self):
        if self.returncode is None:
            self.returncode = 1
            self.cancelled = True
        self._unregister()
        self.wait()

    def _wait(self):
        if self.returncode is not None:
            return self.returncode

        if self._registered:
            self.scheduler.schedule(self._reg_id)
            self._unregister()

        if self.returncode is None:
            self.returncode = os.EX_OK

        return self.returncode

    def _input_handler(self, fd, event):
        raise NotImplementedError(self)

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)
            self._reg_id = None

        if self._files is not None:
            for f in self._files.values():
                f.close()
            self._files = None
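
Both FifoIpcDaemon variants hinge on opening the FIFO with O_NONBLOCK, which lets the reader side open before any writer exists and makes reads fail fast instead of blocking. A standalone sketch of that behavior (the temporary path is illustrative):

import errno
import os
import tempfile

fifo_path = os.path.join(tempfile.mkdtemp(), "pipe_in")
os.mkfifo(fifo_path)

# Opening read-only with O_NONBLOCK succeeds even with no writer yet.
fd = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK)
try:
    # With no writer connected this returns b'' (EOF); with a writer
    # connected but no data queued, it raises EAGAIN instead.
    data = os.read(fd, 4096)
except OSError as e:
    if e.errno != errno.EAGAIN:
        raise
    data = None
finally:
    os.close(fd)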
Example #3
    def __init__(self,
                 allowed_pkg_keys=None,
                 default_header_data=None,
                 default_pkg_data=None,
                 inherited_keys=None,
                 translated_keys=None):

        self._pkg_slot_dict = None
        if allowed_pkg_keys is not None:
            self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)

        self._default_header_data = default_header_data
        self._default_pkg_data = default_pkg_data
        self._inherited_keys = inherited_keys
        self._write_translation_map = {}
        self._read_translation_map = {}
        if translated_keys:
            self._write_translation_map.update(translated_keys)
            self._read_translation_map.update(
                ((y, x) for (x, y) in translated_keys))
        self.header = {}
        if self._default_header_data:
            self.header.update(self._default_header_data)
        self.packages = []
        self.modified = True
Example #4
class FifoIpcDaemon(AbstractPollTask):

    __slots__ = ("input_fifo", "output_fifo", "_files")

    _file_names = ("pipe_in", )
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):
        self._files = self._files_dict()

        # File streams are in unbuffered mode since we do atomic
        # read and write of whole pickles.
        self._files.pipe_in = \
         os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)

        self.scheduler.add_reader(self._files.pipe_in, self._input_handler)

        self._registered = True

    def _reopen_input(self):
        """
        Re-open the input stream, in order to suppress
        POLLHUP events (bug #339976).
        """
        self.scheduler.remove_reader(self._files.pipe_in)
        os.close(self._files.pipe_in)
        self._files.pipe_in = \
         os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)

        self.scheduler.add_reader(self._files.pipe_in, self._input_handler)

    def _cancel(self):
        if self.returncode is None:
            self.returncode = 1
        self._unregister()
        # notify exit listeners
        self._async_wait()

    def _input_handler(self):
        raise NotImplementedError(self)

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._files is not None:
            for f in self._files.values():
                self.scheduler.remove_reader(f)
                os.close(f)
            self._files = None
Example #5
	def __init__(self, mybintree=None, **kwargs):
		fakedbapi.__init__(self, **kwargs)
		self.bintree = mybintree
		self.move_ent = mybintree.move_ent
		self.cpvdict = {}
		self.cpdict = {}
		# Selectively cache metadata in order to optimize dep matching.
		self._aux_cache_keys = set(
			["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
			"RDEPEND", "repository", "RESTRICT", "SLOT", "USE"])
		self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
		self._aux_cache = {}
Example #6
	def __init__(self, mybintree=None, **kwargs):
		fakedbapi.__init__(self, **kwargs)
		self.bintree = mybintree
		self.move_ent = mybintree.move_ent
		self.cpvdict = {}
		self.cpdict = {}
		# Selectively cache metadata in order to optimize dep matching.
		self._aux_cache_keys = set(
			["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
			"RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
			"REQUIRED_USE"])
		self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
		self._aux_cache = {}
Example #7
	def __init__(self, mybintree=None, **kwargs):
		# Always enable multi_instance mode for bindbapi indexing. This
		# does not affect the local PKGDIR file layout, since that is
		# controlled independently by FEATURES=binpkg-multi-instance.
		# The multi_instance mode is useful for the following reasons:
		# * binary packages with the same cpv from multiple binhosts
		#   can be considered simultaneously
		# * if binpkg-multi-instance is disabled, it's still possible
		#   to properly access a PKGDIR which has binpkg-multi-instance
		#   layout (or mixed layout)
		fakedbapi.__init__(self, exclusive_slots=False,
			multi_instance=True, **kwargs)
		self.bintree = mybintree
		self.move_ent = mybintree.move_ent
		# Selectively cache metadata in order to optimize dep matching.
		self._aux_cache_keys = set(
			["BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
			"DEPEND", "EAPI", "HDEPEND", "IUSE", "KEYWORDS",
			"LICENSE", "MD5", "PDEPEND", "PROPERTIES", "PROVIDE",
			"PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
			"SIZE", "SLOT", "USE", "_mtime_"
			])
		self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
		self._aux_cache = {}
Example #8
	def __init__(self, mybintree=None, **kwargs):
		# Always enable multi_instance mode for bindbapi indexing. This
		# does not affect the local PKGDIR file layout, since that is
		# controlled independently by FEATURES=binpkg-multi-instance.
		# The multi_instance mode is useful for the following reasons:
		# * binary packages with the same cpv from multiple binhosts
		#   can be considered simultaneously
		# * if binpkg-multi-instance is disabled, it's still possible
		#   to properly access a PKGDIR which has binpkg-multi-instance
		#   layout (or mixed layout)
		fakedbapi.__init__(self, exclusive_slots=False,
			multi_instance=True, **kwargs)
		self.bintree = mybintree
		self.move_ent = mybintree.move_ent
		# Selectively cache metadata in order to optimize dep matching.
		self._aux_cache_keys = set(
			["BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
			"DEPEND", "EAPI", "HDEPEND", "IUSE", "KEYWORDS",
			"LICENSE", "MD5", "PDEPEND", "PROPERTIES", "PROVIDE",
			"PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
			"SIZE", "SLOT", "USE", "_mtime_"
			])
		self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
		self._aux_cache = {}
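
Examples #5 through #8 trace how _aux_cache_keys grew over time; the slot-dict class built from those keys avoids a per-instance __dict__, which is what makes caching metadata for thousands of packages cheap. A rough standalone illustration of that trade-off:

import sys

class Slotted(object):
    __slots__ = ("SLOT", "EAPI")

slotted = Slotted()
slotted.SLOT = "0"
slotted.EAPI = "8"
plain = {"SLOT": "0", "EAPI": "8"}

# Slotted instances carry no per-instance __dict__, so each cached
# entry is smaller than an ordinary dict holding the same values.
print(sys.getsizeof(slotted), sys.getsizeof(plain))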
Example #9
class EbuildMetadataPhase(SubProcess):
    """
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    """

    __slots__ = (
        "cpv",
        "eapi_supported",
        "ebuild_hash",
        "fd_pipes",
        "metadata",
        "portdb",
        "repo_path",
        "settings",
        "write_auxdb",
    ) + (
        "_eapi",
        "_eapi_lineno",
        "_raw_metadata",
    )

    _file_names = ("ebuild", )
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):
        ebuild_path = self.ebuild_hash.location

        with io.open(
                _unicode_encode(ebuild_path,
                                encoding=_encodings["fs"],
                                errors="strict"),
                mode="r",
                encoding=_encodings["repo.content"],
                errors="replace",
        ) as f:
            self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

        parsed_eapi = self._eapi
        if parsed_eapi is None:
            parsed_eapi = "0"

        if not parsed_eapi:
            # An empty EAPI setting is invalid.
            self._eapi_invalid(None)
            self.returncode = 1
            self._async_wait()
            return

        self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
        if not self.eapi_supported:
            self.metadata = {"EAPI": parsed_eapi}
            self.returncode = os.EX_OK
            self._async_wait()
            return

        settings = self.settings
        settings.setcpv(self.cpv)
        settings.configdict["pkg"]["EAPI"] = parsed_eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        null_input = open("/dev/null", "rb")
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()

        fcntl.fcntl(
            master_fd,
            fcntl.F_SETFL,
            fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK,
        )

        fd_pipes[slave_fd] = slave_fd
        settings["PORTAGE_PIPE_FD"] = str(slave_fd)

        self._raw_metadata = []
        files.ebuild = master_fd
        self.scheduler.add_reader(files.ebuild, self._output_handler)
        self._registered = True

        retval = portage.doebuild(
            ebuild_path,
            "depend",
            settings=settings,
            debug=debug,
            mydbapi=self.portdb,
            tree="porttree",
            fd_pipes=fd_pipes,
            returnpid=True,
        )
        settings.pop("PORTAGE_PIPE_FD", None)

        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]

    def _output_handler(self):
        while True:
            buf = self._read_buf(self._files.ebuild)
            if buf is None:
                break  # EAGAIN
            elif buf:
                self._raw_metadata.append(buf)
            else:  # EIO/POLLHUP
                if self.pid is None:
                    self._unregister()
                    self._async_wait()
                else:
                    self._async_waitpid()
                break

    def _unregister(self):
        if self._files is not None:
            self.scheduler.remove_reader(self._files.ebuild)
        SubProcess._unregister(self)

    def _async_waitpid_cb(self, *args, **kwargs):
        """
        Override _async_waitpid_cb to perform cleanup that is
        not necessarily idempotent.
        """
        SubProcess._async_waitpid_cb(self, *args, **kwargs)
        # self._raw_metadata is None when _start returns
        # early due to an unsupported EAPI
        if self.returncode == os.EX_OK and self._raw_metadata is not None:
            metadata_lines = _unicode_decode(
                b"".join(self._raw_metadata),
                encoding=_encodings["repo.content"],
                errors="replace",
            ).splitlines()
            metadata = {}
            metadata_valid = True
            for l in metadata_lines:
                if "=" not in l:
                    metadata_valid = False
                    break
                key, value = l.split("=", 1)
                metadata[key] = value

            if metadata_valid:
                parsed_eapi = self._eapi
                if parsed_eapi is None:
                    parsed_eapi = "0"
                self.eapi_supported = portage.eapi_is_supported(
                    metadata["EAPI"])
                if (not metadata["EAPI"] or self.eapi_supported
                    ) and metadata["EAPI"] != parsed_eapi:
                    self._eapi_invalid(metadata)
                    metadata_valid = False

            if metadata_valid:
                # Since we're supposed to be able to efficiently obtain the
                # EAPI from _parse_eapi_ebuild_head, we don't write cache
                # entries for unsupported EAPIs.
                if self.eapi_supported:

                    if metadata.get("INHERITED", False):
                        metadata[
                            "_eclasses_"] = self.portdb.repositories.get_repo_for_location(
                                self.repo_path).eclass_db.get_eclass_data(
                                    metadata["INHERITED"].split())
                    else:
                        metadata["_eclasses_"] = {}
                    metadata.pop("INHERITED", None)

                    # If called by egencache, this cache write is
                    # undesirable when metadata-transfer is disabled.
                    if self.write_auxdb is not False:
                        self.portdb._write_cache(self.cpv, self.repo_path,
                                                 metadata, self.ebuild_hash)
                else:
                    metadata = {"EAPI": metadata["EAPI"]}
                self.metadata = metadata
            else:
                self.returncode = 1

    def _eapi_invalid(self, metadata):
        repo_name = self.portdb.getRepositoryName(self.repo_path)
        if metadata is not None:
            eapi_var = metadata["EAPI"]
        else:
            eapi_var = None
        eapi_invalid(
            self,
            self.cpv,
            repo_name,
            self.settings,
            eapi_var,
            self._eapi,
            self._eapi_lineno,
        )
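
In _output_handler above, the contract of _read_buf is implicit: None means EAGAIN (no data yet), b'' means EOF (the writer closed), and anything else is payload. A minimal sketch of a helper with that contract, assuming a non-blocking descriptor (this is not portage's actual implementation):

import errno
import os

def read_buf(fd, bufsize=4096):
    # Return bytes on success, b'' on EOF, or None when the pipe
    # simply has no data yet (EAGAIN on a non-blocking fd).
    try:
        return os.read(fd, bufsize)
    except OSError as e:
        if e.errno == errno.EAGAIN:
            return None
        raise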
Example #10
		@return: A package with the specified USE flags
		@rtype: Package
		"""
		if use is not self.use.enabled:
			pkg = self.copy()
			pkg._metadata["USE"] = " ".join(use)
		else:
			pkg = self
		return pkg

_all_metadata_keys = set(x for x in portage.auxdbkeys \
	if not x.startswith("UNUSED_"))
_all_metadata_keys.update(Package.metadata_keys)
_all_metadata_keys = frozenset(_all_metadata_keys)

_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)

class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.
	"""

	__slots__ = ("_pkg",)
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "USE", "_mtime_"])
	_use_conditional_keys = frozenset(
		['LICENSE', 'PROPERTIES', 'RESTRICT',])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		self._pkg = pkg
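
The module-level setup above filters placeholder entries out of portage.auxdbkeys before freezing the key set. A standalone illustration of the idiom, with a stand-in tuple in place of the real auxdbkeys:

auxdbkeys = ("DEPEND", "RDEPEND", "UNUSED_01", "SLOT")  # stand-in
_all_metadata_keys = frozenset(
	x for x in auxdbkeys if not x.startswith("UNUSED_"))
assert "UNUSED_01" not in _all_metadata_keys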
Example #11
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
		_spawn_kwarg_names + ("_log_file_real", "_selinux_type",)

	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	def _start(self):

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
		files.process = master_fd

		logfile = None
		if self._can_log(slave_fd):
			logfile = self.logfile

		null_input = None
		if not self.background or 0 in fd_pipes:
			# Subclasses such as AbstractEbuildProcess may have already passed
			# in a null file descriptor in fd_pipes, so use that when given.
			pass
		else:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = os.open('/dev/null', os.O_RDWR)
			fd_pipes[0] = null_input

		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.values():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		if logfile is not None:

			fd_pipes_orig = fd_pipes.copy()
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(_unicode_encode(logfile,
				encoding=_encodings['fs'], errors='strict'), mode='ab')
			if logfile.endswith('.gz'):
				self._log_file_real = files.log
				files.log = gzip.GzipFile(filename='', mode='ab',
					fileobj=files.log)

			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0o660)

			if not self.background:
				files.stdout = os.dup(fd_pipes_orig[1])

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process,
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			os.close(null_input)

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self._set_returncode((self.pid, retval))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

	def _can_log(self, slave_fd):
		return True

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		spawn_func = portage.process.spawn

		if self._selinux_type is not None:
			spawn_func = portage.selinux.spawn_wrapper(spawn_func,
				self._selinux_type)
			# bash is an allowed entrypoint, while most binaries are not
			if args[0] != BASH_BINARY:
				args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

		return spawn_func(args, **kwargs)

	def _output_handler(self, fd, event):

		files = self._files
		while True:
			buf = self._read_buf(fd, event)

			if buf is None:
				# not a POLLIN event, EAGAIN, etc...
				break

			if not buf:
				# EOF
				self._unregister()
				self.wait()
				break

			else:
				if not self.background:
					write_successful = False
					failures = 0
					while True:
						try:
							if not write_successful:
								os.write(files.stdout, buf)
								write_successful = True
							break
						except OSError as e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(files.stdout, fcntl.F_SETFL,
								fcntl.fcntl(files.stdout,
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				files.log.write(buf)
				files.log.flush()

		self._unregister_if_appropriate(event)

		return True

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""

		while True:
			buf = self._read_buf(fd, event)

			if buf is None:
				# not a POLLIN event, EAGAIN, etc...
				break

			if not buf:
				# EOF
				self._unregister()
				self.wait()
				break

		self._unregister_if_appropriate(event)

		return True

	def _unregister(self):
		super(SpawnProcess, self)._unregister()
		if self._log_file_real is not None:
			# Avoid "ResourceWarning: unclosed file" since python 3.2.
			self._log_file_real.close()
			self._log_file_real = None
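
The EAGAIN-retry loop in _output_handler above reverts O_NONBLOCK on an inherited stdout descriptor. Note that the original XORs the flag, which toggles rather than clears it; masking it off is the idempotent form. A standalone sketch of both operations:

import fcntl
import os

r, w = os.pipe()

# Enable non-blocking mode, as done on master_fd in _start():
flags = fcntl.fcntl(r, fcntl.F_GETFL)
fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)

# Clear it again; `flags & ~os.O_NONBLOCK` always clears, whereas the
# `flags ^ os.O_NONBLOCK` used above flips whatever state it finds.
flags = fcntl.fcntl(r, fcntl.F_GETFL)
fcntl.fcntl(r, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)

os.close(r)
os.close(w)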
Example #12
		@return: A package with the specified USE flags
		@rtype: Package
		"""
		if use is not self.use.enabled:
			pkg = self.copy()
			pkg._metadata["USE"] = " ".join(use)
		else:
			pkg = self
		return pkg

_all_metadata_keys = set(x for x in portage.auxdbkeys \
	if not x.startswith("UNUSED_"))
_all_metadata_keys.update(Package.metadata_keys)
_all_metadata_keys = frozenset(_all_metadata_keys)

_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)

class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
	"""
	Detect metadata updates and synchronize Package attributes.
	"""

	__slots__ = ("_pkg",)
	_wrapped_keys = frozenset(
		["COUNTER", "INHERITED", "USE", "_mtime_"])
	_use_conditional_keys = frozenset(
		['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])

	def __init__(self, pkg, metadata):
		_PackageMetadataWrapperBase.__init__(self)
		self._pkg = pkg
Example #13
class EbuildMetadataPhase(SubProcess):
    """
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    """

    __slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
     "metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
     ("_eapi", "_eapi_lineno", "_raw_metadata",)

    _file_names = ("ebuild", )
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):
        ebuild_path = self.ebuild_hash.location

        with io.open(_unicode_encode(ebuild_path,
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='r',
                     encoding=_encodings['repo.content'],
                     errors='replace') as f:
            self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

        parsed_eapi = self._eapi
        if parsed_eapi is None:
            parsed_eapi = "0"

        if not parsed_eapi:
            # An empty EAPI setting is invalid.
            self._eapi_invalid(None)
            self.returncode = 1
            self._async_wait()
            return

        self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
        if not self.eapi_supported:
            self.metadata = {"EAPI": parsed_eapi}
            self.returncode = os.EX_OK
            self._async_wait()
            return

        settings = self.settings
        settings.setcpv(self.cpv)
        settings.configdict['pkg']['EAPI'] = parsed_eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        null_input = open('/dev/null', 'rb')
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stderr__.fileno())

        # flush any pending output
        stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
        for fd in fd_pipes.values():
            if fd in stdout_filenos:
                sys.__stdout__.flush()
                sys.__stderr__.flush()
                break

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()

        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    master_fd, fcntl.F_SETFD,
                    fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

        fd_pipes[slave_fd] = slave_fd
        settings["PORTAGE_PIPE_FD"] = str(slave_fd)

        self._raw_metadata = []
        files.ebuild = master_fd
        self.scheduler.add_reader(files.ebuild, self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings=settings,
                                  debug=debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)
        settings.pop("PORTAGE_PIPE_FD", None)

        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self.returncode = retval
            self._async_wait()
            return

        self.pid = retval[0]

    def _output_handler(self):
        while True:
            buf = self._read_buf(self._files.ebuild)
            if buf is None:
                break  # EAGAIN
            elif buf:
                self._raw_metadata.append(buf)
            else:  # EIO/POLLHUP
                if self.pid is None:
                    self._unregister()
                    self._async_wait()
                else:
                    self._async_waitpid()
                break

    def _unregister(self):
        if self._files is not None:
            self.scheduler.remove_reader(self._files.ebuild)
        SubProcess._unregister(self)

    def _async_waitpid_cb(self, *args, **kwargs):
        """
        Override _async_waitpid_cb to perform cleanup that is
        not necessarily idempotent.
        """
        SubProcess._async_waitpid_cb(self, *args, **kwargs)
        # self._raw_metadata is None when _start returns
        # early due to an unsupported EAPI
        if self.returncode == os.EX_OK and \
         self._raw_metadata is not None:
            metadata_lines = _unicode_decode(
                b''.join(self._raw_metadata),
                encoding=_encodings['repo.content'],
                errors='replace').splitlines()
            metadata_valid = True
            if len(portage.auxdbkeys) != len(metadata_lines):
                # Don't trust bash's returncode if the
                # number of lines is incorrect.
                metadata_valid = False
            else:
                metadata = dict(zip(portage.auxdbkeys, metadata_lines))
                parsed_eapi = self._eapi
                if parsed_eapi is None:
                    parsed_eapi = "0"
                self.eapi_supported = \
                 portage.eapi_is_supported(metadata["EAPI"])
                if (not metadata["EAPI"] or self.eapi_supported) and \
                 metadata["EAPI"] != parsed_eapi:
                    self._eapi_invalid(metadata)
                    metadata_valid = False

            if metadata_valid:
                # Since we're supposed to be able to efficiently obtain the
                # EAPI from _parse_eapi_ebuild_head, we don't write cache
                # entries for unsupported EAPIs.
                if self.eapi_supported:

                    if metadata.get("INHERITED", False):
                        metadata["_eclasses_"] = \
                         self.portdb.repositories.get_repo_for_location(
                         self.repo_path).eclass_db.get_eclass_data(
                         metadata["INHERITED"].split())
                    else:
                        metadata["_eclasses_"] = {}
                    metadata.pop("INHERITED", None)

                    if eapi_has_automatic_unpack_dependencies(
                            metadata["EAPI"]):
                        repo = self.portdb.repositories.get_name_for_location(
                            self.repo_path)
                        unpackers = self.settings.unpack_dependencies.get(
                            repo, {}).get(metadata["EAPI"], {})
                        unpack_dependencies = extract_unpack_dependencies(
                            metadata["SRC_URI"], unpackers)
                        if unpack_dependencies:
                            metadata["DEPEND"] += (" "
                                                   if metadata["DEPEND"] else
                                                   "") + unpack_dependencies

                    # If called by egencache, this cache write is
                    # undesirable when metadata-transfer is disabled.
                    if self.write_auxdb is not False:
                        self.portdb._write_cache(self.cpv, self.repo_path,
                                                 metadata, self.ebuild_hash)
                else:
                    metadata = {"EAPI": metadata["EAPI"]}
                self.metadata = metadata
            else:
                self.returncode = 1

    def _eapi_invalid(self, metadata):
        repo_name = self.portdb.getRepositoryName(self.repo_path)
        if metadata is not None:
            eapi_var = metadata["EAPI"]
        else:
            eapi_var = None
        eapi_invalid(self, self.cpv, repo_name, self.settings, eapi_var,
                     self._eapi, self._eapi_lineno)
Example #14
class EbuildMetadataPhase(SubProcess):

	"""
	Asynchronous interface for the ebuild "depend" phase which is
	used to extract metadata from the ebuild.
	"""

	__slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
		"metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
		("_eapi", "_eapi_lineno", "_raw_metadata",)

	_file_names = ("ebuild",)
	_files_dict = slot_dict_class(_file_names, prefix="")
	_metadata_fd = 9

	def _start(self):
		ebuild_path = self.ebuild_hash.location

		with io.open(_unicode_encode(ebuild_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'],
			errors='replace') as f:
			self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)

		parsed_eapi = self._eapi
		if parsed_eapi is None:
			parsed_eapi = "0"

		if not parsed_eapi:
			# An empty EAPI setting is invalid.
			self._eapi_invalid(None)
			self._set_returncode((self.pid, 1 << 8))
			self.wait()
			return

		self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
		if not self.eapi_supported:
			self.metadata = {"EAPI": parsed_eapi}
			self._set_returncode((self.pid, os.EX_OK << 8))
			self.wait()
			return

		settings = self.settings
		settings.setcpv(self.cpv)
		settings.configdict['pkg']['EAPI'] = parsed_eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		null_input = open('/dev/null', 'rb')
		fd_pipes.setdefault(0, null_input.fileno())
		fd_pipes.setdefault(1, sys.__stdout__.fileno())
		fd_pipes.setdefault(2, sys.__stderr__.fileno())

		# flush any pending output
		stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
		for fd in fd_pipes.values():
			if fd in stdout_filenos:
				sys.__stdout__.flush()
				sys.__stderr__.flush()
				break

		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = master_fd
		self._reg_id = self.scheduler.register(files.ebuild,
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings=settings, debug=debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)
		null_input.close()

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self._set_returncode((self.pid, retval << 8))
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

	def _output_handler(self, fd, event):

		if event & self.scheduler.IO_IN:
			while True:
				try:
					self._raw_metadata.append(
						os.read(self._files.ebuild, self._bufsize))
				except OSError as e:
					if e.errno not in (errno.EAGAIN,):
						raise
					break
				else:
					if not self._raw_metadata[-1]:
						self._unregister()
						self.wait()
						break

		self._unregister_if_appropriate(event)

		return True

	def _set_returncode(self, wait_retval):
		SubProcess._set_returncode(self, wait_retval)
		# self._raw_metadata is None when _start returns
		# early due to an unsupported EAPI
		if self.returncode == os.EX_OK and \
			self._raw_metadata is not None:
			metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
				encoding=_encodings['repo.content'],
				errors='replace').splitlines()
			metadata_valid = True
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				metadata_valid = False
			else:
				metadata = dict(zip(portage.auxdbkeys, metadata_lines))
				parsed_eapi = self._eapi
				if parsed_eapi is None:
					parsed_eapi = "0"
				self.eapi_supported = \
					portage.eapi_is_supported(metadata["EAPI"])
				if (not metadata["EAPI"] or self.eapi_supported) and \
					metadata["EAPI"] != parsed_eapi:
					self._eapi_invalid(metadata)
					metadata_valid = False

			if metadata_valid:
				# Since we're supposed to be able to efficiently obtain the
				# EAPI from _parse_eapi_ebuild_head, we don't write cache
				# entries for unsupported EAPIs.
				if self.eapi_supported:

					if metadata.get("INHERITED", False):
						metadata["_eclasses_"] = \
							self.portdb.repositories.get_repo_for_location(
							self.repo_path).eclass_db.get_eclass_data(
							metadata["INHERITED"].split())
					else:
						metadata["_eclasses_"] = {}
					metadata.pop("INHERITED", None)

					# If called by egencache, this cache write is
					# undesirable when metadata-transfer is disabled.
					if self.write_auxdb is not False:
						self.portdb._write_cache(self.cpv,
							self.repo_path, metadata, self.ebuild_hash)
				else:
					metadata = {"EAPI": metadata["EAPI"]}
				self.metadata = metadata
			else:
				self.returncode = 1

	def _eapi_invalid(self, metadata):
		repo_name = self.portdb.getRepositoryName(self.repo_path)
		if metadata is not None:
			eapi_var = metadata["EAPI"]
		else:
			eapi_var = None
		eapi_invalid(self, self.cpv, repo_name, self.settings,
			eapi_var, self._eapi, self._eapi_lineno)
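
The (self.pid, retval << 8) tuples above pack a plain exit code into the status-word format returned by os.waitpid(), where the exit code occupies the high byte. A quick check of that encoding:

import os

status = 3 << 8  # exit code 3 in waitpid() status format
assert os.WIFEXITED(status)
assert os.WEXITSTATUS(status) == 3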
Example #15
class FifoIpcDaemon(AbstractPollTask):

    __slots__ = ("input_fifo", "output_fifo", "_files")

    _file_names = ("pipe_in", )
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):
        self._files = self._files_dict()

        # File streams are in unbuffered mode since we do atomic
        # read and write of whole pickles.
        self._files.pipe_in = \
         os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000 and fcntl is not None:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    self._files.pipe_in, fcntl.F_SETFD,
                    fcntl.fcntl(self._files.pipe_in, fcntl.F_GETFD)
                    | fcntl.FD_CLOEXEC)

        self.scheduler.add_reader(self._files.pipe_in, self._input_handler)

        self._registered = True

    def _reopen_input(self):
        """
        Re-open the input stream, in order to suppress
        POLLHUP events (bug #339976).
        """
        self.scheduler.remove_reader(self._files.pipe_in)
        os.close(self._files.pipe_in)
        self._files.pipe_in = \
         os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000 and fcntl is not None:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    self._files.pipe_in, fcntl.F_SETFD,
                    fcntl.fcntl(self._files.pipe_in, fcntl.F_GETFD)
                    | fcntl.FD_CLOEXEC)

        self.scheduler.add_reader(self._files.pipe_in, self._input_handler)

    def isAlive(self):
        return self._registered

    def _cancel(self):
        if self.returncode is None:
            self.returncode = 1
        self._unregister()
        # notify exit listeners
        self._async_wait()

    def _input_handler(self):
        raise NotImplementedError(self)

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """

        self._registered = False

        if self._files is not None:
            for f in self._files.values():
                self.scheduler.remove_reader(f)
                os.close(f)
            self._files = None
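
The FD_CLOEXEC blocks above keep child processes from inheriting the FIFO descriptor on Python < 3.4; from 3.4 on, descriptors created by os.open() are non-inheritable by default (PEP 446), which is why the code gates on sys.hexversion. A standalone sketch:

import fcntl
import os

fd = os.open("/dev/null", os.O_RDONLY)
fcntl.fcntl(fd, fcntl.F_SETFD,
            fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
# Close-on-exec and "non-inheritable" are the same descriptor flag:
assert not os.get_inheritable(fd)
os.close(fd)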
Example #16
class SpawnProcess(SubProcess):
    """
    Constructor keyword args are passed into portage.process.spawn().
    The required "args" keyword argument will be passed as the first
    spawn() argument.
    """

    _spawn_kwarg_names = ("env", "opt_name", "fd_pipes", "uid", "gid",
                          "groups", "umask", "logfile", "path_lookup",
                          "pre_exec")

    __slots__ = ("args",) + \
     _spawn_kwarg_names

    _file_names = ("log", "process", "stdout")
    _files_dict = slot_dict_class(_file_names, prefix="")

    def _start(self):

        if self.cancelled:
            return

        if self.fd_pipes is None:
            self.fd_pipes = {}
        fd_pipes = self.fd_pipes
        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = self._pipe(fd_pipes)
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        logfile = None
        if self._can_log(slave_fd):
            logfile = self.logfile

        null_input = None
        fd_pipes_orig = fd_pipes.copy()
        if self.background:
            # TODO: Use job control functions like tcsetpgrp() to control
            # access to stdin. Until then, use /dev/null so that any
            # attempts to read from stdin will immediately return EOF
            # instead of blocking indefinitely.
            null_input = open('/dev/null', 'rb')
            fd_pipes[0] = null_input.fileno()
        else:
            fd_pipes[0] = fd_pipes_orig[0]

        files.process = os.fdopen(master_fd, 'rb')
        if logfile is not None:

            fd_pipes[1] = slave_fd
            fd_pipes[2] = slave_fd

            files.log = open(logfile, mode='ab')
            portage.util.apply_secpass_permissions(logfile,
                                                   uid=portage.portage_uid,
                                                   gid=portage.portage_gid,
                                                   mode=0o660)

            if not self.background:
                files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

            output_handler = self._output_handler

        else:

            # Create a dummy pipe so the scheduler can monitor
            # the process from inside a poll() loop.
            fd_pipes[self._dummy_pipe_fd] = slave_fd
            if self.background:
                fd_pipes[1] = slave_fd
                fd_pipes[2] = slave_fd
            output_handler = self._dummy_handler

        kwargs = {}
        for k in self._spawn_kwarg_names:
            v = getattr(self, k)
            if v is not None:
                kwargs[k] = v

        kwargs["fd_pipes"] = fd_pipes
        kwargs["returnpid"] = True
        kwargs.pop("logfile", None)

        self._reg_id = self.scheduler.register(files.process.fileno(),
                                               self._registered_events,
                                               output_handler)
        self._registered = True

        retval = self._spawn(self.args, **kwargs)

        os.close(slave_fd)
        if null_input is not None:
            null_input.close()

        if isinstance(retval, int):
            # spawn failed
            self._unregister()
            self.returncode = retval
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)

    def _can_log(self, slave_fd):
        return True

    def _pipe(self, fd_pipes):
        """
        @type fd_pipes: dict
        @param fd_pipes: pipes from which to copy terminal size if desired.
        """
        return os.pipe()

    def _spawn(self, args, **kwargs):
        return portage.process.spawn(args, **kwargs)

    def _output_handler(self, fd, event):

        if event & PollConstants.POLLIN:

            files = self._files
            buf = array.array('B')
            try:
                buf.fromfile(files.process, self._bufsize)
            except EOFError:
                pass

            if buf:
                if not self.background:
                    write_successful = False
                    failures = 0
                    while True:
                        try:
                            if not write_successful:
                                buf.tofile(files.stdout)
                                write_successful = True
                            files.stdout.flush()
                            break
                        except IOError as e:
                            if e.errno != errno.EAGAIN:
                                raise
                            del e
                            failures += 1
                            if failures > 50:
                                # Avoid a potentially infinite loop. In
                                # most cases, the failure count is zero
                                # and it's unlikely to exceed 1.
                                raise

                            # This means that a subprocess has put an inherited
                            # stdio file descriptor (typically stdin) into
                            # O_NONBLOCK mode. This is not acceptable (see bug
                            # #264435), so revert it. We need to use a loop
                            # here since there's a race condition due to
                            # parallel processes being able to change the
                            # flags on the inherited file descriptor.
                            # TODO: When possible, avoid having child processes
                            # inherit stdio file descriptors from portage
                            # (maybe it can't be avoided with
                            # PROPERTIES=interactive).
                            fcntl.fcntl(
                                files.stdout.fileno(), fcntl.F_SETFL,
                                fcntl.fcntl(files.stdout.fileno(),
                                            fcntl.F_GETFL) ^ os.O_NONBLOCK)

                buf.tofile(files.log)
                files.log.flush()
            else:
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)
        return self._registered

    def _dummy_handler(self, fd, event):
        """
        This method is mainly interested in detecting EOF, since
        the only purpose of the pipe is to allow the scheduler to
        monitor the process from inside a poll() loop.
        """

        if event & PollConstants.POLLIN:

            buf = array.array('B')
            try:
                buf.fromfile(self._files.process, self._bufsize)
            except EOFError:
                pass

            if buf:
                pass
            else:
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)
        return self._registered
Example #17
class EbuildMetadataPhase(SubProcess):
    """
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    """

    __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
     "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
     ("_raw_metadata",)

    _file_names = ("ebuild", )
    _files_dict = slot_dict_class(_file_names, prefix="")
    _metadata_fd = 9

    def _start(self):
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_path

        eapi = None
        if eapi is None and \
         'parse-eapi-ebuild-head' in settings.features:
            eapi = portage._parse_eapi_ebuild_head(
                codecs.open(_unicode_encode(ebuild_path,
                                            encoding=_encodings['fs'],
                                            errors='strict'),
                            mode='r',
                            encoding=_encodings['repo.content'],
                            errors='replace'))

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                self.metadata_callback(self.cpv, self.ebuild_path,
                                       self.repo_path, {'EAPI': eapi},
                                       self.ebuild_mtime)
                self._set_returncode((self.pid, os.EX_OK))
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        fd_pipes.setdefault(0, sys.stdin.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        fd_pipes_orig = fd_pipes.copy()
        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = os.fdopen(master_fd, 'rb')
        self._reg_id = self.scheduler.register(files.ebuild.fileno(),
                                               self._registered_events,
                                               self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings["ROOT"],
                                  settings,
                                  debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)

        os.close(slave_fd)

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self._set_returncode((self.pid, retval))
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)

    def _output_handler(self, fd, event):

        if event & PollConstants.POLLIN:
            self._raw_metadata.append(self._files.ebuild.read())
            if not self._raw_metadata[-1]:
                self._unregister()
                self.wait()

        self._unregister_if_appropriate(event)

    def _set_returncode(self, wait_retval):
        SubProcess._set_returncode(self, wait_retval)
        if self.returncode == os.EX_OK:
            metadata_lines = ''.join(
                _unicode_decode(chunk,
                                encoding=_encodings['repo.content'],
                                errors='replace')
                for chunk in self._raw_metadata).splitlines()
            if len(portage.auxdbkeys) != len(metadata_lines):
                # Don't trust bash's returncode if the
                # number of lines is incorrect.
                self.returncode = 1
            else:
                metadata = zip(portage.auxdbkeys, metadata_lines)
                self.metadata = self.metadata_callback(self.cpv,
                                                       self.ebuild_path,
                                                       self.repo_path,
                                                       metadata,
                                                       self.ebuild_mtime)
Example #18
class EbuildMetadataPhase(SubProcess):
    """
    Asynchronous interface for the ebuild "depend" phase which is
    used to extract metadata from the ebuild.
    """

    __slots__ = ("cpv", "eapi", "ebuild_hash", "fd_pipes", "metadata_callback",
     "metadata", "portdb", "repo_path", "settings") + \
     ("_raw_metadata",)

    _file_names = ("ebuild", )
    _files_dict = slot_dict_class(_file_names, prefix="")
    _metadata_fd = 9

    def _start(self):
        settings = self.settings
        settings.setcpv(self.cpv)
        ebuild_path = self.ebuild_hash.location

        # the caller can pass in eapi in order to avoid
        # redundant _parse_eapi_ebuild_head calls
        eapi = self.eapi
        if eapi is None and \
         'parse-eapi-ebuild-head' in settings.features:
            with io.open(_unicode_encode(ebuild_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='r',
                         encoding=_encodings['repo.content'],
                         errors='replace') as f:
                eapi = portage._parse_eapi_ebuild_head(f)

        if eapi is not None:
            if not portage.eapi_is_supported(eapi):
                self.metadata = self.metadata_callback(self.cpv,
                                                       self.repo_path,
                                                       {'EAPI': eapi},
                                                       self.ebuild_hash)
                self._set_returncode((self.pid, os.EX_OK << 8))
                self.wait()
                return

            settings.configdict['pkg']['EAPI'] = eapi

        debug = settings.get("PORTAGE_DEBUG") == "1"
        master_fd = None
        slave_fd = None
        fd_pipes = None
        if self.fd_pipes is not None:
            fd_pipes = self.fd_pipes.copy()
        else:
            fd_pipes = {}

        null_input = open('/dev/null', 'rb')
        fd_pipes.setdefault(0, null_input.fileno())
        fd_pipes.setdefault(1, sys.stdout.fileno())
        fd_pipes.setdefault(2, sys.stderr.fileno())

        # flush any pending output
        for fd in fd_pipes.values():
            if fd == sys.stdout.fileno():
                sys.stdout.flush()
            if fd == sys.stderr.fileno():
                sys.stderr.flush()

        self._files = self._files_dict()
        files = self._files

        master_fd, slave_fd = os.pipe()
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        fd_pipes[self._metadata_fd] = slave_fd

        self._raw_metadata = []
        files.ebuild = master_fd
        self._reg_id = self.scheduler.register(files.ebuild,
                                               self._registered_events,
                                               self._output_handler)
        self._registered = True

        retval = portage.doebuild(ebuild_path,
                                  "depend",
                                  settings=settings,
                                  debug=debug,
                                  mydbapi=self.portdb,
                                  tree="porttree",
                                  fd_pipes=fd_pipes,
                                  returnpid=True)

        os.close(slave_fd)
        null_input.close()

        if isinstance(retval, int):
            # doebuild failed before spawning
            self._unregister()
            self._set_returncode((self.pid, retval << 8))
            self.wait()
            return

        self.pid = retval[0]
        portage.process.spawned_pids.remove(self.pid)

    def _output_handler(self, fd, event):

        if event & self.scheduler.IO_IN:
            while True:
                try:
                    self._raw_metadata.append(
                        os.read(self._files.ebuild, self._bufsize))
                except OSError as e:
                    if e.errno not in (errno.EAGAIN, ):
                        raise
                    break
                else:
                    if not self._raw_metadata[-1]:
                        self._unregister()
                        self.wait()
                        break

        self._unregister_if_appropriate(event)

        return True

    def _set_returncode(self, wait_retval):
        SubProcess._set_returncode(self, wait_retval)
        # self._raw_metadata is None when _start returns
        # early due to an unsupported EAPI detected with
        # FEATURES=parse-eapi-ebuild-head
        if self.returncode == os.EX_OK and \
         self._raw_metadata is not None:
            metadata_lines = _unicode_decode(
                b''.join(self._raw_metadata),
                encoding=_encodings['repo.content'],
                errors='replace').splitlines()
            if len(portage.auxdbkeys) != len(metadata_lines):
                # Don't trust bash's returncode if the
                # number of lines is incorrect.
                self.returncode = 1
            else:
                metadata = zip(portage.auxdbkeys, metadata_lines)
                self.metadata = self.metadata_callback(self.cpv,
                                                       self.repo_path,
                                                       metadata,
                                                       self.ebuild_hash)
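
The decode-and-zip validation at the end of _set_returncode above is easy to
exercise in isolation. The sketch below is a hypothetical reconstruction of
just that step: AUXDB_KEYS is a stand-in for the real portage.auxdbkeys tuple,
the chunk list imitates successive os.read() results from the metadata pipe,
and a plain utf-8 decode replaces the _encodings['repo.content'] lookup.

# Minimal sketch of the line-count check from _set_returncode (assumed
# simplification; AUXDB_KEYS is a hypothetical stand-in for portage.auxdbkeys).
AUXDB_KEYS = ("DEPEND", "RDEPEND", "SLOT")

def parse_raw_metadata(raw_chunks, keys=AUXDB_KEYS):
    """Join pipe chunks, decode, and pair one line per auxdb key.

    Returns a dict on success, or None when the line count is wrong,
    mirroring the returncode=1 path above.
    """
    lines = b"".join(raw_chunks).decode("utf-8", errors="replace").splitlines()
    if len(lines) != len(keys):
        return None  # don't trust bash's returncode; wrong number of lines
    return dict(zip(keys, lines))

# Three chunks as they might arrive from os.read() on the master pipe fd.
chunks = [b"dev-libs/libfoo\n", b"dev-libs/libfoo\n", b"0\n"]
assert parse_raw_metadata(chunks) == {
    "DEPEND": "dev-libs/libfoo",
    "RDEPEND": "dev-libs/libfoo",
    "SLOT": "0",
}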
Example #19
class LinkageMapELF(object):

	"""Models dynamic linker dependencies."""

	_needed_aux_key = "NEEDED.ELF.2"
	_soname_map_class = slot_dict_class(
		("consumers", "providers"), prefix="")

	class _obj_properties_class(object):

		__slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
			"owner",)

		def __init__(self, arch, needed, runpaths, soname, alt_paths, owner):
			self.arch = arch
			self.needed = needed
			self.runpaths = runpaths
			self.soname = soname
			self.alt_paths = alt_paths
			self.owner = owner

	def __init__(self, vardbapi):
		self._dbapi = vardbapi
		self._root = self._dbapi.settings['ROOT']
		self._libs = {}
		self._obj_properties = {}
		self._obj_key_cache = {}
		self._defpath = set()
		self._path_key_cache = {}

	def _clear_cache(self):
		self._libs.clear()
		self._obj_properties.clear()
		self._obj_key_cache.clear()
		self._defpath.clear()
		self._path_key_cache.clear()

	def _path_key(self, path):
		key = self._path_key_cache.get(path)
		if key is None:
			key = self._ObjectKey(path, self._root)
			self._path_key_cache[path] = key
		return key

	def _obj_key(self, path):
		key = self._obj_key_cache.get(path)
		if key is None:
			key = self._ObjectKey(path, self._root)
			self._obj_key_cache[path] = key
		return key

	class _ObjectKey(object):

		"""Helper class used as _obj_properties keys for objects."""

		__slots__ = ("_key",)

		def __init__(self, obj, root):
			"""
			This takes a path to an object.

			@param obj: path to a file
			@type obj: string (example: '/usr/bin/bar')

			"""
			self._key = self._generate_object_key(obj, root)

		def __hash__(self):
			return hash(self._key)

		def __eq__(self, other):
			return self._key == other._key

		def _generate_object_key(self, obj, root):
			"""
			Generate object key for a given object.

			@param obj: path to a file
			@type obj: string (example: '/usr/bin/bar')
			@rtype: 2-tuple of types (long, int) if object exists. string if
				object does not exist.
			@return:
				1. 2-tuple of object's inode and device from a stat call, if object
					exists.
				2. realpath of object if object does not exist.

			"""

			os = _os_merge

			try:
				_unicode_encode(obj,
					encoding=_encodings['merge'], errors='strict')
			except UnicodeEncodeError:
				# The package appears to have been merged with a 
				# different value of sys.getfilesystemencoding(),
				# so fall back to utf_8 if appropriate.
				try:
					_unicode_encode(obj,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeEncodeError:
					pass
				else:
					os = portage.os

			abs_path = os.path.join(root, obj.lstrip(os.sep))
			try:
				object_stat = os.stat(abs_path)
			except OSError:
				# Use the realpath as the key if the file does not exist on the
				# filesystem.
				return os.path.realpath(abs_path)
			# Return a tuple of the device and inode.
			return (object_stat.st_dev, object_stat.st_ino)

		def file_exists(self):
			"""
			Determine if the file for this key exists on the filesystem.

			@rtype: Boolean
			@return:
				1. True if the file exists.
				2. False if the file does not exist or is a broken symlink.

			"""
			return isinstance(self._key, tuple)

	class _LibGraphNode(_ObjectKey):
		__slots__ = ("alt_paths",)

		def __init__(self, key):
			"""
			Create a _LibGraphNode from an existing _ObjectKey.
			This re-uses the _key attribute in order to avoid repeating
			any previous stat calls, which helps to avoid potential race
			conditions due to inconsistent stat results when the
			file system is being modified concurrently.
			"""
			self._key = key._key
			self.alt_paths = set()

		def __str__(self):
			return str(sorted(self.alt_paths))

	def rebuild(self, exclude_pkgs=None, include_file=None,
		preserve_paths=None):
		"""
		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.

		@param exclude_pkgs: A set of packages that should be excluded from
			the LinkageMap, since they are being unmerged and their NEEDED
			entries are therefore irrelevant and would only serve to corrupt
			the LinkageMap.
		@type exclude_pkgs: set
		@param include_file: The path of a file containing NEEDED entries for
			a package which does not exist in the vardbapi yet because it is
			currently being merged.
		@type include_file: String
		@param preserve_paths: Libraries preserved by a package instance that
			is currently being merged. They need to be explicitly passed to the
			LinkageMap, since they are not registered in the
			PreservedLibsRegistry yet.
		@type preserve_paths: set
		"""

		os = _os_merge
		root = self._root
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._dbapi.settings['EROOT'],
			env=self._dbapi.settings))
		libs = self._libs
		obj_properties = self._obj_properties

		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			for line in grabfile(include_file):
				lines.append((None, include_file, line))

		aux_keys = [self._needed_aux_key]
		can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
		if can_lock:
			self._dbapi.lock()
		try:
			for cpv in self._dbapi.cpv_all():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					continue
				needed_file = self._dbapi.getpath(cpv,
					filename=self._needed_aux_key)
				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
					lines.append((cpv, needed_file, line))
		finally:
			if can_lock:
				self._dbapi.unlock()

		# have to call scanelf for preserved libs here as they aren't 
		# registered in NEEDED.ELF.2 files
		plibs = {}
		if preserve_paths is not None:
			plibs.update((x, None) for x in preserve_paths)
		if self._dbapi._plib_registry and \
			self._dbapi._plib_registry.hasEntries():
			for cpv, items in \
				self._dbapi._plib_registry.getPreservedLibs().items():
				if exclude_pkgs is not None and cpv in exclude_pkgs:
					# These preserved libs will either be unmerged,
					# rendering them irrelevant, or they will be
					# preserved in the replacement package and are
					# already represented via the preserve_paths
					# parameter.
					continue
				plibs.update((x, cpv) for x in items)
		if plibs:
			args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
			args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
				for x in plibs)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					l = l[3:].rstrip("\n")
					if not l:
						continue
					try:
						entry = NeededEntry.parse("scanelf", l)
					except InvalidData as e:
						writemsg_level("\n%s\n\n" % (e,),
							level=logging.ERROR, noiselevel=-1)
						continue
					try:
						with open(_unicode_encode(entry.filename,
							encoding=_encodings['fs'],
							errors='strict'), 'rb') as f:
							elf_header = ELFHeader.read(f)
					except EnvironmentError as e:
						if e.errno != errno.ENOENT:
							raise
						# File removed concurrently.
						continue
					entry.multilib_category = compute_multilib_category(elf_header)
					entry.filename = entry.filename[root_len:]
					owner = plibs.pop(entry.filename, None)
					lines.append((owner, "scanelf", _unicode(entry)))
				proc.wait()
				proc.stdout.close()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x, cpv in plibs.items():
				lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))

		# Share identical frozenset instances when available,
		# in order to conserve memory.
		frozensets = {}

		for owner, location, l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			if '\0' in l:
				# os.stat() will raise "TypeError: must be encoded string
				# without NULL bytes, not str" in this case.
				writemsg_level(_("\nLine contains null byte(s) " \
					"in %s: %s\n\n") % (location, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			try:
				entry = NeededEntry.parse(location, l)
			except InvalidData as e:
				writemsg_level("\n%s\n\n" % (e,),
					level=logging.ERROR, noiselevel=-1)
				continue

			# If NEEDED.ELF.2 contains the new multilib category field,
			# then use that for categorization. Otherwise, if a mapping
			# exists, map e_machine (entry.arch) to an approximate
			# multilib category. If all else fails, use e_machine, just
			# as older versions of portage did.
			arch = entry.multilib_category
			if arch is None:
				arch = _approx_multilib_categories.get(
					entry.arch, entry.arch)

			obj = entry.filename
			soname = entry.soname
			expand = {"ORIGIN": os.path.dirname(entry.filename)}
			path = frozenset(normalize_path(
				varexpand(x, expand, error_leader=lambda: "%s: " % location))
				for x in entry.runpaths)
			path = frozensets.setdefault(path, path)
			needed = frozenset(entry.needed)

			needed = frozensets.setdefault(needed, needed)

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = self._obj_properties_class(
					arch, needed, path, soname, [], owner)
				obj_properties[obj_key] = myprops
			# All object paths are added to the properties object's alt_paths list.
			myprops.alt_paths.append(obj)

			# Don't index the same file more than once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[soname] = soname_map
				soname_map.providers.append(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=[], consumers=[])
					arch_map[needed_soname] = soname_map
				soname_map.consumers.append(obj_key)

		for arch, sonames in libs.items():
			for soname_node in sonames.values():
				soname_node.providers = tuple(set(soname_node.providers))
				soname_node.consumers = tuple(set(soname_node.consumers))

	def listBrokenBinaries(self, debug=False):
		"""
		Find binaries and their needed sonames, which have no providers.

		@param debug: Boolean to enable debug output
		@type debug: Boolean
		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
		@return: The return value is an object -> set-of-sonames mapping, where
			object is a broken binary and the set consists of sonames needed by
			object that have no corresponding libraries to fulfill the dependency.

		"""

		os = _os_merge

		class _LibraryCache(object):

			"""
			Caches properties associated with paths.

			The purpose of this class is to prevent multiple instances of
			_ObjectKey for the same paths.

			"""

			def __init__(cache_self):
				cache_self.cache = {}

			def get(cache_self, obj):
				"""
				Caches and returns properties associated with an object.

				@param obj: absolute path (can be symlink)
				@type obj: string (example: '/usr/lib/libfoo.so')
				@rtype: 4-tuple with types
					(string or None, string or None, 2-tuple, Boolean)
				@return: 4-tuple with the following components:
					1. arch as a string or None if it does not exist,
					2. soname as a string or None if it does not exist,
					3. obj_key as 2-tuple,
					4. Boolean representing whether the object exists.
					(example: ('x86_64', 'libfoo.so.1', (123L, 456L), True))

				"""
				if obj in cache_self.cache:
					return cache_self.cache[obj]
				else:
					obj_key = self._obj_key(obj)
					# Check that the library exists on the filesystem.
					if obj_key.file_exists():
						# Get the arch and soname from LinkageMap._obj_properties if
						# it exists. Otherwise, None.
						obj_props = self._obj_properties.get(obj_key)
						if obj_props is None:
							arch = None
							soname = None
						else:
							arch = obj_props.arch
							soname = obj_props.soname
						return cache_self.cache.setdefault(obj, \
								(arch, soname, obj_key, True))
					else:
						return cache_self.cache.setdefault(obj, \
								(None, None, obj_key, False))

		rValue = {}
		cache = _LibraryCache()
		providers = self.listProviders()

		# Iterate over all obj_keys and their providers.
		for obj_key, sonames in providers.items():
			obj_props = self._obj_properties[obj_key]
			arch = obj_props.arch
			path = obj_props.runpaths
			objs = obj_props.alt_paths
			path = path.union(self._defpath)
			# Iterate over each needed soname and the set of library paths that
			# fulfill the soname to determine if the dependency is broken.
			for soname, libraries in sonames.items():
				# validLibraries is used to store libraries, which satisfy soname,
				# so if no valid libraries are found, the soname is not satisfied
				# for obj_key.  If unsatisfied, objects associated with obj_key
				# must be emerged.
				validLibraries = set()
				# It could be the case that the library to satisfy the soname is
				# not in the obj's runpath, but a symlink to the library is (eg
				# libnvidia-tls.so.1 in nvidia-drivers).  Also, since LinkageMap
				# does not catalog symlinks, broken or missing symlinks may go
				# unnoticed.  As a result of these cases, check that a file with
				# the same name as the soname exists in obj's runpath.
				# XXX If we catalog symlinks in LinkageMap, this could be improved.
				for directory in path:
					cachedArch, cachedSoname, cachedKey, cachedExists = \
							cache.get(os.path.join(directory, soname))
					# Check that this library provides the needed soname.  Doing
					# this, however, will cause consumers of libraries missing
					# sonames to be unnecessarily emerged. (eg libmix.so)
					if cachedSoname == soname and cachedArch == arch:
						validLibraries.add(cachedKey)
						if debug and cachedKey not in \
								set(map(self._obj_key_cache.get, libraries)):
							# XXX This is most often due to soname symlinks not in
							# a library's directory.  We could catalog symlinks in
							# LinkageMap to avoid checking for this edge case here.
							writemsg_level(
								_("Found provider outside of findProviders:") + \
								(" %s -> %s %s\n" % (os.path.join(directory, soname),
								self._obj_properties[cachedKey].alt_paths, libraries)),
								level=logging.DEBUG,
								noiselevel=-1)
						# A valid library has been found, so there is no need to
						# continue.
						break
					if debug and cachedArch == arch and \
							cachedKey in self._obj_properties:
						writemsg_level((_("Broken symlink or missing/bad soname: " + \
							"%(dir_soname)s -> %(cachedKey)s " + \
							"with soname %(cachedSoname)s but expecting %(soname)s") % \
							{"dir_soname":os.path.join(directory, soname),
							"cachedKey": self._obj_properties[cachedKey],
							"cachedSoname": cachedSoname, "soname":soname}) + "\n",
							level=logging.DEBUG,
							noiselevel=-1)
				# This conditional checks if there are no libraries to satisfy the
				# soname (empty set).
				if not validLibraries:
					for obj in objs:
						rValue.setdefault(obj, set()).add(soname)
					# If no valid libraries have been found by this point, then
					# there are no files named with the soname within obj's runpath,
					# but if there are libraries (from the providers mapping), it is
					# likely that soname symlinks or the actual libraries are
					# missing or broken.  Thus those libraries are added to rValue
					# in order to emerge corrupt library packages.
					for lib in libraries:
						rValue.setdefault(lib, set()).add(soname)
						if debug:
							if not os.path.isfile(lib):
								writemsg_level(_("Missing library:") + " %s\n" % (lib,),
									level=logging.DEBUG,
									noiselevel=-1)
							else:
								writemsg_level(_("Possibly missing symlink:") + \
									"%s\n" % (os.path.join(os.path.dirname(lib), soname)),
									level=logging.DEBUG,
									noiselevel=-1)
		return rValue

	def listProviders(self):
		"""
		Find the providers for all object keys in LinkageMap.

		@rtype: dict (example:
			{(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object key -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.

		"""
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all object keys within LinkageMap.
		for obj_key in self._obj_properties:
			rValue.setdefault(obj_key, self.findProviders(obj_key))
		return rValue

	def isMasterLink(self, obj):
		"""
		Determine whether an object is a "master" symlink, which means
		that its basename is the same as the beginning part of the
		soname and it lacks the soname's version component.

		Examples:

		soname                 | master symlink name
		--------------------------------------------
		libarchive.so.2.8.4    | libarchive.so
		libproc-3.2.8.so       | libproc.so

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/foo')
		@rtype: Boolean
		@return:
			1. True if obj is a master link
			2. False if obj is not a master link

		"""
		os = _os_merge
		obj_key = self._obj_key(obj)
		if obj_key not in self._obj_properties:
			raise KeyError("%s (%s) not in object list" % (obj_key, obj))
		basename = os.path.basename(obj)
		soname = self._obj_properties[obj_key].soname
		return len(basename) < len(soname) and \
			basename.endswith(".so") and \
			soname.startswith(basename[:-3])

	def listLibraryObjects(self):
		"""
		Return a list of library objects.

		Known limitation: library objects lacking an soname are not included.

		@rtype: list of strings
		@return: list of paths to all providers

		"""
		rValue = []
		if not self._libs:
			self.rebuild()
		for arch_map in self._libs.values():
			for soname_map in arch_map.values():
				for obj_key in soname_map.providers:
					rValue.extend(self._obj_properties[obj_key].alt_paths)
		return rValue

	def getOwners(self, obj):
		"""
		Return the package(s) associated with an object. Raises KeyError
		if the object is unknown. Returns an empty tuple if the owner(s)
		are unknown.

		NOTE: For preserved libraries, the owner(s) may have been
		previously uninstalled, but these uninstalled owners can be
		returned by this method since they are registered in the
		PreservedLibsRegistry.

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/bar')
		@rtype: tuple
		@return: a tuple of cpv
		"""
		if not self._libs:
			self.rebuild()
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
		else:
			obj_key = self._obj_key_cache.get(obj)
			if obj_key is None:
				raise KeyError("%s not in object list" % obj)
		obj_props = self._obj_properties.get(obj_key)
		if obj_props is None:
			raise KeyError("%s not in object list" % obj_key)
		if obj_props.owner is None:
			return ()
		return (obj_props.owner,)

	def getSoname(self, obj):
		"""
		Return the soname associated with an object.

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/bar')
		@rtype: string
		@return: soname as a string

		"""
		if not self._libs:
			self.rebuild()
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
			return self._obj_properties[obj_key].soname
		if obj not in self._obj_key_cache:
			raise KeyError("%s not in object list" % obj)
		return self._obj_properties[self._obj_key_cache[obj]].soname

	def findProviders(self, obj):
		"""
		Find providers for an object or object key.

		This method may be called with a key from _obj_properties.

		In some cases, not all valid libraries are returned.  This may occur when
		an soname symlink referencing a library is in an object's runpath while
		the actual library is not.  We should consider cataloging symlinks within
		LinkageMap as this would avoid those cases and would be a better model of
		library dependencies (since the dynamic linker actually searches for
		files named with the soname in the runpaths).

		@param obj: absolute path to an object or a key from _obj_properties
		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
		@rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
		@return: The return value is a soname -> set-of-library-paths, where
		set-of-library-paths satisfy soname.

		"""

		os = _os_merge

		rValue = {}

		if not self._libs:
			self.rebuild()

		# Determine the obj_key from the arguments.
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
		else:
			obj_key = self._obj_key(obj)
			if obj_key not in self._obj_properties:
				raise KeyError("%s (%s) not in object list" % (obj_key, obj))

		obj_props = self._obj_properties[obj_key]
		arch = obj_props.arch
		needed = obj_props.needed
		path = obj_props.runpaths
		path_keys = set(self._path_key(x) for x in path.union(self._defpath))
		for soname in needed:
			rValue[soname] = set()
			if arch not in self._libs or soname not in self._libs[arch]:
				continue
			# For each potential provider of the soname, add it to rValue if it
			# resides in the obj's runpath.
			for provider_key in self._libs[arch][soname].providers:
				providers = self._obj_properties[provider_key].alt_paths
				for provider in providers:
					if self._path_key(os.path.dirname(provider)) in path_keys:
						rValue[soname].add(provider)
		return rValue

	def findConsumers(self, obj, exclude_providers=None, greedy=True):
		"""
		Find consumers of an object or object key.

		This method may be called with a key from _obj_properties.  If this
		method is going to be called with an object key, then to avoid missing
		shadowed libraries, do not pass new _ObjectKey instances to this method.
		Instead pass the obj as a string.

		In some cases, not all consumers are returned.  This may occur when
		an soname symlink referencing a library is in an object's runpath while
		the actual library is not. For example, this problem is noticeable for
		binutils since its libraries are added to the path via symlinks that
		are generated in the /usr/$CHOST/lib/ directory by binutils-config.
		Failure to recognize consumers of these symlinks makes preserve-libs
		fail to preserve binutils libs that are needed by these unrecognized
		consumers.

		Note that library consumption via dlopen (common for kde plugins) is
		currently undetected. However, it is possible to use the
		corresponding libtool archive (*.la) files to detect such consumers
		(revdep-rebuild is able to detect them).

		The exclude_providers argument is useful for determining whether
		removal of one or more packages will create unsatisfied consumers. When
		this option is given, consumers are excluded from the results if there
		is an alternative provider (which is not excluded) of the required
		soname such that the consumers will remain satisfied if the files
		owned by exclude_providers are removed.

		@param obj: absolute path to an object or a key from _obj_properties
		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
		@param exclude_providers: A collection of callables that each take a
			single argument referring to the path of a library (example:
			'/usr/lib/libssl.so.0.9.8'), and return True if the library is
			owned by a provider which is planned for removal.
		@type exclude_providers: collection
		@param greedy: If True, then include consumers that are satisfied
		by alternative providers, otherwise omit them. Default is True.
		@type greedy: Boolean
		@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
		@return: The return value is a set of paths to consumers of the
		given object.

		"""

		os = _os_merge

		if not self._libs:
			self.rebuild()

		# Determine the obj_key and the set of objects matching the arguments.
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
			objs = self._obj_properties[obj_key].alt_paths
		else:
			objs = set([obj])
			obj_key = self._obj_key(obj)
			if obj_key not in self._obj_properties:
				raise KeyError("%s (%s) not in object list" % (obj_key, obj))

		# If there is another version of this lib with the
		# same soname and the soname symlink points to that
		# other version, this lib will be shadowed and won't
		# have any consumers.
		if not isinstance(obj, self._ObjectKey):
			soname = self._obj_properties[obj_key].soname
			soname_link = os.path.join(self._root,
				os.path.dirname(obj).lstrip(os.path.sep), soname)
			obj_path = os.path.join(self._root, obj.lstrip(os.sep))
			try:
				soname_st = os.stat(soname_link)
				obj_st = os.stat(obj_path)
			except OSError:
				pass
			else:
				if (obj_st.st_dev, obj_st.st_ino) != \
					(soname_st.st_dev, soname_st.st_ino):
					return set()

		obj_props = self._obj_properties[obj_key]
		arch = obj_props.arch
		soname = obj_props.soname

		soname_node = None
		arch_map = self._libs.get(arch)
		if arch_map is not None:
			soname_node = arch_map.get(soname)

		defpath_keys = set(self._path_key(x) for x in self._defpath)
		satisfied_consumer_keys = set()
		if soname_node is not None:
			if exclude_providers is not None or not greedy:
				relevant_dir_keys = set()
				for provider_key in soname_node.providers:
					if not greedy and provider_key == obj_key:
						continue
					provider_objs = self._obj_properties[provider_key].alt_paths
					for p in provider_objs:
						provider_excluded = False
						if exclude_providers is not None:
							for excluded_provider_isowner in exclude_providers:
								if excluded_provider_isowner(p):
									provider_excluded = True
									break
						if not provider_excluded:
							# This provider is not excluded. It will
							# satisfy a consumer of this soname if it
							# is in the default ld.so path or the
							# consumer's runpath.
							relevant_dir_keys.add(
								self._path_key(os.path.dirname(p)))

				if relevant_dir_keys:
					for consumer_key in soname_node.consumers:
						path = self._obj_properties[consumer_key].runpaths
						path_keys = defpath_keys.copy()
						path_keys.update(self._path_key(x) for x in path)
						if relevant_dir_keys.intersection(path_keys):
							satisfied_consumer_keys.add(consumer_key)

		rValue = set()
		if soname_node is not None:
			# For each potential consumer, add it to rValue if an object from the
			# arguments resides in the consumer's runpath.
			objs_dir_keys = set(self._path_key(os.path.dirname(x))
				for x in objs)
			for consumer_key in soname_node.consumers:
				if consumer_key in satisfied_consumer_keys:
					continue
				consumer_props = self._obj_properties[consumer_key]
				path = consumer_props.runpaths
				consumer_objs = consumer_props.alt_paths
				path_keys = defpath_keys.union(self._path_key(x) for x in path)
				if objs_dir_keys.intersection(path_keys):
					rValue.update(consumer_objs)
		return rValue
Example #20
class LinkageMapELF(object):

	"""Models dynamic linker dependencies."""

	_needed_aux_key = "NEEDED.ELF.2"
	_soname_map_class = slot_dict_class(
		("consumers", "providers"), prefix="")

	def __init__(self, vardbapi):
		self._dbapi = vardbapi
		self._eroot = self._dbapi._eroot
		self._libs = {}
		self._obj_properties = {}
		self._obj_key_cache = {}
		self._defpath = set()
		self._path_key_cache = {}

	def _clear_cache(self):
		self._libs.clear()
		self._obj_properties.clear()
		self._obj_key_cache.clear()
		self._defpath.clear()
		self._path_key_cache.clear()

	def _path_key(self, path):
		key = self._path_key_cache.get(path)
		if key is None:
			key = self._ObjectKey(path, self._eroot)
			self._path_key_cache[path] = key
		return key

	def _obj_key(self, path):
		key = self._obj_key_cache.get(path)
		if key is None:
			key = self._ObjectKey(path, self._eroot)
			self._obj_key_cache[path] = key
		return key

	class _ObjectKey(object):

		"""Helper class used as _obj_properties keys for objects."""

		__slots__ = ("__weakref__", "_key")

		def __init__(self, obj, root):
			"""
			This takes a path to an object.

			@param obj: path to a file
			@type obj: string (example: '/usr/bin/bar')

			"""
			self._key = self._generate_object_key(obj, root)

		def __hash__(self):
			return hash(self._key)

		def __eq__(self, other):
			return self._key == other._key

		def _generate_object_key(self, obj, root):
			"""
			Generate object key for a given object.

			@param obj: path to a file
			@type obj: string (example: '/usr/bin/bar')
			@rtype: 2-tuple of types (long, int) if object exists. string if
				object does not exist.
			@return:
				1. 2-tuple of object's inode and device from a stat call, if object
					exists.
				2. realpath of object if object does not exist.

			"""

			os = _os_merge

			try:
				_unicode_encode(obj,
					encoding=_encodings['merge'], errors='strict')
			except UnicodeEncodeError:
				# The package appears to have been merged with a 
				# different value of sys.getfilesystemencoding(),
				# so fall back to utf_8 if appropriate.
				try:
					_unicode_encode(obj,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeEncodeError:
					pass
				else:
					os = portage.os

			abs_path = os.path.join(root, obj.lstrip(os.sep))
			try:
				object_stat = os.stat(abs_path)
			except OSError:
				# Use the realpath as the key if the file does not exist on the
				# filesystem.
				return os.path.realpath(abs_path)
			# Return a tuple of the device and inode.
			return (object_stat.st_dev, object_stat.st_ino)

		def file_exists(self):
			"""
			Determine if the file for this key exists on the filesystem.

			@rtype: Boolean
			@return:
				1. True if the file exists.
				2. False if the file does not exist or is a broken symlink.

			"""
			return isinstance(self._key, tuple)

	class _LibGraphNode(_ObjectKey):
		__slots__ = ("alt_paths",)

		def __init__(self, obj, root):
			LinkageMapELF._ObjectKey.__init__(self, obj, root)
			self.alt_paths = set()

		def __str__(self):
			return str(sorted(self.alt_paths))

	def rebuild(self, exclude_pkgs=None, include_file=None):
		"""
		Raises CommandNotFound if there are preserved libs
		and the scanelf binary is not available.
		"""

		os = _os_merge
		root = self._eroot
		root_len = len(root) - 1
		self._clear_cache()
		self._defpath.update(getlibpaths(self._eroot))
		libs = self._libs
		obj_properties = self._obj_properties

		lines = []

		# Data from include_file is processed first so that it
		# overrides any data from previously installed files.
		if include_file is not None:
			lines += grabfile(include_file)

		aux_keys = [self._needed_aux_key]
		for cpv in self._dbapi.cpv_all():
			if exclude_pkgs is not None and cpv in exclude_pkgs:
				continue
			lines += self._dbapi.aux_get(cpv, aux_keys)[0].split('\n')
		# Cache NEEDED.* files to avoid doing excessive IO on every rebuild.
		self._dbapi.flush_cache()

		# have to call scanelf for preserved libs here as they aren't 
		# registered in NEEDED.ELF.2 files
		plibs = set()
		if self._dbapi._plib_registry and self._dbapi._plib_registry.getPreservedLibs():
			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
			for items in self._dbapi._plib_registry.getPreservedLibs().values():
				plibs.update(items)
				args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
					for x in items)
			try:
				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				raise CommandNotFound(args[0])
			else:
				for l in proc.stdout:
					try:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='strict')
					except UnicodeDecodeError:
						l = _unicode_decode(l,
							encoding=_encodings['content'], errors='replace')
						writemsg_level(_("\nError decoding characters " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
					l = l[3:].rstrip("\n")
					if not l:
						continue
					fields = l.split(";")
					if len(fields) < 5:
						writemsg_level(_("\nWrong number of fields " \
							"returned from scanelf: %s\n\n") % (l,),
							level=logging.ERROR, noiselevel=-1)
						continue
					fields[1] = fields[1][root_len:]
					plibs.discard(fields[1])
					lines.append(";".join(fields))
				proc.wait()

		if plibs:
			# Preserved libraries that did not appear in the scanelf output.
			# This is known to happen with statically linked libraries.
			# Generate dummy lines for these, so we can assume that every
			# preserved library has an entry in self._obj_properties. This
			# is important in order to prevent findConsumers from raising
			# an unwanted KeyError.
			for x in plibs:
				lines.append(";".join(['', x, '', '', '']))

		for l in lines:
			l = l.rstrip("\n")
			if not l:
				continue
			fields = l.split(";")
			if len(fields) < 5:
				writemsg_level(_("\nWrong number of fields " \
					"in %s: %s\n\n") % (self._needed_aux_key, l),
					level=logging.ERROR, noiselevel=-1)
				continue
			arch = fields[0]
			obj = fields[1]
			soname = fields[2]
			path = set([normalize_path(x) \
				for x in filter(None, fields[3].replace(
				"${ORIGIN}", os.path.dirname(obj)).replace(
				"$ORIGIN", os.path.dirname(obj)).split(":"))])
			needed = [x for x in fields[4].split(",") if x]

			obj_key = self._obj_key(obj)
			indexed = True
			myprops = obj_properties.get(obj_key)
			if myprops is None:
				indexed = False
				myprops = (arch, needed, path, soname, set())
				obj_properties[obj_key] = myprops
			# All object paths are added into the obj_properties tuple.
			myprops[4].add(obj)

			# Don't index the same file more than once since only one
			# set of data can be correct and therefore mixing data
			# may corrupt the index (include_file overrides previously
			# installed).
			if indexed:
				continue

			arch_map = libs.get(arch)
			if arch_map is None:
				arch_map = {}
				libs[arch] = arch_map
			if soname:
				soname_map = arch_map.get(soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=set(), consumers=set())
					arch_map[soname] = soname_map
				soname_map.providers.add(obj_key)
			for needed_soname in needed:
				soname_map = arch_map.get(needed_soname)
				if soname_map is None:
					soname_map = self._soname_map_class(
						providers=set(), consumers=set())
					arch_map[needed_soname] = soname_map
				soname_map.consumers.add(obj_key)

	def listBrokenBinaries(self, debug=False):
		"""
		Find binaries and their needed sonames, which have no providers.

		@param debug: Boolean to enable debug output
		@type debug: Boolean
		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
		@return: The return value is an object -> set-of-sonames mapping, where
			object is a broken binary and the set consists of sonames needed by
			object that have no corresponding libraries to fulfill the dependency.

		"""

		os = _os_merge

		class _LibraryCache(object):

			"""
			Caches properties associated with paths.

			The purpose of this class is to prevent multiple instances of
			_ObjectKey for the same paths.

			"""

			def __init__(cache_self):
				cache_self.cache = {}

			def get(cache_self, obj):
				"""
				Caches and returns properties associated with an object.

				@param obj: absolute path (can be symlink)
				@type obj: string (example: '/usr/lib/libfoo.so')
				@rtype: 4-tuple with types
					(string or None, string or None, 2-tuple, Boolean)
				@return: 4-tuple with the following components:
					1. arch as a string or None if it does not exist,
					2. soname as a string or None if it does not exist,
					3. obj_key as 2-tuple,
					4. Boolean representing whether the object exists.
					(example: ('x86_64', 'libfoo.so.1', (123L, 456L), True))

				"""
				if obj in cache_self.cache:
					return cache_self.cache[obj]
				else:
					obj_key = self._obj_key(obj)
					# Check that the library exists on the filesystem.
					if obj_key.file_exists():
						# Get the arch and soname from LinkageMap._obj_properties if
						# it exists. Otherwise, None.
						arch, _needed, _path, soname, _objs = \
								self._obj_properties.get(obj_key, (None,)*5)
						return cache_self.cache.setdefault(obj, \
								(arch, soname, obj_key, True))
					else:
						return cache_self.cache.setdefault(obj, \
								(None, None, obj_key, False))

		rValue = {}
		cache = _LibraryCache()
		providers = self.listProviders()

		# Iterate over all obj_keys and their providers.
		for obj_key, sonames in providers.items():
			arch, _needed, path, _soname, objs = self._obj_properties[obj_key]
			path = path.union(self._defpath)
			# Iterate over each needed soname and the set of library paths that
			# fulfill the soname to determine if the dependency is broken.
			for soname, libraries in sonames.items():
				# validLibraries is used to store libraries, which satisfy soname,
				# so if no valid libraries are found, the soname is not satisfied
				# for obj_key.  If unsatisfied, objects associated with obj_key
				# must be emerged.
				validLibraries = set()
				# It could be the case that the library to satisfy the soname is
				# not in the obj's runpath, but a symlink to the library is (eg
				# libnvidia-tls.so.1 in nvidia-drivers).  Also, since LinkageMap
				# does not catalog symlinks, broken or missing symlinks may go
				# unnoticed.  As a result of these cases, check that a file with
				# the same name as the soname exists in obj's runpath.
				# XXX If we catalog symlinks in LinkageMap, this could be improved.
				for directory in path:
					cachedArch, cachedSoname, cachedKey, cachedExists = \
							cache.get(os.path.join(directory, soname))
					# Check that this library provides the needed soname.  Doing
					# this, however, will cause consumers of libraries missing
					# sonames to be unnecessarily emerged. (eg libmix.so)
					if cachedSoname == soname and cachedArch == arch:
						validLibraries.add(cachedKey)
						if debug and cachedKey not in \
								set(map(self._obj_key_cache.get, libraries)):
							# XXX This is most often due to soname symlinks not in
							# a library's directory.  We could catalog symlinks in
							# LinkageMap to avoid checking for this edge case here.
							writemsg_level(
								_("Found provider outside of findProviders:") + \
								(" %s -> %s %s\n" % (os.path.join(directory, soname),
								self._obj_properties[cachedKey][4], libraries)),
								level=logging.DEBUG,
								noiselevel=-1)
						# A valid library has been found, so there is no need to
						# continue.
						break
					if debug and cachedArch == arch and \
							cachedKey in self._obj_properties:
						writemsg_level((_("Broken symlink or missing/bad soname: " + \
							"%(dir_soname)s -> %(cachedKey)s " + \
							"with soname %(cachedSoname)s but expecting %(soname)s") % \
							{"dir_soname":os.path.join(directory, soname),
							"cachedKey": self._obj_properties[cachedKey],
							"cachedSoname": cachedSoname, "soname":soname}) + "\n",
							level=logging.DEBUG,
							noiselevel=-1)
				# This conditional checks if there are no libraries to satisfy the
				# soname (empty set).
				if not validLibraries:
					for obj in objs:
						rValue.setdefault(obj, set()).add(soname)
					# If no valid libraries have been found by this point, then
					# there are no files named with the soname within obj's runpath,
					# but if there are libraries (from the providers mapping), it is
					# likely that soname symlinks or the actual libraries are
					# missing or broken.  Thus those libraries are added to rValue
					# in order to emerge corrupt library packages.
					for lib in libraries:
						rValue.setdefault(lib, set()).add(soname)
						if debug:
							if not os.path.isfile(lib):
								writemsg_level(_("Missing library:") + " %s\n" % (lib,),
									level=logging.DEBUG,
									noiselevel=-1)
							else:
								writemsg_level(_("Possibly missing symlink:") + \
									"%s\n" % (os.path.join(os.path.dirname(lib), soname)),
									level=logging.DEBUG,
									noiselevel=-1)
		return rValue

	def listProviders(self):
		"""
		Find the providers for all object keys in LinkageMap.

		@rtype: dict (example:
			{(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
		@return: The return value is an object key -> providers mapping, where
			providers is a mapping of soname -> set-of-library-paths returned
			from the findProviders method.

		"""
		rValue = {}
		if not self._libs:
			self.rebuild()
		# Iterate over all object keys within LinkageMap.
		for obj_key in self._obj_properties:
			rValue.setdefault(obj_key, self.findProviders(obj_key))
		return rValue

	def isMasterLink(self, obj):
		"""
		Determine whether an object is a master link.

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/foo')
		@rtype: Boolean
		@return:
			1. True if obj is a master link
			2. False if obj is not a master link

		"""
		os = _os_merge
		basename = os.path.basename(obj)
		obj_key = self._obj_key(obj)
		if obj_key not in self._obj_properties:
			raise KeyError("%s (%s) not in object list" % (obj_key, obj))
		soname = self._obj_properties[obj_key][3]
		return (len(basename) < len(soname))

	def listLibraryObjects(self):
		"""
		Return a list of library objects.

		Known limitation: library objects lacking an soname are not included.

		@rtype: list of strings
		@return: list of paths to all providers

		"""
		rValue = []
		if not self._libs:
			self.rebuild()
		for arch_map in self._libs.values():
			for soname_map in arch_map.values():
				for obj_key in soname_map.providers:
					rValue.extend(self._obj_properties[obj_key][4])
		return rValue

	def getSoname(self, obj):
		"""
		Return the soname associated with an object.

		@param obj: absolute path to an object
		@type obj: string (example: '/usr/bin/bar')
		@rtype: string
		@return: soname as a string

		"""
		if not self._libs:
			self.rebuild()
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
			return self._obj_properties[obj_key][3]
		if obj not in self._obj_key_cache:
			raise KeyError("%s not in object list" % obj)
		return self._obj_properties[self._obj_key_cache[obj]][3]

	def findProviders(self, obj):
		"""
		Find providers for an object or object key.

		This method may be called with a key from _obj_properties.

		In some cases, not all valid libraries are returned.  This may occur when
		an soname symlink referencing a library is in an object's runpath while
		the actual library is not.  We should consider cataloging symlinks within
		LinkageMap as this would avoid those cases and would be a better model of
		library dependencies (since the dynamic linker actually searches for
		files named with the soname in the runpaths).

		@param obj: absolute path to an object or a key from _obj_properties
		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
		@rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
		@return: The return value is a soname -> set-of-library-paths, where
		set-of-library-paths satisfy soname.

		"""

		os = _os_merge

		rValue = {}

		if not self._libs:
			self.rebuild()

		# Determine the obj_key from the arguments.
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
		else:
			obj_key = self._obj_key(obj)
			if obj_key not in self._obj_properties:
				raise KeyError("%s (%s) not in object list" % (obj_key, obj))

		arch, needed, path, _soname, _objs = self._obj_properties[obj_key]
		path_keys = set(self._path_key(x) for x in path.union(self._defpath))
		for soname in needed:
			rValue[soname] = set()
			if arch not in self._libs or soname not in self._libs[arch]:
				continue
			# For each potential provider of the soname, add it to rValue if it
			# resides in the obj's runpath.
			for provider_key in self._libs[arch][soname].providers:
				providers = self._obj_properties[provider_key][4]
				for provider in providers:
					if self._path_key(os.path.dirname(provider)) in path_keys:
						rValue[soname].add(provider)
		return rValue

	def findConsumers(self, obj):
		"""
		Find consumers of an object or object key.

		This method may be called with a key from _obj_properties.  If this
		method is going to be called with an object key, then to avoid missing
		shadowed libraries, do not pass new _ObjectKey instances to this method.
		Instead pass the obj as a string.

		In some cases, not all consumers are returned.  This may occur when
		an soname symlink referencing a library is in an object's runpath while
		the actual library is not. For example, this problem is noticeable for
		binutils since its libraries are added to the path via symlinks that
		are generated in the /usr/$CHOST/lib/ directory by binutils-config.
		Failure to recognize consumers of these symlinks makes preserve-libs
		fail to preserve binutils libs that are needed by these unrecognized
		consumers.

		Note that library consumption via dlopen (common for kde plugins) is
		currently undetected. However, it is possible to use the
		corresponding libtool archive (*.la) files to detect such consumers
		(revdep-rebuild is able to detect them).

		@param obj: absolute path to an object or a key from _obj_properties
		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
		@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
		@return: The return value is a set of paths to consumers of the
		given object.

		"""

		os = _os_merge

		rValue = set()

		if not self._libs:
			self.rebuild()

		# Determine the obj_key and the set of objects matching the arguments.
		if isinstance(obj, self._ObjectKey):
			obj_key = obj
			if obj_key not in self._obj_properties:
				raise KeyError("%s not in object list" % obj_key)
			objs = self._obj_properties[obj_key][4]
		else:
			objs = set([obj])
			obj_key = self._obj_key(obj)
			if obj_key not in self._obj_properties:
				raise KeyError("%s (%s) not in object list" % (obj_key, obj))

		# If there is another version of this lib with the
		# same soname and the master link points to that
		# other version, this lib will be shadowed and won't
		# have any consumers.
		if not isinstance(obj, self._ObjectKey):
			soname = self._obj_properties[obj_key][3]
			master_link = os.path.join(self._eroot,
				os.path.dirname(obj).lstrip(os.path.sep), soname)
			try:
				master_st = os.stat(master_link)
				obj_st = os.stat(obj)
			except OSError:
				pass
			else:
				if (obj_st.st_dev, obj_st.st_ino) != \
					(master_st.st_dev, master_st.st_ino):
					return set()

		# Determine the directory(ies) from the set of objects.
		objs_dir_keys = set(self._path_key(os.path.dirname(x)) for x in objs)
		defpath_keys = set(self._path_key(x) for x in self._defpath)

		arch, _needed, _path, soname, _objs = self._obj_properties[obj_key]
		if arch in self._libs and soname in self._libs[arch]:
			# For each potential consumer, add it to rValue if an object from the
			# arguments resides in the consumer's runpath.
			for consumer_key in self._libs[arch][soname].consumers:
				_arch, _needed, path, _soname, consumer_objs = \
						self._obj_properties[consumer_key]
				path_keys = defpath_keys.union(self._path_key(x) for x in path)
				if objs_dir_keys.intersection(path_keys):
					rValue.update(consumer_objs)
		return rValue
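
The inline field splitting and $ORIGIN expansion in this older rebuild() can
be isolated as below. A minimal sketch: normalize_path is approximated with
os.path.normpath, and the sample NEEDED.ELF.2 line is hypothetical.

# Minimal sketch of NEEDED.ELF.2 line parsing as done in rebuild() above.
import os

def parse_needed_line(line):
    """Split an arch;obj;soname;runpaths;needed line into its parts."""
    fields = line.rstrip("\n").split(";")
    if len(fields) < 5:
        raise ValueError("wrong number of fields: %s" % line)
    arch, obj, soname = fields[0], fields[1], fields[2]
    origin = os.path.dirname(obj)
    runpaths = set(
        os.path.normpath(x)  # stands in for portage's normalize_path
        for x in fields[3].replace("${ORIGIN}", origin)
                          .replace("$ORIGIN", origin).split(":")
        if x)
    needed = [x for x in fields[4].split(",") if x]
    return arch, obj, soname, runpaths, needed

line = "EM_X86_64;/usr/lib64/libfoo.so.1;libfoo.so.1;$ORIGIN:/usr/lib64;libc.so.6"
arch, obj, soname, runpaths, needed = parse_needed_line(line)
assert runpaths == {"/usr/lib64"}
assert needed == ["libc.so.6"]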