Example #1
    def _spawn(self, args, fd_pipes, **kwargs):
        """
        Extend the superclass _spawn method to perform some pre-fork and
        post-fork actions.
        """

        elog_reader_fd, elog_writer_fd = os.pipe()

        fcntl.fcntl(
            elog_reader_fd,
            fcntl.F_SETFL,
            fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK,
        )

        blockers = None
        if self.blockers is not None:
            # Query blockers in the main process, since closing
            # of file descriptors in the subprocess can prevent
            # access to open database connections such as that
            # used by the sqlite metadata cache module.
            blockers = self.blockers()
        mylink = portage.dblink(
            self.mycat,
            self.mypkg,
            settings=self.settings,
            treetype=self.treetype,
            vartree=self.vartree,
            blockers=blockers,
            pipe=elog_writer_fd,
        )
        fd_pipes[elog_writer_fd] = elog_writer_fd
        self.scheduler.add_reader(elog_reader_fd, self._elog_output_handler)

        # If a concurrent emerge process tries to install a package
        # in the same SLOT as this one at the same time, there is an
        # extremely unlikely chance that the COUNTER values will not be
        # ordered correctly unless we lock the vdb here.
        # FEATURES=parallel-install skips this lock in order to
        # improve performance, and the risk is practically negligible.
        self._lock_vdb()
        if not self.unmerge:
            self._counter = self.vartree.dbapi.counter_tick()

        self._dblink = mylink
        self._elog_reader_fd = elog_reader_fd
        pids = super(MergeProcess, self)._spawn(args, fd_pipes, **kwargs)
        os.close(elog_writer_fd)
        self._buf = ""
        self._elog_keys = set()
        # Discard messages which will be collected by the subprocess,
        # in order to avoid duplicates (bug #446136).
        portage.elog.messages.collect_messages(key=mylink.mycpv)

        # invalidate relevant vardbapi caches
        if self.vartree.dbapi._categories is not None:
            self.vartree.dbapi._categories = None
        self.vartree.dbapi._pkgs_changed = True
        self.vartree.dbapi._clear_pkg_cache(mylink)

        return pids
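The pipe handling above is a small self-contained pattern: the parent keeps a non-blocking read end for elog output and hands the write end to the forked child. A minimal standalone sketch of just that setup, with hypothetical names, could look like this:

import fcntl
import os

def make_nonblocking_pipe():
    # Create a pipe and put the read end into non-blocking mode,
    # as the _spawn() above does for its elog file descriptors.
    reader_fd, writer_fd = os.pipe()
    flags = fcntl.fcntl(reader_fd, fcntl.F_GETFL)
    fcntl.fcntl(reader_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    return reader_fd, writer_fd

# The parent would register reader_fd with its event loop and close
# writer_fd once the child process has been spawned.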
Example #2
def _GetFilesForTarget(target, root='/'):
    """Locate all the files to package for |target|

  This does not cover ELF dependencies.

  Args:
    target: The toolchain target name
    root: The root path to pull all packages from

  Returns:
    A tuple of a set of all packable paths, and a set of all paths which
    are also native ELFs
  """
    paths = set()
    elfs = set()

    # Find all the files owned by the packages for this target.
    for pkg in GetTargetPackages(target):
        # Ignore packages that are part of the target sysroot.
        if pkg in ('kernel', 'libc'):
            continue

        # Skip Go compiler from redistributable packages.
        # The "go" executable has GOROOT=/usr/lib/go/${CTARGET} hardcoded
        # into it. Due to this, the toolchain cannot be unpacked anywhere
        # else and be readily useful. To enable packaging Go, we need to:
        # -) Tweak the wrappers/environment to override GOROOT
        #    automatically based on the unpack location.
        # -) Make sure the ELF dependency checking and wrapping logic
        #    below skips the Go toolchain executables and libraries.
        # -) Make sure the packaging process maintains the relative
        #    timestamps of precompiled standard library packages.
        #    (see dev-lang/go ebuild for details).
        if pkg == 'ex_go':
            continue

        atom = GetPortagePackage(target, pkg)
        cat, pn = atom.split('/')
        ver = GetInstalledPackageVersions(atom, root=root)[0]
        logging.info('packaging %s-%s', atom, ver)

        # pylint: disable=E1101
        dblink = portage.dblink(cat,
                                pn + '-' + ver,
                                myroot=root,
                                settings=portage.settings)
        contents = dblink.getcontents()
        for obj in contents:
            ptype = contents[obj][0]
            if not IsPathPackagable(ptype, obj):
                continue

            if ptype == 'obj':
                # For native ELFs, we need to pull in their dependencies too.
                if FileIsCrosSdkElf(obj):
                    elfs.add(obj)
            paths.add(obj)

    return paths, elfs
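The inner loop above boils down to one reusable step: construct a dblink for a single installed package and walk its CONTENTS entries. A hedged sketch of that step in isolation, with a hypothetical category/version and assuming portage.settings is already initialized:

import portage

def list_package_objects(cat, pkg_ver, root='/'):
    # CONTENTS maps each path to a tuple whose first element is its type
    # ('obj', 'dir', 'sym', ...); getcontents() may return None when the
    # package is not installed, so fall back to an empty dict.
    dblink = portage.dblink(cat, pkg_ver, myroot=root,
                            settings=portage.settings)
    contents = dblink.getcontents() or {}
    return [path for path, entry in contents.items() if entry[0] == 'obj']

# Hypothetical usage:
#   list_package_objects('sys-devel', 'gcc-10.2.0')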
Example #3
def _GetFilesForTarget(target, root='/'):
  """Locate all the files to package for |target|

  This does not cover ELF dependencies.

  Args:
    target: The toolchain target name
    root: The root path to pull all packages from

  Returns:
    A tuple of a set of all packable paths, and a set of all paths which
    are also native ELFs
  """
  paths = set()
  elfs = set()

  # Find all the files owned by the packages for this target.
  for pkg in GetTargetPackages(target):
    # Ignore packages that are part of the target sysroot.
    if pkg in ('kernel', 'libc'):
      continue

    # Skip Go compiler from redistributable packages.
    # The "go" executable has GOROOT=/usr/lib/go/${CTARGET} hardcoded
    # into it. Due to this, the toolchain cannot be unpacked anywhere
    # else and be readily useful. To enable packaging Go, we need to:
    # -) Tweak the wrappers/environment to override GOROOT
    #    automatically based on the unpack location.
    # -) Make sure the ELF dependency checking and wrapping logic
    #    below skips the Go toolchain executables and libraries.
    # -) Make sure the packaging process maintains the relative
    #    timestamps of precompiled standard library packages.
    #    (see dev-lang/go ebuild for details).
    if pkg == 'ex_go':
      continue

    atom = GetPortagePackage(target, pkg)
    cat, pn = atom.split('/')
    ver = GetInstalledPackageVersions(atom, root=root)[0]
    logging.info('packaging %s-%s', atom, ver)

    # pylint: disable=E1101
    dblink = portage.dblink(cat, pn + '-' + ver, myroot=root,
                            settings=portage.settings)
    contents = dblink.getcontents()
    for obj in contents:
      ptype = contents[obj][0]
      if not IsPathPackagable(ptype, obj):
        continue

      if ptype == 'obj':
        # For native ELFs, we need to pull in their dependencies too.
        if FileIsCrosSdkElf(obj):
          elfs.add(obj)
      paths.add(obj)

  return paths, elfs
Example #4
    def dblink(self):
        """Instantiate a L{portage.dbapi.vartree.dblink} object here."""

        if self._dblink is None:
            self._dblink = portage.dblink(
                self.category, "%s-%s" % (self.name, self.fullversion),
                self._settings["ROOT"], self._settings)

        return self._dblink
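A caller of such a property pays the construction cost only on first access; a hedged usage sketch (pkg stands for an instance of the surrounding class, which is not shown in the example):

link = pkg.dblink()              # built lazily, then cached in self._dblink
if link.exists():                # True when the package is recorded in the vdb
    print(len(link.getcontents() or {}), 'paths installed')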
Example #5
def UpdatePackageContents(change_report, package_cp, portage_root=None):
    """
  Add newly created files/directories to package contents.

  Given an ItemizedChangeReport, add the newly created files and directories
  to the CONTENTS of an installed portage package, such that these files are
  considered owned by that package.

  Arguments:
    change_report: ItemizedChangeReport object for the changes to be
                   made to the package.
    package_cp: A string similar to 'chromeos-base/autotest-tests' giving
                the package category and name of the package to be altered.
    portage_root: Portage root path, corresponding to the board that
                  we are working on. Defaults to '/'
  """
    if portage_root is None:
        portage_root = portage.root  # pylint: disable-msg=E1101
    # Ensure that portage_root ends with trailing slash.
    portage_root = os.path.join(portage_root, "")

    # Create vartree object corresponding to portage_root
    trees = portage.create_trees(portage_root, portage_root)
    vartree = trees[portage_root]["vartree"]

    # List matching installed packages in cpv format
    matching_packages = vartree.dbapi.cp_list(package_cp)

    if not matching_packages:
        raise ValueError("No matching package for %s in portage_root %s" % (package_cp, portage_root))

    if len(matching_packages) > 1:
        raise ValueError("Too many matching packages for %s in portage_root " "%s" % (package_cp, portage_root))

    # Convert string match to package dblink
    package_cpv = matching_packages[0]
    package_split = portage_utilities.SplitCPV(package_cpv)
    package = portage.dblink(
        package_split.category,  # pylint: disable-msg=E1101
        package_split.pv,
        settings=vartree.settings,
        vartree=vartree,
    )

    # Append new contents to package contents dictionary
    contents = package.getcontents().copy()
    for _, filename in change_report.new_files:
        contents.setdefault(filename, (u"obj", "0", "0"))
    for _, dirname in change_report.new_directories:
        # Strip trailing slashes if present.
        dirname = dirname.rstrip("/")
        contents.setdefault(dirname, (u"dir",))

    # Write new contents dictionary to file
    vartree.dbapi.writeContentsToContentsFile(package, contents)
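A hedged usage sketch follows. ItemizedChangeReport itself is not shown above, so a minimal stand-in exposing only the two attributes the function reads (new_files and new_directories, each a sequence of (change, path) pairs) is assumed, along with a hypothetical board root:

class FakeChangeReport(object):
    """Hypothetical stand-in for ItemizedChangeReport."""
    new_files = [('added', '/usr/local/autotest/tests/example_test.py')]
    new_directories = [('added', '/usr/local/autotest/tests/example_test/')]

UpdatePackageContents(FakeChangeReport(), 'chromeos-base/autotest-tests',
                      portage_root='/build/lumpy')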
Example #6
	def dblink(self):
		"""Instantiate a L{portage.dbapi.vartree.dblink} object here."""

		if self._dblink is None:
			self._dblink = portage.dblink(
				self.category,
				"%s-%s" % (self.name, self.fullversion),
				self._settings["ROOT"],
				self._settings
			)

		return self._dblink
Example #7
def GetPackageAPI(portage_root, package_cp):
    """Gets portage API handles for the given package.

  Arguments:
    portage_root: Root directory of portage tree. Eg '/' or '/build/lumpy'
    package_cp:   A string similar to 'chromeos-base/autotest-tests'.

  Returns:
    Returns (package, vartree) tuple, where
      package is of type portage.dbapi.vartree.dblink
      vartree is of type portage.dbapi.vartree.vartree
  """
    if portage_root is None:
        # pylint: disable-msg=E1101
        portage_root = portage.root
    # Ensure that portage_root ends with trailing slash.
    portage_root = os.path.join(portage_root, '')

    # Create a vartree object corresponding to portage_root.
    trees = portage.create_trees(portage_root, portage_root)
    vartree = trees[portage_root]['vartree']

    # List the matching installed packages in cpv format.
    matching_packages = vartree.dbapi.cp_list(package_cp)

    if not matching_packages:
        raise PortagePackageAPIError(
            'No matching package for %s in portage_root '
            '%s' % (package_cp, portage_root))

    if len(matching_packages) > 1:
        raise PortagePackageAPIError('Too many matching packages for %s in '
                                     'portage_root %s' %
                                     (package_cp, portage_root))

    # Convert string match to package dblink.
    package_cpv = matching_packages[0]
    package_split = portage_utilities.SplitCPV(package_cpv)
    # pylint: disable-msg=E1101
    package = portage.dblink(package_split.category,
                             package_split.pv,
                             settings=vartree.settings,
                             vartree=vartree)

    return package, vartree
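Usage is a single call; a short sketch assuming the board sysroot mentioned in the docstring example:

package, vartree = GetPackageAPI('/build/lumpy/', 'chromeos-base/autotest-tests')
contents = package.getcontents()         # CONTENTS entries of the installed cpv
print('%d paths recorded for %s' % (len(contents), package.mycpv))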
Example #8
def _GetFilesForTarget(target, root='/'):
    """Locate all the files to package for |target|

  This does not cover ELF dependencies.

  Args:
    target: The toolchain target name
    root: The root path to pull all packages from
  Returns:
    A tuple of a set of all packable paths, and a set of all paths which
    are also native ELFs
  """
    paths = set()
    elfs = set()

    # Find all the files owned by the packages for this target.
    for pkg in GetTargetPackages(target):
        # Ignore packages that are part of the target sysroot.
        if pkg in ('kernel', 'libc'):
            continue

        atom = GetPortagePackage(target, pkg)
        cat, pn = atom.split('/')
        ver = GetInstalledPackageVersions(atom)[0]
        cros_build_lib.Info('packaging %s-%s', atom, ver)

        # pylint: disable=E1101
        dblink = portage.dblink(cat,
                                pn + '-' + ver,
                                myroot=root,
                                settings=portage.settings)
        contents = dblink.getcontents()
        for obj in contents:
            ptype = contents[obj][0]
            if not IsPathPackagable(ptype, obj):
                continue

            if ptype == 'obj':
                # For native ELFs, we need to pull in their dependencies too.
                if FileIsCrosSdkElf(obj):
                    elfs.add(obj)
            paths.add(obj)

    return paths, elfs
Example #9
def GetPackageAPI(portage_root, package_cp):
  """Gets portage API handles for the given package.

  Args:
    portage_root: Root directory of portage tree. Eg '/' or '/build/lumpy'
    package_cp: A string similar to 'chromeos-base/autotest-tests'.

  Returns:
    Returns (package, vartree) tuple, where
      package is of type portage.dbapi.vartree.dblink
      vartree is of type portage.dbapi.vartree.vartree
  """
  if portage_root is None:
    # pylint: disable=no-member
    portage_root = portage.root
  # Ensure that portage_root ends with trailing slash.
  portage_root = os.path.join(portage_root, '')

  # Create a vartree object corresponding to portage_root.
  trees = portage.create_trees(portage_root, portage_root)
  vartree = trees[portage_root]['vartree']

  # List the matching installed packages in cpv format.
  matching_packages = vartree.dbapi.cp_list(package_cp)

  if not matching_packages:
    raise PortagePackageAPIError('No matching package for %s in portage_root '
                                 '%s' % (package_cp, portage_root))

  if len(matching_packages) > 1:
    raise PortagePackageAPIError('Too many matching packages for %s in '
                                 'portage_root %s' % (package_cp,
                                                      portage_root))

  # Convert string match to package dblink.
  package_cpv = matching_packages[0]
  package_split = portage_util.SplitCPV(package_cpv)
  # pylint: disable=no-member
  package = portage.dblink(package_split.category,
                           package_split.pv, settings=vartree.settings,
                           vartree=vartree)

  return package, vartree
Example #10
def _GetFilesForTarget(target, root='/'):
  """Locate all the files to package for |target|

  This does not cover ELF dependencies.

  Args:
    target: The toolchain target name
    root: The root path to pull all packages from

  Returns:
    A tuple of a set of all packable paths, and a set of all paths which
    are also native ELFs
  """
  paths = set()
  elfs = set()

  # Find all the files owned by the packages for this target.
  for pkg in GetTargetPackages(target):
    # Ignore packages that are part of the target sysroot.
    if pkg in ('kernel', 'libc'):
      continue

    atom = GetPortagePackage(target, pkg)
    cat, pn = atom.split('/')
    ver = GetInstalledPackageVersions(atom)[0]
    cros_build_lib.Info('packaging %s-%s', atom, ver)

    # pylint: disable=E1101
    dblink = portage.dblink(cat, pn + '-' + ver, myroot=root,
                            settings=portage.settings)
    contents = dblink.getcontents()
    for obj in contents:
      ptype = contents[obj][0]
      if not IsPathPackagable(ptype, obj):
        continue

      if ptype == 'obj':
        # For native ELFs, we need to pull in their dependencies too.
        if FileIsCrosSdkElf(obj):
          elfs.add(obj)
      paths.add(obj)

  return paths, elfs
Example #11
    def _spawn(self, args, fd_pipes, **kwargs):
        """
		Fork a subprocess, apply local settings, and call
		dblink.merge(). TODO: Share code with ForkProcess.
		"""

        elog_reader_fd, elog_writer_fd = os.pipe()
        fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
                    fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        blockers = None
        if self.blockers is not None:
            # Query blockers in the main process, since closing
            # of file descriptors in the subprocess can prevent
            # access to open database connections such as that
            # used by the sqlite metadata cache module.
            blockers = self.blockers()
        mylink = portage.dblink(self.mycat,
                                self.mypkg,
                                settings=self.settings,
                                treetype=self.treetype,
                                vartree=self.vartree,
                                blockers=blockers,
                                pipe=elog_writer_fd)
        fd_pipes[elog_writer_fd] = elog_writer_fd
        self._elog_reg_id = self.scheduler.io_add_watch(
            elog_reader_fd, self._registered_events, self._elog_output_handler)

        # If a concurrent emerge process tries to install a package
        # in the same SLOT as this one at the same time, there is an
        # extremely unlikely chance that the COUNTER values will not be
        # ordered correctly unless we lock the vdb here.
        # FEATURES=parallel-install skips this lock in order to
        # improve performance, and the risk is practically negligible.
        self._lock_vdb()
        counter = None
        if not self.unmerge:
            counter = self.vartree.dbapi.counter_tick()

        parent_pid = os.getpid()
        pid = None
        try:
            pid = os.fork()

            if pid != 0:
                if not isinstance(pid, int):
                    raise AssertionError("fork returned non-integer: %s" %
                                         (repr(pid), ))

                os.close(elog_writer_fd)
                self._elog_reader_fd = elog_reader_fd
                self._buf = ""
                self._elog_keys = set()

                # invalidate relevant vardbapi caches
                if self.vartree.dbapi._categories is not None:
                    self.vartree.dbapi._categories = None
                self.vartree.dbapi._pkgs_changed = True
                self.vartree.dbapi._clear_pkg_cache(mylink)

                portage.process.spawned_pids.append(pid)
                return [pid]

            os.close(elog_reader_fd)

            # Use default signal handlers in order to avoid problems
            # killing subprocesses as reported in bug #353239.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)

            portage.locks._close_fds()
            # We don't exec, so use close_fds=False
            # (see _setup_pipes docstring).
            portage.process._setup_pipes(fd_pipes, close_fds=False)

            portage.output.havecolor = self.settings.get('NOCOLOR') \
             not in ('yes', 'true')

            # Avoid wasteful updates of the vdb cache.
            self.vartree.dbapi._flush_cache_enabled = False

            # In this subprocess we don't want PORTAGE_BACKGROUND to
            # suppress stdout/stderr output since they are pipes. We
            # also don't want to open PORTAGE_LOG_FILE, since it will
            # already be opened by the parent process, so we set the
            # "subprocess" value for use in conditional logging code
            # involving PORTAGE_LOG_FILE.
            if not self.unmerge:
                # unmerge phases have separate logs
                if self.settings.get("PORTAGE_BACKGROUND") == "1":
                    self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
                else:
                    self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
                self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
            self.settings["PORTAGE_BACKGROUND"] = "subprocess"
            self.settings.backup_changes("PORTAGE_BACKGROUND")

            rval = 1
            try:
                if self.unmerge:
                    if not mylink.exists():
                        rval = os.EX_OK
                    elif mylink.unmerge(
                            ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
                        mylink.lockdb()
                        try:
                            mylink.delete()
                        finally:
                            mylink.unlockdb()
                        rval = os.EX_OK
                else:
                    rval = mylink.merge(self.pkgloc,
                                        self.infloc,
                                        myebuild=self.myebuild,
                                        mydbapi=self.mydbapi,
                                        prev_mtimes=self.prev_mtimes,
                                        counter=counter)
            except SystemExit:
                raise
            except:
                traceback.print_exc()
            finally:
                os._exit(rval)

        finally:
            if pid == 0 or (pid is None and os.getpid() != parent_pid):
                # Call os._exit() from a finally block in order
                # to suppress any finally blocks from earlier
                # in the call stack (see bug #345289). This
                # finally block has to be setup before the fork
                # in order to avoid a race condition.
                os._exit(1)
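The bug #345289 note above is the reason every exit path in the child ends in os._exit(). That fork discipline can be sketched on its own; do_work here is a hypothetical callable returning an exit status:

import os

def run_in_child(do_work):
    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()
        if pid != 0:
            return pid                 # parent: hand the pid back to the caller
        rval = 1
        try:
            rval = do_work()           # child: never return normally
        finally:
            os._exit(rval)
    finally:
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            # Safety net: if the child ever reaches this outer finally, exit
            # without running finally blocks further up the call stack
            # (see bug #345289).
            os._exit(1)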
Example #12
	def _spawn(self, args, fd_pipes, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call
		dblink.merge(). TODO: Share code with ForkProcess.
		"""

		elog_reader_fd, elog_writer_fd = os.pipe()
		fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
			fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
		blockers = None
		if self.blockers is not None:
			# Query blockers in the main process, since closing
			# of file descriptors in the subprocess can prevent
			# access to open database connections such as that
			# used by the sqlite metadata cache module.
			blockers = self.blockers()
		mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
			treetype=self.treetype, vartree=self.vartree,
			blockers=blockers, pipe=elog_writer_fd)
		fd_pipes[elog_writer_fd] = elog_writer_fd
		self._elog_reg_id = self.scheduler.io_add_watch(elog_reader_fd,
			self._registered_events, self._elog_output_handler)

		# If a concurrent emerge process tries to install a package
		# in the same SLOT as this one at the same time, there is an
		# extremely unlikely chance that the COUNTER values will not be
		# ordered correctly unless we lock the vdb here.
		# FEATURES=parallel-install skips this lock in order to
		# improve performance, and the risk is practically negligible.
		self._lock_vdb()
		counter = None
		if not self.unmerge:
			counter = self.vartree.dbapi.counter_tick()

		parent_pid = os.getpid()
		pid = None
		try:
			pid = os.fork()

			if pid != 0:
				if not isinstance(pid, int):
					raise AssertionError(
						"fork returned non-integer: %s" % (repr(pid),))

				os.close(elog_writer_fd)
				self._elog_reader_fd = elog_reader_fd
				self._buf = ""
				self._elog_keys = set()

				# invalidate relevant vardbapi caches
				if self.vartree.dbapi._categories is not None:
					self.vartree.dbapi._categories = None
				self.vartree.dbapi._pkgs_changed = True
				self.vartree.dbapi._clear_pkg_cache(mylink)

				portage.process.spawned_pids.append(pid)
				return [pid]

			os.close(elog_reader_fd)

			# Use default signal handlers in order to avoid problems
			# killing subprocesses as reported in bug #353239.
			signal.signal(signal.SIGINT, signal.SIG_DFL)
			signal.signal(signal.SIGTERM, signal.SIG_DFL)

			portage.locks._close_fds()
			# We don't exec, so use close_fds=False
			# (see _setup_pipes docstring).
			portage.process._setup_pipes(fd_pipes, close_fds=False)

			portage.output.havecolor = self.settings.get('NOCOLOR') \
				not in ('yes', 'true')

			# Avoid wasteful updates of the vdb cache.
			self.vartree.dbapi._flush_cache_enabled = False

			# In this subprocess we don't want PORTAGE_BACKGROUND to
			# suppress stdout/stderr output since they are pipes. We
			# also don't want to open PORTAGE_LOG_FILE, since it will
			# already be opened by the parent process, so we set the
			# "subprocess" value for use in conditional logging code
			# involving PORTAGE_LOG_FILE.
			if not self.unmerge:
				# unmerge phases have separate logs
				if self.settings.get("PORTAGE_BACKGROUND") == "1":
					self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
				else:
					self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
				self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
			self.settings["PORTAGE_BACKGROUND"] = "subprocess"
			self.settings.backup_changes("PORTAGE_BACKGROUND")

			rval = 1
			try:
				if self.unmerge:
					if not mylink.exists():
						rval = os.EX_OK
					elif mylink.unmerge(
						ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
						mylink.lockdb()
						try:
							mylink.delete()
						finally:
							mylink.unlockdb()
						rval = os.EX_OK
				else:
					rval = mylink.merge(self.pkgloc, self.infloc,
						myebuild=self.myebuild, mydbapi=self.mydbapi,
						prev_mtimes=self.prev_mtimes, counter=counter)
			except SystemExit:
				raise
			except:
				traceback.print_exc()
			finally:
				os._exit(rval)

		finally:
			if pid == 0 or (pid is None and os.getpid() != parent_pid):
				# Call os._exit() from a finally block in order
				# to suppress any finally blocks from earlier
				# in the call stack (see bug #345289). This
				# finally block has to be setup before the fork
				# in order to avoid a race condition.
				os._exit(1)
Example #13
	def _spawn(self, args, fd_pipes, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call
		dblink.merge().
		"""

		elog_reader_fd, elog_writer_fd = os.pipe()
		fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
			fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
		blockers = None
		if self.blockers is not None:
			# Query blockers in the main process, since closing
			# of file descriptors in the subprocess can prevent
			# access to open database connections such as that
			# used by the sqlite metadata cache module.
			blockers = self.blockers()
		mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
			treetype=self.treetype, vartree=self.vartree,
			blockers=blockers, scheduler=self.scheduler,
			pipe=elog_writer_fd)
		fd_pipes[elog_writer_fd] = elog_writer_fd
		self._elog_reg_id = self.scheduler.register(elog_reader_fd,
			self._registered_events, self._elog_output_handler)

		# If a concurrent emerge process tries to install a package
		# in the same SLOT as this one at the same time, there is an
		# extremely unlikely chance that the COUNTER values will not be
		# ordered correctly unless we lock the vdb here.
		# FEATURES=parallel-install skips this lock in order to
		# improve performance, and the risk is practically negligible.
		self._lock_vdb()
		counter = None
		if not self.unmerge:
			counter = self.vartree.dbapi.counter_tick()

		pid = os.fork()
		if pid != 0:
			if not isinstance(pid, int):
				raise AssertionError(
					"fork returned non-integer: %s" % (repr(pid),))

			os.close(elog_writer_fd)
			self._elog_reader_fd = elog_reader_fd
			self._buf = ""
			self._elog_keys = set()

			# invalidate relevant vardbapi caches
			if self.vartree.dbapi._categories is not None:
				self.vartree.dbapi._categories = None
			self.vartree.dbapi._pkgs_changed = True
			self.vartree.dbapi._clear_pkg_cache(mylink)

			portage.process.spawned_pids.append(pid)
			return [pid]

		os.close(elog_reader_fd)

		# TODO: Find out why PyPy 1.8 with close_fds=True triggers
		# "[Errno 9] Bad file descriptor" in subprocesses. It could
		# be due to garbage collection of file objects that were not
		# closed before going out of scope, since PyPy's garbage
		# collector does not support the refcounting semantics that
		# CPython does.
		close_fds = platform.python_implementation() != 'PyPy'
		portage.process._setup_pipes(fd_pipes, close_fds=close_fds)

		# Use default signal handlers since the ones inherited
		# from the parent process are irrelevant here.
		signal.signal(signal.SIGINT, signal.SIG_DFL)
		signal.signal(signal.SIGTERM, signal.SIG_DFL)

		portage.output.havecolor = self.settings.get('NOCOLOR') \
			not in ('yes', 'true')

		# In this subprocess we want mylink._display_merge() to use
		# stdout/stderr directly since they are pipes. This behavior
		# is triggered when mylink._scheduler is None.
		mylink._scheduler = None

		# Avoid wasteful updates of the vdb cache.
		self.vartree.dbapi._flush_cache_enabled = False

		# In this subprocess we don't want PORTAGE_BACKGROUND to
		# suppress stdout/stderr output since they are pipes. We
		# also don't want to open PORTAGE_LOG_FILE, since it will
		# already be opened by the parent process, so we set the
		# "subprocess" value for use in conditional logging code
		# involving PORTAGE_LOG_FILE.
		if not self.unmerge:
			# unmerge phases have separate logs
			if self.settings.get("PORTAGE_BACKGROUND") == "1":
				self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
			else:
				self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
			self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
		self.settings["PORTAGE_BACKGROUND"] = "subprocess"
		self.settings.backup_changes("PORTAGE_BACKGROUND")

		rval = 1
		try:
			if self.unmerge:
				if not mylink.exists():
					rval = os.EX_OK
				elif mylink.unmerge(
					ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
					mylink.lockdb()
					try:
						mylink.delete()
					finally:
						mylink.unlockdb()
					rval = os.EX_OK
			else:
				rval = mylink.merge(self.pkgloc, self.infloc,
					myebuild=self.myebuild, mydbapi=self.mydbapi,
					prev_mtimes=self.prev_mtimes, counter=counter)
		except SystemExit:
			raise
		except:
			traceback.print_exc()
		finally:
			# Call os._exit() from finally block, in order to suppress any
			# finally blocks from earlier in the call stack. See bug #345289.
			os._exit(rval)
Example #14
    def _spawn(self, args, fd_pipes, **kwargs):
        """
		Fork a subprocess, apply local settings, and call
		dblink.merge().
		"""

        elog_reader_fd, elog_writer_fd = os.pipe()
        fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
                    fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        blockers = None
        if self.blockers is not None:
            # Query blockers in the main process, since closing
            # of file descriptors in the subprocess can prevent
            # access to open database connections such as that
            # used by the sqlite metadata cache module.
            blockers = self.blockers()
        mylink = portage.dblink(self.mycat,
                                self.mypkg,
                                settings=self.settings,
                                treetype=self.treetype,
                                vartree=self.vartree,
                                blockers=blockers,
                                scheduler=self.scheduler,
                                pipe=elog_writer_fd)
        fd_pipes[elog_writer_fd] = elog_writer_fd
        self._elog_reg_id = self.scheduler.register(elog_reader_fd,
                                                    self._registered_events,
                                                    self._elog_output_handler)

        # If a concurrent emerge process tries to install a package
        # in the same SLOT as this one at the same time, there is an
        # extremely unlikely chance that the COUNTER values will not be
        # ordered correctly unless we lock the vdb here.
        # FEATURES=parallel-install skips this lock in order to
        # improve performance, and the risk is practically negligible.
        self._lock_vdb()
        counter = None
        if not self.unmerge:
            counter = self.vartree.dbapi.counter_tick()

        pid = os.fork()
        if pid != 0:
            if not isinstance(pid, int):
                raise AssertionError("fork returned non-integer: %s" %
                                     (repr(pid), ))

            os.close(elog_writer_fd)
            self._elog_reader_fd = elog_reader_fd
            self._buf = ""
            self._elog_keys = set()

            # invalidate relevant vardbapi caches
            if self.vartree.dbapi._categories is not None:
                self.vartree.dbapi._categories = None
            self.vartree.dbapi._pkgs_changed = True
            self.vartree.dbapi._clear_pkg_cache(mylink)

            portage.process.spawned_pids.append(pid)
            return [pid]

        os.close(elog_reader_fd)

        # TODO: Find out why PyPy 1.8 with close_fds=True triggers
        # "[Errno 9] Bad file descriptor" in subprocesses. It could
        # be due to garbage collection of file objects that were not
        # closed before going out of scope, since PyPy's garbage
        # collector does not support the refcounting semantics that
        # CPython does.
        close_fds = platform.python_implementation() != 'PyPy'
        portage.process._setup_pipes(fd_pipes, close_fds=close_fds)

        # Use default signal handlers since the ones inherited
        # from the parent process are irrelevant here.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        portage.output.havecolor = self.settings.get('NOCOLOR') \
         not in ('yes', 'true')

        # In this subprocess we want mylink._display_merge() to use
        # stdout/stderr directly since they are pipes. This behavior
        # is triggered when mylink._scheduler is None.
        mylink._scheduler = None

        # Avoid wasteful updates of the vdb cache.
        self.vartree.dbapi._flush_cache_enabled = False

        # In this subprocess we don't want PORTAGE_BACKGROUND to
        # suppress stdout/stderr output since they are pipes. We
        # also don't want to open PORTAGE_LOG_FILE, since it will
        # already be opened by the parent process, so we set the
        # "subprocess" value for use in conditional logging code
        # involving PORTAGE_LOG_FILE.
        if not self.unmerge:
            # unmerge phases have separate logs
            if self.settings.get("PORTAGE_BACKGROUND") == "1":
                self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
            else:
                self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
            self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
        self.settings["PORTAGE_BACKGROUND"] = "subprocess"
        self.settings.backup_changes("PORTAGE_BACKGROUND")

        rval = 1
        try:
            if self.unmerge:
                if not mylink.exists():
                    rval = os.EX_OK
                elif mylink.unmerge(
                        ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
                    mylink.lockdb()
                    try:
                        mylink.delete()
                    finally:
                        mylink.unlockdb()
                    rval = os.EX_OK
            else:
                rval = mylink.merge(self.pkgloc,
                                    self.infloc,
                                    myebuild=self.myebuild,
                                    mydbapi=self.mydbapi,
                                    prev_mtimes=self.prev_mtimes,
                                    counter=counter)
        except SystemExit:
            raise
        except:
            traceback.print_exc()
        finally:
            # Call os._exit() from finally block, in order to suppress any
            # finally blocks from earlier in the call stack. See bug #345289.
            os._exit(rval)
Example #15
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None):
    """will merge a .tbz2 file, returning a list of runtime dependencies
		that must be satisfied, or None if there was a merge error.	This
		code assumes the package exists."""

    warnings.warn("portage.pkgmerge() is deprecated", DeprecationWarning, stacklevel=2)

    if mydbapi is None:
        mydbapi = portage.db[myroot]["bintree"].dbapi
    if vartree is None:
        vartree = portage.db[myroot]["vartree"]
    if mytbz2[-5:] != ".tbz2":
        print(_("!!! Not a .tbz2 file"))
        return 1

    tbz2_lock = None
    mycat = None
    mypkg = None
    did_merge_phase = False
    success = False
    try:
        """ Don't lock the tbz2 file because the filesytem could be readonly or
		shared by a cluster."""
        # tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)

        mypkg = os.path.basename(mytbz2)[:-5]
        xptbz2 = portage.xpak.tbz2(mytbz2)
        mycat = xptbz2.getfile(_unicode_encode("CATEGORY", encoding=_encodings["repo.content"]))
        if not mycat:
            writemsg(_("!!! CATEGORY info missing from info chunk, aborting...\n"), noiselevel=-1)
            return 1
        mycat = _unicode_decode(mycat, encoding=_encodings["repo.content"], errors="replace")
        mycat = mycat.strip()

        # These are the same directories that would be used at build time.
        builddir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
        catdir = os.path.dirname(builddir)
        pkgloc = os.path.join(builddir, "image")
        infloc = os.path.join(builddir, "build-info")
        myebuild = os.path.join(infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
        portage.util.ensure_dirs(os.path.dirname(catdir), uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
        portage.util.ensure_dirs(catdir, uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
        try:
            shutil.rmtree(builddir)
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise
            del e
        for mydir in (builddir, pkgloc, infloc):
            portage.util.ensure_dirs(mydir, uid=portage_uid, gid=portage_gid, mode=0o755)
        writemsg_stdout(_(">>> Extracting info\n"))
        xptbz2.unpackinfo(infloc)
        mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
        # Store the md5sum in the vdb.
        fp = open(_unicode_encode(os.path.join(infloc, "BINPKGMD5")), "w")
        fp.write(str(portage.checksum.perform_md5(mytbz2)) + "\n")
        fp.close()

        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
        mysettings.backup_changes("PORTAGE_BINPKG_FILE")
        debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

        # Eventually we'd like to pass in the saved ebuild env here.
        retval = portage.doebuild(
            myebuild, "setup", myroot, mysettings, debug=debug, tree="bintree", mydbapi=mydbapi, vartree=vartree
        )
        if retval != os.EX_OK:
            writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
            return retval

        writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
        retval = portage.process.spawn_bash(
            "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc), env=mysettings.environ()
        )
        if retval != os.EX_OK:
            writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
            return retval
            # portage.locks.unlockfile(tbz2_lock)
            # tbz2_lock = None

        mylink = portage.dblink(
            mycat, mypkg, myroot, mysettings, vartree=vartree, treetype="bintree", blockers=blockers
        )
        retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
        did_merge_phase = True
        success = retval == os.EX_OK
        return retval
    finally:
        mysettings.pop("PORTAGE_BINPKG_FILE", None)
        if tbz2_lock:
            portage.locks.unlockfile(tbz2_lock)
        if True:
            if not did_merge_phase:
                # The merge phase handles this already.  Callers don't know how
                # far this function got, so we have to call elog_process() here
                # so that it's only called once.
                from portage.elog import elog_process

                elog_process(mycat + "/" + mypkg, mysettings)
            try:
                if success:
                    shutil.rmtree(builddir)
            except (IOError, OSError) as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
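pkgmerge() above announces itself as deprecated via DeprecationWarning, but a direct call is still a one-liner; a hedged sketch with a hypothetical binary package path (mysettings is assumed to be an initialized portage.config, and the function returns os.EX_OK on success, as the code shows):

import os

retval = pkgmerge('/var/cache/binpkgs/app-editors/nano-5.9.tbz2', '/', mysettings)
if retval != os.EX_OK:
    print('merge failed with status %s' % retval)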
Example #16
def get_contents(cpv):
    cpv = portage.catsplit(cpv)
    return set(portage.dblink(cpv[0], cpv[1], root, settings).getcontents().keys())
Example #17
def pkgmerge(mytbz2,
             myroot,
             mysettings,
             mydbapi=None,
             vartree=None,
             prev_mtimes=None,
             blockers=None):
    """will merge a .tbz2 file, returning a list of runtime dependencies
		that must be satisfied, or None if there was a merge error.	This
		code assumes the package exists."""

    warnings.warn("portage.pkgmerge() is deprecated",
                  DeprecationWarning,
                  stacklevel=2)

    if mydbapi is None:
        mydbapi = portage.db[myroot]["bintree"].dbapi
    if vartree is None:
        vartree = portage.db[myroot]["vartree"]
    if mytbz2[-5:] != ".tbz2":
        print(_("!!! Not a .tbz2 file"))
        return 1

    tbz2_lock = None
    mycat = None
    mypkg = None
    did_merge_phase = False
    success = False
    try:
        """ Don't lock the tbz2 file because the filesytem could be readonly or
		shared by a cluster."""
        #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)

        mypkg = os.path.basename(mytbz2)[:-5]
        xptbz2 = portage.xpak.tbz2(mytbz2)
        mycat = xptbz2.getfile(
            _unicode_encode("CATEGORY", encoding=_encodings['repo.content']))
        if not mycat:
            writemsg(
                _("!!! CATEGORY info missing from info chunk, aborting...\n"),
                noiselevel=-1)
            return 1
        mycat = _unicode_decode(mycat,
                                encoding=_encodings['repo.content'],
                                errors='replace')
        mycat = mycat.strip()

        # These are the same directories that would be used at build time.
        builddir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage", mycat,
                                mypkg)
        catdir = os.path.dirname(builddir)
        pkgloc = os.path.join(builddir, "image")
        infloc = os.path.join(builddir, "build-info")
        myebuild = os.path.join(infloc,
                                os.path.basename(mytbz2)[:-4] + "ebuild")
        portage.util.ensure_dirs(os.path.dirname(catdir),
                                 uid=portage_uid,
                                 gid=portage_gid,
                                 mode=0o70,
                                 mask=0)
        portage.util.ensure_dirs(catdir,
                                 uid=portage_uid,
                                 gid=portage_gid,
                                 mode=0o70,
                                 mask=0)
        try:
            shutil.rmtree(builddir)
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise
            del e
        for mydir in (builddir, pkgloc, infloc):
            portage.util.ensure_dirs(mydir,
                                     uid=portage_uid,
                                     gid=portage_gid,
                                     mode=0o755)
        writemsg_stdout(_(">>> Extracting info\n"))
        xptbz2.unpackinfo(infloc)
        mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
        # Store the md5sum in the vdb.
        fp = open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5')), 'w')
        fp.write(str(portage.checksum.perform_md5(mytbz2)) + "\n")
        fp.close()

        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
        mysettings.backup_changes("PORTAGE_BINPKG_FILE")
        debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

        # Eventually we'd like to pass in the saved ebuild env here.
        retval = portage.doebuild(myebuild,
                                  "setup",
                                  myroot,
                                  mysettings,
                                  debug=debug,
                                  tree="bintree",
                                  mydbapi=mydbapi,
                                  vartree=vartree)
        if retval != os.EX_OK:
            writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
            return retval

        writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
        retval = portage.process.spawn_bash(
            "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
            env=mysettings.environ())
        if retval != os.EX_OK:
            writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
            return retval
        #portage.locks.unlockfile(tbz2_lock)
        #tbz2_lock = None

        mylink = portage.dblink(mycat,
                                mypkg,
                                myroot,
                                mysettings,
                                vartree=vartree,
                                treetype="bintree",
                                blockers=blockers)
        retval = mylink.merge(pkgloc,
                              infloc,
                              myroot,
                              myebuild,
                              cleanup=0,
                              mydbapi=mydbapi,
                              prev_mtimes=prev_mtimes)
        did_merge_phase = True
        success = retval == os.EX_OK
        return retval
    finally:
        mysettings.pop("PORTAGE_BINPKG_FILE", None)
        if tbz2_lock:
            portage.locks.unlockfile(tbz2_lock)
        if True:
            if not did_merge_phase:
                # The merge phase handles this already.  Callers don't know how
                # far this function got, so we have to call elog_process() here
                # so that it's only called once.
                from portage.elog import elog_process
                elog_process(mycat + "/" + mypkg, mysettings)
            try:
                if success:
                    shutil.rmtree(builddir)
            except (IOError, OSError) as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
Example #18
def get_contents(cpv):
    cpv = portage.catsplit(cpv)
    return set(
        portage.dblink(cpv[0], cpv[1], root, settings).getcontents().keys())
Example #19
    def get_contents(self, c, p, v):
        dbl = portage.dblink(c, '%s-%s' % (p, v), self._settings['ROOT'],
                             self._settings)
        return dbl.getcontents()
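The get_contents() helpers above differ only in how the cpv is supplied; a hedged usage sketch of the module-level form from Examples #16/#18 (the cpv is hypothetical, and root/settings are assumed module globals as in those snippets):

files = get_contents('app-editors/nano-5.9')
print('%d paths owned by the package' % len(files))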
Example #20
def _GetFilesForTarget(target, root='/'):
  """Locate all the files to package for |target|

  This does not cover ELF dependencies.

  Args:
    target: The toolchain target name
    root: The root path to pull all packages from

  Returns:
    A tuple of a set of all packable paths, and a set of all paths which
    are also native ELFs
  """
  paths = set()
  elfs = set()

  # Find all the files owned by the packages for this target.
  for pkg in GetTargetPackages(target):

    # Skip Go compiler from redistributable packages.
    # The "go" executable has GOROOT=/usr/lib/go/${CTARGET} hardcoded
    # into it. Due to this, the toolchain cannot be unpacked anywhere
    # else and be readily useful. To enable packaging Go, we need to:
    # -) Tweak the wrappers/environment to override GOROOT
    #    automatically based on the unpack location.
    # -) Make sure the ELF dependency checking and wrapping logic
    #    below skips the Go toolchain executables and libraries.
    # -) Make sure the packaging process maintains the relative
    #    timestamps of precompiled standard library packages.
    #    (see dev-lang/go ebuild for details).
    if pkg == 'ex_go':
      continue

    # Use armv7a-cros-linux-gnueabi/compiler-rt for
    # armv7a-cros-linux-gnueabihf/compiler-rt.
    # Currently the armv7a-cros-linux-gnueabi is actually
    # the same as armv7a-cros-linux-gnueabihf with different names.
    # Because of that, for compiler-rt, it generates the same binary in
    # the same location. To avoid the installation conflict, we do not
    # install anything for 'armv7a-cros-linux-gnueabihf'. This would cause
    # problems if other people try to use a standalone
    # armv7a-cros-linux-gnueabihf toolchain.
    if 'compiler-rt' in pkg and 'armv7a-cros-linux-gnueabi' in target:
      atom = GetPortagePackage(target, pkg)
      cat, pn = atom.split('/')
      ver = GetInstalledPackageVersions(atom, root=root)[0]
      dblink = portage.dblink(cat, pn + '-' + ver, myroot=root,
                              settings=portage.settings)
      contents = dblink.getcontents()
      if not contents:
        if 'hf' in target:
          new_target = 'armv7a-cros-linux-gnueabi'
        else:
          new_target = 'armv7a-cros-linux-gnueabihf'
        atom = GetPortagePackage(new_target, pkg)
    else:
      atom = GetPortagePackage(target, pkg)

    cat, pn = atom.split('/')
    ver = GetInstalledPackageVersions(atom, root=root)[0]
    logging.info('packaging %s-%s', atom, ver)

    dblink = portage.dblink(cat, pn + '-' + ver, myroot=root,
                            settings=portage.settings)
    contents = dblink.getcontents()
    for obj in contents:
      ptype = contents[obj][0]
      if not IsPathPackagable(ptype, obj):
        continue

      if ptype == 'obj':
        # For native ELFs, we need to pull in their dependencies too.
        if FileIsCrosSdkElf(obj):
          logging.debug('Adding ELF %s', obj)
          elfs.add(obj)
      logging.debug('Adding path %s', obj)
      paths.add(obj)

  return paths, elfs
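A hedged usage sketch of the function above, with a hypothetical target name and board sysroot:

paths, elfs = _GetFilesForTarget('armv7a-cros-linux-gnueabihf',
                                 root='/build/arm-generic')
print('%d files to package, %d of them native ELFs' % (len(paths), len(elfs)))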