Example #1
	def _start_unmerge(self, lock_task):
		self._assert_current(lock_task)
		if lock_task.cancelled:
			self._default_final_exit(lock_task)
			return

		# Propagate any exception raised while acquiring the build-dir lock.
		lock_task.future.result()
		portage.prepare_build_dirs(
			settings=self.settings, cleanup=True)

		# Output only gets logged if it comes after prepare_build_dirs()
		# which initializes PORTAGE_LOG_FILE.
		retval, pkgmap = _unmerge_display(self.pkg.root_config,
			self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
			writemsg_level=self._writemsg_level)

		if retval != os.EX_OK:
			self._async_unlock_builddir(returncode=retval)
			return

		self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
			noiselevel=-1)
		self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))

		cat, pf = portage.catsplit(self.pkg.cpv)
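		# MergeProcess with unmerge=True removes the installed instance from
		# the vartree; _unmerge_exit runs once the subtask completes.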
		unmerge_task = MergeProcess(
			mycat=cat, mypkg=pf, settings=self.settings,
			treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
			scheduler=self.scheduler, background=self.background,
			mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
			prev_mtimes=self.ldpath_mtimes,
			logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)

		self._start_task(unmerge_task, self._unmerge_exit)
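
The examples on this page all revolve around _unmerge_display(), which prints (and, once PORTAGE_LOG_FILE is initialized, logs) the unmerge summary and returns a (returncode, pkgmap) pair. A minimal standalone sketch of that contract follows; preview_unmerge, root_config, and cpv are hypothetical names standing in for the attributes used above, and the import path is assumed:

import os

# Assumed import location for _unmerge_display within the Portage tree.
from _emerge.unmerge import _unmerge_display


def preview_unmerge(root_config, cpv, opts=None):
    """Sketch only: print the unmerge summary for one cpv and return the pkgmap."""
    retval, pkgmap = _unmerge_display(
        root_config,    # RootConfig of the target ROOT (self.pkg.root_config above)
        opts or {},     # emerge options (self.opts above)
        "unmerge",      # the unmerge action
        [cpv],          # cpvs/atoms selected for removal
        clean_delay=0,  # skip the CLEAN_DELAY countdown
    )
    if retval != os.EX_OK:
        raise RuntimeError("unmerge display failed with %s" % retval)
    return pkgmap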
Example #2
	def _start(self):

		vardb = self.pkg.root_config.trees["vartree"].dbapi
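		# getpath() returns the installed package's vardb entry
		# (typically ${EROOT}/var/db/pkg/<category>/<pf>).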
		dbdir = vardb.getpath(self.pkg.cpv)
		if not os.path.exists(dbdir):
			# Apparently the package got uninstalled
			# already, so we can safely return early.
			self.returncode = os.EX_OK
			self._async_wait()
			return

		self.settings.setcpv(self.pkg)
		cat, pf = portage.catsplit(self.pkg.cpv)
		myebuildpath = os.path.join(dbdir, pf + ".ebuild")

		try:
			portage.doebuild_environment(myebuildpath, "prerm",
				settings=self.settings, db=vardb)
		except UnsupportedAPIException:
			# This is safe to ignore since this function is
			# guaranteed to set PORTAGE_BUILDDIR even though
			# it raises UnsupportedAPIException. The error
			# will be logged when it prevents the pkg_prerm
			# and pkg_postrm phases from executing.
			pass

		self._builddir_lock = EbuildBuildDir(
			scheduler=self.scheduler, settings=self.settings)
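		# Hold the build-dir lock while PORTAGE_BUILDDIR is prepared and used.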
		self._builddir_lock.lock()

		portage.prepare_build_dirs(
			settings=self.settings, cleanup=True)

		# Output only gets logged if it comes after prepare_build_dirs()
		# which initializes PORTAGE_LOG_FILE.
		retval, pkgmap = _unmerge_display(self.pkg.root_config,
			self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
			writemsg_level=self._writemsg_level)

		if retval != os.EX_OK:
			self._builddir_lock.unlock()
			self.returncode = retval
			self._async_wait()
			return

		self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
			noiselevel=-1)
		self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))

		unmerge_task = MergeProcess(
			mycat=cat, mypkg=pf, settings=self.settings,
			treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
			scheduler=self.scheduler, background=self.background,
			mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
			prev_mtimes=self.ldpath_mtimes,
			logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)

		self._start_task(unmerge_task, self._unmerge_exit)
Example #3
    def _start_unmerge(self, lock_task):
        self._assert_current(lock_task)
        if lock_task.cancelled:
            self._default_final_exit(lock_task)
            return

        lock_task.future.result()
        portage.prepare_build_dirs(settings=self.settings, cleanup=True)

        # Output only gets logged if it comes after prepare_build_dirs()
        # which initializes PORTAGE_LOG_FILE.
        retval, _ = _unmerge_display(
            self.pkg.root_config,
            self.opts,
            "unmerge",
            [self.pkg.cpv],
            clean_delay=0,
            writemsg_level=self._writemsg_level,
        )

        if retval != os.EX_OK:
            self._async_unlock_builddir(returncode=retval)
            return

        self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv, ),
                             noiselevel=-1)
        self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv, ))

        cat, pf = portage.catsplit(self.pkg.cpv)
        unmerge_task = MergeProcess(
            mycat=cat,
            mypkg=pf,
            settings=self.settings,
            treetype="vartree",
            vartree=self.pkg.root_config.trees["vartree"],
            scheduler=self.scheduler,
            background=self.background,
            mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
            prev_mtimes=self.ldpath_mtimes,
            logfile=self.settings.get("PORTAGE_LOG_FILE"),
            unmerge=True,
        )

        self._start_task(unmerge_task, self._unmerge_exit)
Example #4
    def testUnmergeOrder(self):
        ebuilds = {
            "c/x-1": {},
            "c/y-2": {},
            "c/y-3": {},
            "c/z-4": {},
            "c/z-5": {},
            "c/z-6": {},
            "c/zz-4": {},
            "c/zz-5": {},
            "c/zz-6": {},
        }
        installed = {
            "c/x-1": {},
            "c/y-2": {},
            "c/z-4": {},
            "c/z-5": {},
            "c/z-6": {},
            "c/zz-4": {},
            "c/zz-5": {},
            "c/zz-6": {},
        }
        test_cases = (

            # cp = category/package
            # cpv = category/package-version

            # Single cpv atom, representing the only available instance of the cp.
            # The pkgmap should contain exactly that cpv and no omitted packages.
            _TestData(["c/x-1"], [(["c/x-1"], [])]),

            # Single cp atom. The pkgmap should contain the only available cpv to
            # which the cp expands, with no omitted packages.
            _TestData(["c/x"], [(["c/x-1"], [])]),

            # Duplicate cpv atom, representing the only available instance of the cp.
            # The pkgmap should contain the cpv with no omitted packages, and an empty
            # entry representing the duplicate.
            _TestData(["c/x-1", "c/x-1"], [(["c/x-1"], []), ([], [])]),

            # Duplicate cp atom, which expands to the only available instance. The pkgmap
            # should contain the only available cpv to which the cp expands, with no
            # omitted packages, and a second empty entry representing the duplicate.
            _TestData(["c/x", "c/x"], [(["c/x-1"], []), ([], [])]),

            # Single cpv atom, representing one of the two available instances. The
            # pkgmap should contain exactly that cpv. Since the other instance is not
            # installed, there should be no omitted packages.
            _TestData(["c/y-2"], [(["c/y-2"], [])]),

            # Single cp atom. The pkgmap should contain exactly the only installed
            # instance and no omitted packages.
            _TestData(["c/y"], [(["c/y-2"], [])]),

            # Single cpv atom, representing one of the three available instances.
            # The pkgmap should contain exactly the cpv. Since all three instances
            # are installed, the other two instances should be in the omitted packages.
            _TestData(["c/z-4"], [(["c/z-4"], ["c/z-5", "c/z-6"])]),

            # Single cp atom. The pkgmap should contain all three installed instances.
            # Since there are no other installed instances, there should be no omitted
            # packages.
            _TestData(["c/z"], [(["c/z-4", "c/z-5", "c/z-6"], [])]),

            # Two cpv atoms belonging to the same cp. The pkgmap should contain an
            # entry for each cpv, in the same order. The third installed cpv belonging
            # to the cp should be listed in the omitted section of each entry.
            _TestData(["c/z-4", "c/z-5"], [(["c/z-4"], ["c/z-6"]),
                                           (["c/z-5"], ["c/z-6"])]),
            _TestData(["c/z-5", "c/z-4"], [(["c/z-5"], ["c/z-6"]),
                                           (["c/z-4"], ["c/z-6"])]),

            # Three cpv atoms belonging to the same cp. The pkgmap should contain an
            # entry for each cpv, in the same order. Since there are no other instances
            # of the cp, the omitted section of each entry should be empty.
            _TestData(["c/z-4", "c/z-5", "c/z-6"], [(["c/z-4"], []),
                                                    (["c/z-5"], []),
                                                    (["c/z-6"], [])]),
            _TestData(["c/z-6", "c/z-5", "c/z-4"], [(["c/z-6"], []),
                                                    (["c/z-5"], []),
                                                    (["c/z-4"], [])]),

            # First a cp atom, then a cpv atom that is an instance of the cp. The
            # pkgmap should contain an entry containing all installed cpv's that the cp
            # expands to, in sorted order. It should then contain an empty entry
            # representing the input cpv that is already covered by the expansion of
            # the cp.
            _TestData(["c/z", "c/z-4"], [(["c/z-4", "c/z-5", "c/z-6"], []),
                                         ([], [])]),
            _TestData(["c/z", "c/z-6"], [(["c/z-4", "c/z-5", "c/z-6"], []),
                                         ([], [])]),

            # First a cpv atom, then the cp to which the cpv belongs. The pkgmap
            # should contain an entry for the first cpv, then an entry containing
            # the remaining cpv's to which the cp expands.
            _TestData(["c/z-4", "c/z"], [(["c/z-4"], []),
                                         (["c/z-5", "c/z-6"], [])]),
            _TestData(["c/z-6", "c/z"], [(["c/z-6"], []),
                                         (["c/z-4", "c/z-5"], [])]),

            # More mixed cp/cpv's. The cp should expand to all cpv's except those
            # covered by a preceding cpv. The cpv's after the cp should result in empty
            # entries, since they are already covered by the expansion of the cp.
            _TestData(["c/z", "c/z-4", "c/z-5"],
                      [(["c/z-4", "c/z-5", "c/z-6"], []), ([], []), ([], [])]),
            _TestData(["c/z", "c/z-5", "c/z-4"],
                      [(["c/z-4", "c/z-5", "c/z-6"], []), ([], []), ([], [])]),
            _TestData(["c/z-4", "c/z", "c/z-5"], [(["c/z-4"], []),
                                                  (["c/z-5", "c/z-6"], []),
                                                  ([], [])]),
            _TestData(["c/z-5", "c/z", "c/z-4"], [(["c/z-5"], []),
                                                  (["c/z-4", "c/z-6"], []),
                                                  ([], [])]),
            _TestData(["c/z-4", "c/z-5", "c/z"], [(["c/z-4"], []),
                                                  (["c/z-5"], []),
                                                  (["c/z-6"], [])]),
            _TestData(["c/z-5", "c/z-4", "c/z"], [(["c/z-5"], []),
                                                  (["c/z-4"], []),
                                                  (["c/z-6"], [])]),
            _TestData(["c/z", "c/z-4", "c/z-5", "c/z-6"],
                      [(["c/z-4", "c/z-5", "c/z-6"], []), ([], []), ([], []),
                       ([], [])]),
            _TestData(["c/z", "c/z-6", "c/z-5", "c/z-4"],
                      [(["c/z-4", "c/z-5", "c/z-6"], []), ([], []), ([], []),
                       ([], [])]),
            _TestData(["c/z-4", "c/z", "c/z-5", "c/z-6"],
                      [(["c/z-4"], []), (["c/z-5", "c/z-6"], []), ([], []),
                       ([], [])]),
            _TestData(["c/z-6", "c/z", "c/z-5", "c/z-4"],
                      [(["c/z-6"], []), (["c/z-4", "c/z-5"], []), ([], []),
                       ([], [])]),
            _TestData(["c/z-4", "c/z-5", "c/z", "c/z-6"], [(["c/z-4"], []),
                                                           (["c/z-5"], []),
                                                           (["c/z-6"], []),
                                                           ([], [])]),
            _TestData(["c/z-6", "c/z-5", "c/z", "c/z-4"], [(["c/z-6"], []),
                                                           (["c/z-5"], []),
                                                           (["c/z-4"], []),
                                                           ([], [])]),
            _TestData(["c/z-4", "c/z-5", "c/z-6", "c/z"], [(["c/z-4"], []),
                                                           (["c/z-5"], []),
                                                           (["c/z-6"], []),
                                                           ([], [])]),
            _TestData(["c/z-6", "c/z-5", "c/z-4", "c/z"], [(["c/z-6"], []),
                                                           (["c/z-5"], []),
                                                           (["c/z-4"], []),
                                                           ([], [])]),

            # Two cpv's that do not belong to the same cp. The pkgmap should contain an
            # entry for each cpv, in the same order. If there are other installed
            # instances of the cp to which the cpv belongs, they should be listed
            # in the omitted section.
            _TestData(["c/x-1", "c/y-2"], [(["c/x-1"], []), (["c/y-2"], [])]),
            _TestData(["c/y-2", "c/x-1"], [(["c/y-2"], []), (["c/x-1"], [])]),
            _TestData(["c/x-1", "c/z-4"], [(["c/x-1"], []),
                                           (["c/z-4"], ["c/z-5", "c/z-6"])]),
            _TestData(["c/z-4", "c/x-1"], [(["c/z-4"], ["c/z-5", "c/z-6"]),
                                           (["c/x-1"], [])]),

            # Mixed cpv's and a cp, where some of the cpv's are not instances of the
            # cp. The pkgmap should contain an entry for each atom, in the same order,
            # with the cp expanded to all installed instances not already covered by
            # a preceding cpv.
            _TestData(["c/x-1", "c/z"], [(["c/x-1"], []),
                                         (["c/z-4", "c/z-5", "c/z-6"], [])]),
            _TestData(["c/z", "c/x-1"], [(["c/z-4", "c/z-5", "c/z-6"], []),
                                         (["c/x-1"], [])]),
            _TestData(["c/x-1", "c/z-4", "c/z"], [(["c/x-1"], []),
                                                  (["c/z-4"], []),
                                                  (["c/z-5", "c/z-6"], [])]),
            _TestData(["c/z-4", "c/z", "c/x-1"], [(["c/z-4"], []),
                                                  (["c/z-5", "c/z-6"], []),
                                                  (["c/x-1"], [])]),
            _TestData(["c/x-1", "c/z", "c/z-4"],
                      [(["c/x-1"], []), (["c/z-4", "c/z-5", "c/z-6"], []),
                       ([], [])]),
            _TestData(["c/z", "c/z-4", "c/x-1"],
                      [(["c/z-4", "c/z-5", "c/z-6"], []), ([], []),
                       (["c/x-1"], [])]),

            # Two different cp's. The pkgmap should contain an entry for each cp,
            # in the same order, containing all cpv's that each cp expands to.
            _TestData(["c/z", "c/zz"], [(["c/z-4", "c/z-5", "c/z-6"], []),
                                        (["c/zz-4", "c/zz-5", "c/zz-6"], [])]),
            _TestData(["c/zz", "c/z"], [(["c/zz-4", "c/zz-5", "c/zz-6"], []),
                                        (["c/z-4", "c/z-5", "c/z-6"], [])]),
        )

        playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)

        try:
            eroot = playground.settings["EROOT"]
            root_config = playground.trees[eroot]["root_config"]

            for test_case in test_cases:

                res, pkgmap = _unmerge_display(root_config, [],
                                               "unmerge",
                                               test_case.unmerge_files,
                                               ordered=True)

                self.assertEqual(res, os.EX_OK)
                self.assertEqual(pkgmap, test_case.expected_pkgmap)
        finally:
            playground.cleanup()
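
The test above relies on a _TestData helper that is not included in this excerpt. A minimal sketch of what it is assumed to look like, with field names inferred from the attribute accesses in the test (the real definition may differ):

import collections

# Assumed shape only: each case pairs the atoms handed to _unmerge_display with
# the pkgmap (one (selected, omitted) entry per input atom) it is expected to
# produce.
_TestData = collections.namedtuple(
    "_TestData", ("unmerge_files", "expected_pkgmap"))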
Example #5
    def _start(self):

        vardb = self.pkg.root_config.trees["vartree"].dbapi
        dbdir = vardb.getpath(self.pkg.cpv)
        if not os.path.exists(dbdir):
            # Apparently the package got uninstalled
            # already, so we can safely return early.
            self.returncode = os.EX_OK
            self._async_wait()
            return

        self.settings.setcpv(self.pkg)
        cat, pf = portage.catsplit(self.pkg.cpv)
        myebuildpath = os.path.join(dbdir, pf + ".ebuild")

        try:
            portage.doebuild_environment(myebuildpath,
                                         "prerm",
                                         settings=self.settings,
                                         db=vardb)
        except UnsupportedAPIException:
            # This is safe to ignore since this function is
            # guaranteed to set PORTAGE_BUILDDIR even though
            # it raises UnsupportedAPIException. The error
            # will be logged when it prevents the pkg_prerm
            # and pkg_postrm phases from executing.
            pass

        self._builddir_lock = EbuildBuildDir(scheduler=self.scheduler,
                                             settings=self.settings)
        self._builddir_lock.lock()

        portage.prepare_build_dirs(settings=self.settings, cleanup=True)

        # Output only gets logged if it comes after prepare_build_dirs()
        # which initializes PORTAGE_LOG_FILE.
        retval, pkgmap = _unmerge_display(self.pkg.root_config,
                                          self.opts,
                                          "unmerge", [self.pkg.cpv],
                                          clean_delay=0,
                                          writemsg_level=self._writemsg_level)

        if retval != os.EX_OK:
            self._builddir_lock.unlock()
            self.returncode = retval
            self._async_wait()
            return

        self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv, ),
                             noiselevel=-1)
        self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv, ))

        unmerge_task = MergeProcess(
            mycat=cat,
            mypkg=pf,
            settings=self.settings,
            treetype="vartree",
            vartree=self.pkg.root_config.trees["vartree"],
            scheduler=self.scheduler,
            background=self.background,
            mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
            prev_mtimes=self.ldpath_mtimes,
            logfile=self.settings.get("PORTAGE_LOG_FILE"),
            unmerge=True)

        self._start_task(unmerge_task, self._unmerge_exit)