def process(mysettings, key, logentries, fulltext):
	"""
	Append the full elog text for package *key* to the shared
	<elogdir>/summary.log file, prefixed with a timestamped header.

	@param mysettings: portage config providing PORT_LOGDIR
	@param key: package identifier (cpv) written into the header
	@param logentries: structured log entries (unused here; the
		pre-rendered fulltext is written instead)
	@param fulltext: complete rendered elog text to append
	@return: path of the summary log file
	"""
	if mysettings["PORT_LOGDIR"] != "":
		elogdir = os.path.join(mysettings["PORT_LOGDIR"], "elog")
	else:
		elogdir = os.path.join(os.sep, "var", "log", "portage", "elog")
	ensure_dirs(elogdir, uid=portage_uid, gid=portage_gid, mode=0o2770)

	# TODO: Locking
	elogfilename = elogdir+"/summary.log"
	elogfile = codecs.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='a', encoding=_encodings['content'], errors='backslashreplace')
	try:
		apply_permissions(elogfilename, mode=0o60, mask=0)
		time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
			time.localtime(time.time()))
		# Avoid potential UnicodeDecodeError later.
		time_str = _unicode_decode(time_str,
			encoding=_encodings['content'], errors='replace')
		elogfile.write(_(">>> Messages generated by process %(pid)d on %(time)s for package %(pkg)s:\n\n") %
				{"pid": os.getpid(), "time": time_str, "pkg": key})
		elogfile.write(fulltext)
		elogfile.write("\n")
	finally:
		# Close even if apply_permissions or a write raises, so the
		# file descriptor is not leaked.
		elogfile.close()

	return elogfilename
Example #2
0
	def _create_binpkgs(self, binpkgs):
		"""
		Create a binary package file under self.pkgdir for every
		cpv -> metadata entry in binpkgs, filling in default
		metadata values (SLOT, KEYWORDS, BUILD_TIME) as needed.

		@param binpkgs: mapping (or iterable of pairs) of cpv strings
			to metadata dicts
		"""
		# When using BUILD_ID, there can be multiple instances for the
		# same cpv. Therefore, binpkgs may be an iterable instead of
		# a dict.
		items = getattr(binpkgs, 'items', None)
		items = items() if items is not None else binpkgs
		for cpv, metadata in items:
			a = Atom("=" + cpv, allow_repo=True)
			repo = a.repo
			if repo is None:
				repo = "test_repo"

			pn = catsplit(a.cp)[1]
			cat, pf = catsplit(a.cpv)
			# Copy so the caller's metadata dict is not mutated.
			metadata = metadata.copy()
			metadata.setdefault("SLOT", "0")
			metadata.setdefault("KEYWORDS", "x86")
			metadata.setdefault("BUILD_TIME", "0")
			metadata["repository"] = repo
			metadata["CATEGORY"] = cat
			metadata["PF"] = pf

			repo_dir = self.pkgdir
			category_dir = os.path.join(repo_dir, cat)
			if "BUILD_ID" in metadata:
				# Multi-instance layout: <cat>/<pn>/<pf>-<build_id>.xpak
				binpkg_path = os.path.join(category_dir, pn,
					"%s-%s.xpak"% (pf, metadata["BUILD_ID"]))
			else:
				# Classic layout: <cat>/<pf>.tbz2
				binpkg_path = os.path.join(category_dir, pf + ".tbz2")

			ensure_dirs(os.path.dirname(binpkg_path))
			t = portage.xpak.tbz2(binpkg_path)
			t.recompose_mem(portage.xpak.xpak_mem(metadata))
Example #3
0
	def _chpathtool_exit(self, chpathtool):
		"""
		Completion callback for the chpathtool subprocess: on success,
		record our EPREFIX in the package's build-info and relocate the
		extracted image so it merges under our prefix.

		@param chpathtool: the finished chpathtool task
		"""
		if self._final_exit(chpathtool) != os.EX_OK:
			# Prefix adjustment failed: release the build dir lock and
			# report the error before finishing this composite task.
			self._unlock_builddir()
			self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
				(self.settings["EPREFIX"],),
				noiselevel=-1, level=logging.ERROR)
			self.wait()
			return

		# We want to install in "our" prefix, not the binary one
		with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
			encoding=_encodings['fs'], errors='strict'), mode='w',
			encoding=_encodings['repo.content'], errors='strict') as f:
			f.write(self.settings["EPREFIX"] + "\n")

		# Move the files to the correct location for merge.
		image_tmp_dir = os.path.join(
			self.settings["PORTAGE_BUILDDIR"], "image_tmp")
		build_d = os.path.join(self.settings["D"],
			self._build_prefix.lstrip(os.sep))
		if not os.path.isdir(build_d):
			# Assume this is a virtual package or something.
			shutil.rmtree(self._image_dir)
			ensure_dirs(self.settings["ED"])
		else:
			# Stage the old-prefix subtree aside, wipe the rest of the
			# image, then rename the staged tree into place at ED.
			os.rename(build_d, image_tmp_dir)
			shutil.rmtree(self._image_dir)
			ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
			os.rename(image_tmp_dir, self.settings["ED"])

		self.wait()
Example #4
0
	def _extractor_exit(self, extractor):
		"""
		Completion callback for the binary package extractor: read the
		EPREFIX recorded at build time and, if it differs from ours,
		spawn chpathtool to rewrite paths inside the extracted image.

		@param extractor: the finished extractor task
		"""
		if self._default_exit(extractor) != os.EX_OK:
			self._unlock_builddir()
			self._writemsg_level("!!! Error Extracting '%s'\n" % \
				self._pkg_path, noiselevel=-1, level=logging.ERROR)
			self.wait()
			return

		try:
			with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
				encoding=_encodings['fs'], errors='strict'), mode='r',
				encoding=_encodings['repo.content'], errors='replace') as f:
				self._build_prefix = f.read().rstrip('\n')
		except IOError:
			# The EPREFIX file may be missing or unreadable; fall back
			# to an empty (no-prefix) build prefix.
			self._build_prefix = ""

		if self._build_prefix == self.settings["EPREFIX"]:
			# Prefixes already match: no path rewriting needed,
			# mark this composite task as successfully completed.
			ensure_dirs(self.settings["ED"])
			self._current_task = None
			self.returncode = os.EX_OK
			self.wait()
			return

		env = self.settings.environ()
		env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
		chpathtool = SpawnProcess(
			args=[portage._python_interpreter,
			os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
			self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
			background=self.background, env=env,
			scheduler=self.scheduler,
			logfile=self.settings.get('PORTAGE_LOG_FILE'))
		self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
		self._start_task(chpathtool, self._chpathtool_exit)
Example #5
0
def _ensure_log_subdirs(logdir, subdir):
	"""
	This assumes that logdir exists, and creates subdirectories down
	to subdir as necessary. The gid of logdir is copied to all
	subdirectories, along with 0o2070 mode bits if present. Both logdir
	and subdir are assumed to be normalized absolute paths.
	"""
	st = os.stat(logdir)
	uid = -1  # -1 means "leave uid unchanged" when passed to ensure_dirs
	gid = st.st_gid
	# setgid bit plus group permission bits of logdir, if set.
	grp_mode = 0o2070 & st.st_mode

	# If logdir is writable by the portage group but its uid
	# is not portage_uid, then set the uid to portage_uid if
	# we have privileges to do so, for compatibility with our
	# default logrotate config (see bug 378451). With the
	# "su portage portage" directive and logrotate-3.8.0,
	# logrotate's chown call during the compression phase will
	# only succeed if the log file's uid is portage_uid.
	if grp_mode and gid == portage_gid and \
		portage.data.secpass >= 2:
		uid = portage_uid
		if st.st_uid != portage_uid:
			ensure_dirs(logdir, uid=uid)

	# Walk the path components of subdir that lie below logdir,
	# creating each level with the inherited gid/group-mode bits.
	logdir_split_len = len(logdir.split(os.sep))
	subdir_split = subdir.split(os.sep)[logdir_split_len:]
	subdir_split.reverse()
	current = logdir
	while subdir_split:
		current = os.path.join(current, subdir_split.pop())
		ensure_dirs(current, uid=uid, gid=gid, mode=grp_mode, mask=0)
Example #6
0
def process(mysettings, key, logentries, fulltext):
	"""
	Write the full elog text for one package to its own log file under
	$PORT_LOGDIR/elog (or the system default), named
	<PF>:<UTC timestamp>.log. With the "split-elog" feature the file is
	placed in a per-category subdirectory; otherwise the category is
	prepended to the file name.

	@param mysettings: portage config providing PORT_LOGDIR, CATEGORY,
		PF and features
	@param key: package identifier (unused; CATEGORY/PF are used instead)
	@param logentries: structured log entries (unused; the pre-rendered
		fulltext is written instead)
	@param fulltext: complete rendered elog text to write
	@return: path of the log file that was written
	"""
	if mysettings["PORT_LOGDIR"] != "":
		elogdir = os.path.join(mysettings["PORT_LOGDIR"], "elog")
	else:
		elogdir = os.path.join(os.sep, "var", "log", "portage", "elog")
	ensure_dirs(elogdir, uid=portage_uid, gid=portage_gid, mode=0o2770)

	cat = mysettings['CATEGORY']
	pf = mysettings['PF']

	elogfilename = pf + ":" + _unicode_decode(
		time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
		encoding=_encodings['content'], errors='replace') + ".log"

	if "split-elog" in mysettings.features:
		elogfilename = os.path.join(elogdir, cat, elogfilename)
	else:
		elogfilename = os.path.join(elogdir, cat + ':' + elogfilename)
	ensure_dirs(os.path.dirname(elogfilename),
		uid=portage_uid, gid=portage_gid, mode=0o2770)

	elogfile = codecs.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='w', encoding=_encodings['content'], errors='backslashreplace')
	try:
		elogfile.write(fulltext)
	finally:
		# Always release the descriptor, even if the write fails.
		elogfile.close()

	return elogfilename
Example #7
0
	def set_root_override(self, root_overwrite=None):
		"""
		Finalize self.target_root (ROOT), validate it against SYSROOT,
		create it, and derive self.eroot and self.global_config_path.

		@param root_overwrite: ROOT value from make.conf, honored only
			when the constructor did not already provide one; blank or
			whitespace-only values are treated as unset
		@raises InvalidLocation: if SYSROOT equals neither "/" nor the
			final ROOT
		"""
		# Allow ROOT setting to come from make.conf if it's not overridden
		# by the constructor argument (from the calling environment).
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		self.target_root = self.target_root or os.sep

		# Normalize to an absolute path with exactly one trailing separator.
		self.target_root = normalize_path(os.path.abspath(
			self.target_root)).rstrip(os.path.sep) + os.path.sep

		if self.sysroot != "/" and self.sysroot != self.target_root:
			writemsg(_("!!! Error: SYSROOT (currently %s) must "
				"equal / or ROOT (currently %s).\n") %
				(self.sysroot, self.target_root),
				noiselevel=-1)
			raise InvalidLocation(self.sysroot)

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		# EROOT = ROOT + EPREFIX, with a single trailing separator.
		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		# make.globals is looked up under EPREFIX when one is configured.
		self.global_config_path = GLOBAL_CONFIG_PATH
		if portage.const.EPREFIX:
			self.global_config_path = os.path.join(portage.const.EPREFIX,
				GLOBAL_CONFIG_PATH.lstrip(os.sep))
def process(mysettings, key, logentries, fulltext):
	"""
	Append the full elog text for package *key* to
	<logdir>/elog/summary.log, creating the log directory tree and
	copying ownership/permission bits for logrotate compatibility.

	@param mysettings: portage config providing PORT_LOGDIR and EPREFIX
	@param key: package identifier (cpv) written into the header
	@param logentries: structured log entries (unused; the pre-rendered
		fulltext is written instead)
	@param fulltext: complete rendered elog text to append
	@return: path of the summary log file
	"""
	if mysettings.get("PORT_LOGDIR"):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
	else:
		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
			"var", "log", "portage")

	if not os.path.isdir(logdir):
		# Only initialize group/mode if the directory doesn't
		# exist, so that we don't override permissions if they
		# were previously set by the administrator.
		# NOTE: These permissions should be compatible with our
		# default logrotate config as discussed in bug 374287.
		logdir_uid = -1
		if portage.data.secpass >= 2:
			logdir_uid = portage_uid
		ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)

	elogdir = os.path.join(logdir, "elog")
	_ensure_log_subdirs(logdir, elogdir)

	# TODO: Locking
	elogfilename = elogdir+"/summary.log"
	elogfile = io.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='a', encoding=_encodings['content'], errors='backslashreplace')
	try:
		# Copy group permission bits from parent directory.
		elogdir_st = os.stat(elogdir)
		elogdir_gid = elogdir_st.st_gid
		elogdir_grp_mode = 0o060 & elogdir_st.st_mode

		# Copy the uid from the parent directory if we have privileges
		# to do so, for compatibility with our default logrotate
		# config (see bug 378451). With the "su portage portage"
		# directive and logrotate-3.8.0, logrotate's chown call during
		# the compression phase will only succeed if the log file's uid
		# is portage_uid.
		logfile_uid = -1
		if portage.data.secpass >= 2:
			logfile_uid = elogdir_st.st_uid
		apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
			mode=elogdir_grp_mode, mask=0)

		time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
			time.localtime(time.time()))
		# Avoid potential UnicodeDecodeError later.
		time_str = _unicode_decode(time_str,
			encoding=_encodings['content'], errors='replace')
		elogfile.write(_unicode_decode(
			_(">>> Messages generated by process " +
			"%(pid)d on %(time)s for package %(pkg)s:\n\n") %
			{"pid": os.getpid(), "time": time_str, "pkg": key}))
		elogfile.write(_unicode_decode(fulltext))
		elogfile.write(_unicode_decode("\n"))
	finally:
		# Close even if apply_permissions or a write raises, so the
		# file descriptor is not leaked.
		elogfile.close()

	return elogfilename
Example #9
0
def process(mysettings, key, logentries, fulltext):
	"""
	Write the full elog text for one package to its own log file under
	<logdir>/elog, named <PF>:<UTC timestamp>.log. With the
	"split-elog" feature the file goes into a per-category
	subdirectory; otherwise the category is prepended to the file name.
	Ownership/permission bits are copied from the parent directory for
	logrotate compatibility.

	@param mysettings: portage config providing PORT_LOGDIR, EPREFIX,
		CATEGORY, PF and features
	@param key: package identifier (unused; CATEGORY/PF are used instead)
	@param logentries: structured log entries (unused; the pre-rendered
		fulltext is written instead)
	@param fulltext: complete rendered elog text to write
	@return: path of the log file that was written
	"""
	if mysettings.get("PORT_LOGDIR"):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
	else:
		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
			"var", "log", "portage")

	if not os.path.isdir(logdir):
		# Only initialize group/mode if the directory doesn't
		# exist, so that we don't override permissions if they
		# were previously set by the administrator.
		# NOTE: These permissions should be compatible with our
		# default logrotate config as discussed in bug 374287.
		uid = -1
		if portage.data.secpass >= 2:
			uid = portage_uid
		ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)

	cat = mysettings['CATEGORY']
	pf = mysettings['PF']

	elogfilename = pf + ":" + _unicode_decode(
		time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
		encoding=_encodings['content'], errors='replace') + ".log"

	if "split-elog" in mysettings.features:
		log_subdir = os.path.join(logdir, "elog", cat)
		elogfilename = os.path.join(log_subdir, elogfilename)
	else:
		log_subdir = os.path.join(logdir, "elog")
		elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
	_ensure_log_subdirs(logdir, log_subdir)

	# Context manager guarantees the descriptor is released even if
	# the write raises.
	with io.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='w', encoding=_encodings['content'],
		errors='backslashreplace') as elogfile:
		elogfile.write(_unicode_decode(fulltext))

	# Copy group permission bits from parent directory.
	elogdir_st = os.stat(log_subdir)
	elogdir_gid = elogdir_st.st_gid
	elogdir_grp_mode = 0o060 & elogdir_st.st_mode

	# Copy the uid from the parent directory if we have privileges
	# to do so, for compatibility with our default logrotate
	# config (see bug 378451). With the "su portage portage"
	# directive and logrotate-3.8.0, logrotate's chown call during
	# the compression phase will only succeed if the log file's uid
	# is portage_uid.
	logfile_uid = -1
	if portage.data.secpass >= 2:
		logfile_uid = elogdir_st.st_uid
	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
		mode=elogdir_grp_mode, mask=0)

	return elogfilename
Example #10
0
	def _ensure_dir(self, path):
		"""
		Create the specified directory. Also, copy gid and group mode
		bits from self.pkgdir if possible.
		@param path: Absolute path of the directory to be created.
		@type path: String
		"""
		try:
			pkgdir_st = os.stat(self.pkgdir)
		except OSError:
			# pkgdir is missing or unreadable; fall back to a plain
			# directory creation with default ownership/mode.
			ensure_dirs(path)
			return
		pkgdir_gid = pkgdir_st.st_gid
		# setgid bit plus group permission bits of pkgdir.
		pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
		try:
			ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
		except PortageException:
			# Tolerate failure to apply ownership/mode as long as the
			# directory itself exists.
			if not os.path.isdir(path):
				raise
Example #11
0
	def set_root_override(self, root_overwrite=None):
		"""
		Finalize self.target_root (ROOT), create it, and derive
		self.eroot and self.global_config_path.

		@param root_overwrite: ROOT value from make.conf, honored only
			when the constructor did not already provide one; blank or
			whitespace-only values are treated as unset
		"""
		# Allow ROOT setting to come from make.conf if it's not overridden
		# by the constructor argument (from the calling environment).
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		if self.target_root is None:
			self.target_root = "/"

		# Normalize to an absolute path with exactly one trailing separator.
		self.target_root = normalize_path(os.path.abspath(
			self.target_root)).rstrip(os.path.sep) + os.path.sep

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		# EROOT = ROOT + EPREFIX, with a single trailing separator.
		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		# make.globals should not be relative to config_root
		# because it only contains constants. However, if EPREFIX
		# is set then there are two possible scenarios:
		# 1) If $ROOT == "/" then make.globals should be
		#    relative to EPREFIX.
		# 2) If $ROOT != "/" then the correct location of
		#    make.globals needs to be specified in the constructor
		#    parameters, since it's a property of the host system
		#    (and the current config represents the target system).
		self.global_config_path = GLOBAL_CONFIG_PATH
		if self.eprefix:
			if self.target_root == "/":
				# case (1) above
				self.global_config_path = os.path.join(self.eprefix,
					GLOBAL_CONFIG_PATH.lstrip(os.sep))
			else:
				# case (2) above
				# For now, just assume make.globals is relative
				# to EPREFIX.
				# TODO: Pass in more info to the constructor,
				# so we know the host system configuration.
				self.global_config_path = os.path.join(self.eprefix,
					GLOBAL_CONFIG_PATH.lstrip(os.sep))
Example #12
0
	def set_root_override(self, root_overwrite=None):
		"""Resolve the final ROOT path, create it, and derive EROOT
		and the make.globals location."""
		# A make.conf ROOT is honored only when the constructor left
		# it unset; a blank value counts as unset.
		if self.target_root is None and root_overwrite is not None:
			self.target_root = root_overwrite
			if not self.target_root.strip():
				self.target_root = None
		if self.target_root is None:
			self.target_root = "/"

		# Canonical absolute form with exactly one trailing separator.
		normalized = normalize_path(os.path.abspath(self.target_root))
		self.target_root = normalized.rstrip(os.path.sep) + os.path.sep

		ensure_dirs(self.target_root)
		self._check_var_directory("ROOT", self.target_root)

		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep

		# make.globals is relocated under EPREFIX when one is configured.
		if portage.const.EPREFIX:
			self.global_config_path = os.path.join(portage.const.EPREFIX,
				GLOBAL_CONFIG_PATH.lstrip(os.sep))
		else:
			self.global_config_path = GLOBAL_CONFIG_PATH
Example #13
0
	def _create_binpkgs(self, binpkgs):
		"""Write a .tbz2 binary package under self.pkgdir for each
		cpv -> metadata entry in *binpkgs*."""
		for cpv, pkg_metadata in binpkgs.items():
			atom = Atom("=" + cpv, allow_repo=True)
			# Fall back to the playground's default repository name.
			repo = atom.repo if atom.repo is not None else "test_repo"

			cat, pf = catsplit(atom.cpv)
			# Work on a copy so the caller's dict is untouched.
			xpak_data = dict(pkg_metadata)
			for field, default in (("SLOT", "0"),
					("KEYWORDS", "x86"), ("BUILD_TIME", "0")):
				xpak_data.setdefault(field, default)
			xpak_data["repository"] = repo
			xpak_data["CATEGORY"] = cat
			xpak_data["PF"] = pf

			category_dir = os.path.join(self.pkgdir, cat)
			ensure_dirs(category_dir)
			binpkg = portage.xpak.tbz2(
				os.path.join(category_dir, pf + ".tbz2"))
			binpkg.recompose_mem(portage.xpak.xpak_mem(xpak_data))
Example #14
0
    def testSlotAbiEmerge(self):
        """
        Integration test: run real emerge subprocesses inside a
        ResolverPlayground to verify that slot-operator (:=) deps are
        rewritten when the bound glib sub-slot changes, and rewritten
        back after the newer glib is masked and downgraded.
        """

        debug = False

        ebuilds = {
            "dev-libs/glib-1.2.10": {
                "SLOT": "1"
            },
            "dev-libs/glib-2.30.2": {
                "EAPI": "5",
                "SLOT": "2/2.30"
            },
            "dev-libs/glib-2.32.3": {
                "EAPI": "5",
                "SLOT": "2/2.32"
            },
            "dev-libs/dbus-glib-0.98": {
                "EAPI": "5",
                "DEPEND": "dev-libs/glib:2=",
                "RDEPEND": "dev-libs/glib:2=",
            },
        }
        installed = {
            "dev-libs/glib-1.2.10": {
                "EAPI": "5",
                "SLOT": "1"
            },
            "dev-libs/glib-2.30.2": {
                "EAPI": "5",
                "SLOT": "2/2.30"
            },
            "dev-libs/dbus-glib-0.98": {
                "EAPI": "5",
                "DEPEND": "dev-libs/glib:2/2.30=",
                "RDEPEND": "dev-libs/glib:2/2.30=",
            },
        }

        world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        installed=installed,
                                        world=world,
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        trees = playground.trees
        portdb = trees[eroot]["porttree"].dbapi
        vardb = trees[eroot]["vartree"].dbapi
        var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
        user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
        package_mask_path = os.path.join(user_config_dir, "package.mask")

        portage_python = portage._python_interpreter
        ebuild_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "ebuild"))
        emerge_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "emerge"))

        test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
        self.assertFalse(test_ebuild is None)

        # Interleaved sequence of subprocess argument tuples and
        # single-callable tuples; callables are assertions evaluated
        # in-process between the emerge runs.
        test_commands = (
            emerge_cmd + (
                "--oneshot",
                "dev-libs/glib",
            ),
            (lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get(
                "dev-libs/dbus-glib-0.98", ["RDEPEND"])[0], ),
            (
                BASH_BINARY,
                "-c",
                "echo %s >> %s" % tuple(
                    map(
                        portage._shell_quote,
                        (
                            ">=dev-libs/glib-2.32",
                            package_mask_path,
                        ),
                    )),
            ),
            emerge_cmd + (
                "--oneshot",
                "dev-libs/glib",
            ),
            (lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get(
                "dev-libs/dbus-glib-0.98", ["RDEPEND"])[0], ),
        )

        distdir = playground.distdir
        pkgdir = playground.pkgdir
        fake_bin = os.path.join(eprefix, "bin")
        portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
        profile_path = settings.profile_path

        # Put fake_bin first on PATH so the stub chown/chgrp symlinks
        # below shadow the real tools.
        path = os.environ.get("PATH")
        if path is not None and not path.strip():
            path = None
        if path is None:
            path = ""
        else:
            path = ":" + path
        path = fake_bin + path

        # Ensure PORTAGE_PYM_PATH leads PYTHONPATH for the subprocesses.
        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and pythonpath.split(
                ":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX":
            eprefix,
            "PATH":
            path,
            "PORTAGE_PYTHON":
            portage_python,
            "PORTAGE_REPOSITORIES":
            settings.repositories.config_string(),
            "PYTHONDONTWRITEBYTECODE":
            os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH":
            pythonpath,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                "__PORTAGE_TEST_HARDLINK_LOCKS"]

        dirs = [
            distdir, fake_bin, portage_tmpdir, user_config_dir, var_cache_edb
        ]
        # chown/chgrp are replaced with /bin/true so the unprivileged
        # test run does not fail on ownership changes.
        true_symlinks = ["chown", "chgrp"]
        true_binary = find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")
        try:
            for d in dirs:
                ensure_dirs(d)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))
            with open(os.path.join(var_cache_edb, "counter"), "wb") as f:
                f.write(b"100")
            # non-empty system set keeps --depclean quiet
            with open(os.path.join(profile_path, "packages"), "w") as f:
                f.write("*dev-libs/token-system-pkg")

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for i, args in enumerate(test_commands):

                if hasattr(args[0], "__call__"):
                    self.assertTrue(args[0](),
                                    "callable at index %s failed" % (i, ))
                    continue

                proc = subprocess.Popen(args, env=env, stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "emerge failed with args %s" % (args, ))
        finally:
            playground.cleanup()
	def testProfileDefaultEAPI(self):
		"""
		Verify that the profile-default-eapi profile format (with
		profile_eapi_when_unspecified = 5) makes atoms with slot
		specifiers legal in repository, profile and user config files,
		and that they land in the expected internal manager structures.
		"""

		repo_configs = {
			"test_repo": {
				"layout.conf": (
					"profile-formats = profile-default-eapi",
					"profile_eapi_when_unspecified = 5"
				),
			}
		}

		# (relative profile path, file contents) pairs written under
		# the repository's profiles/ directory.
		profiles = (
			(
				"",
				{
					"package.mask": ("sys-libs/A:1",),
					"package.use": ("sys-libs/A:1 flag",)
				}
			),
			(
				"default/linux",
				{
					"package.mask": ("sys-libs/B:1",),
					"package.use": ("sys-libs/B:1 flag",),
					"package.keywords": ("sys-libs/B:1 x86",)
				}
			),
			(
				"default/linux/x86",
				{
					"package.mask": ("sys-libs/C:1",),
					"package.use": ("sys-libs/C:1 flag",),
					"package.keywords": ("sys-libs/C:1 x86",),
					"parent": ("..",)
				}
			),
		)

		user_profile = {
			"package.mask": ("sys-libs/D:1",),
			"package.use": ("sys-libs/D:1 flag",),
			"package.keywords": ("sys-libs/D:1 x86",),
		}

		# (accessor into the config's managers, expected content) pairs.
		test_cases = (
			(lambda x: x._mask_manager._pmaskdict, {
				"sys-libs/A": ("sys-libs/A:1::test_repo",),
				"sys-libs/B": ("sys-libs/B:1",),
				"sys-libs/C": ("sys-libs/C:1",),
				"sys-libs/D": ("sys-libs/D:1",),
			}),
			(lambda x: x._use_manager._repo_puse_dict, {
				"test_repo": {
					"sys-libs/A": {
						"sys-libs/A:1": ("flag",)
					}
				}
			}),
			(lambda x: x._use_manager._pkgprofileuse, (
				{"sys-libs/B": {"sys-libs/B:1": "flag"}},
				{"sys-libs/C": {"sys-libs/C:1": "flag"}},
				{},
				{"sys-libs/D": {"sys-libs/D:1": "flag"}},
			)),
			(lambda x: x._keywords_manager._pkeywords_list, (
					{"sys-libs/B": {"sys-libs/B:1": ["x86"]}},
					{"sys-libs/C": {"sys-libs/C:1": ["x86"]}},
					{"sys-libs/D": {"sys-libs/D:1": ["x86"]}},
				)
			)
		)

		playground = ResolverPlayground(debug=False,
			repo_configs=repo_configs)
		try:
			repo_dir = (playground.settings.repositories.
				get_location_for_name("test_repo"))
			profile_root = os.path.join(repo_dir, "profiles")
			profile_info = [(os.path.join(profile_root, p), data)
				for p, data in profiles]
			profile_info.append((os.path.join(playground.eroot,
				USER_CONFIG_PATH, "profile"), user_profile))

			for prof_path, data in profile_info:
				ensure_dirs(prof_path)
				for k, v in data.items():
					with io.open(os.path.join(prof_path, k), mode="w",
						encoding=_encodings["repo.content"]) as f:
						for line in v:
							f.write("%s\n" % line)

			# The config must be reloaded in order to account
			# for the above profile customizations.
			playground.reload_config()

			for fn, expected in test_cases:
				result = self._translate_result(fn(playground.settings))
				self.assertEqual(result, expected)

		finally:
			playground.cleanup()
Example #16
0
    def testProfilePackageSet(self):
        """
        Verify the profile-set layout.conf format: "packages" entries
        without a leading '*' contribute to the profile package set,
        and a child profile can subtract entries (-app-misc/B), so
        @world stays satisfied while --depclean removes the subtracted
        package.
        """

        repo_configs = {
            "test_repo": {
                "layout.conf": ("profile-formats = profile-set", ),
            }
        }

        # (relative profile path, file contents) pairs written under
        # the repository's profiles/ directory.
        profiles = (
            ('default/linux', {
                "eapi": ("5", ),
                "packages": (
                    "*sys-libs/A",
                    "app-misc/A",
                    "app-misc/B",
                    "app-misc/C",
                ),
            }),
            ('default/linux/x86', {
                "eapi": ("5", ),
                "packages": ("-app-misc/B", ),
                "parent": ("..", )
            }),
        )

        ebuilds = {
            "sys-libs/A-1": {
                "EAPI": "5",
            },
            "app-misc/A-1": {
                "EAPI": "5",
            },
            "app-misc/B-1": {
                "EAPI": "5",
            },
            "app-misc/C-1": {
                "EAPI": "5",
            },
        }

        installed = {
            "sys-libs/A-1": {
                "EAPI": "5",
            },
            "app-misc/A-1": {
                "EAPI": "5",
            },
            "app-misc/B-1": {
                "EAPI": "5",
            },
            "app-misc/C-1": {
                "EAPI": "5",
            },
        }

        test_cases = (
            ResolverPlaygroundTestCase(
                ["@world"],
                options={
                    "--update": True,
                    "--deep": True
                },
                mergelist=[],
                success=True,
            ),
            ResolverPlaygroundTestCase([],
                                       options={"--depclean": True},
                                       success=True,
                                       cleanlist=["app-misc/B-1"]),
        )

        playground = ResolverPlayground(debug=False,
                                        ebuilds=ebuilds,
                                        installed=installed,
                                        repo_configs=repo_configs)
        try:
            repo_dir = (playground.settings.repositories.get_location_for_name(
                "test_repo"))
            profile_root = os.path.join(repo_dir, "profiles")

            for p, data in profiles:
                prof_path = os.path.join(profile_root, p)
                ensure_dirs(prof_path)
                for k, v in data.items():
                    with io.open(os.path.join(prof_path, k),
                                 mode="w",
                                 encoding=_encodings["repo.content"]) as f:
                        for line in v:
                            f.write("%s\n" % line)

            # The config must be reloaded in order to account
            # for the above profile customizations.
            playground.reload_config()

            for test_case in test_cases:
                playground.run_TestCase(test_case)
                self.assertEqual(test_case.test_success, True,
                                 test_case.fail_msg)

        finally:
            playground.cleanup()
Example #17
0
def process(mysettings, key, logentries, fulltext):
    """Append a summary of elog messages for *key* to summary.log.

    Writes a timestamped header followed by *fulltext* to
    ``<logdir>/elog/summary.log``, where the log directory comes from
    PORTAGE_LOGDIR or falls back to ``$EPREFIX/var/log/portage``.
    Creates the directories as needed and adjusts the log file's
    ownership/permissions to match the elog directory.

    Returns the path of the summary log file.
    """
    if mysettings.get("PORTAGE_LOGDIR"):
        logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
    else:
        logdir = os.path.join(
            os.sep, mysettings["EPREFIX"].lstrip(os.sep),
            "var", "log", "portage")

    if not os.path.isdir(logdir):
        # Only initialize group/mode if the directory doesn't exist, so
        # that permissions previously set by the administrator are kept.
        # NOTE: These permissions should be compatible with our default
        # logrotate config as discussed in bug 374287.
        dir_uid = portage_uid if portage.data.secpass >= 2 else -1
        ensure_dirs(logdir, uid=dir_uid, gid=portage_gid, mode=0o2770)

    elogdir = os.path.join(logdir, "elog")
    _ensure_log_subdirs(logdir, elogdir)

    # TODO: Locking
    elogfilename = elogdir + "/summary.log"
    try:
        elogfile = io.open(
            _unicode_encode(elogfilename,
                            encoding=_encodings["fs"],
                            errors="strict"),
            mode="a",
            encoding=_encodings["content"],
            errors="backslashreplace",
        )
    except IOError as err:
        func_call = "open('%s', 'a')" % elogfilename
        if err.errno == errno.EACCES:
            raise portage.exception.PermissionDenied(func_call)
        if err.errno == errno.EPERM:
            raise portage.exception.OperationNotPermitted(func_call)
        if err.errno == errno.EROFS:
            raise portage.exception.ReadOnlyFileSystem(func_call)
        raise

    # Copy the group permission bits from the parent directory, and the
    # uid too when we have the privileges to do so, for compatibility
    # with our default logrotate config (see bug 378451). With the
    # "su portage portage" directive and logrotate-3.8.0, logrotate's
    # chown call during the compression phase will only succeed if the
    # log file's uid is portage_uid.
    dir_stat = os.stat(elogdir)
    file_uid = dir_stat.st_uid if portage.data.secpass >= 2 else -1
    apply_permissions(elogfilename,
                      uid=file_uid,
                      gid=dir_stat.st_gid,
                      mode=dir_stat.st_mode & 0o060,
                      mask=0)

    # Avoid potential UnicodeDecodeError in Python 2, since strftime
    # returns bytes in Python 2, and %Z may contain non-ascii chars.
    time_str = _unicode_decode(
        time.strftime("%Y-%m-%d %H:%M:%S %Z",
                      time.localtime(time.time())),
        encoding=_encodings["content"],
        errors="replace")

    header = _(">>> Messages generated by process "
               "%(pid)d on %(time)s for package %(pkg)s:\n\n") % {
                   "pid": portage.getpid(),
                   "time": time_str,
                   "pkg": key,
               }
    elogfile.write(header)
    elogfile.write(_unicode_decode(fulltext))
    elogfile.write("\n")
    elogfile.close()

    return elogfilename
Пример #18
0
	def testSimple(self):
		"""End-to-end smoke test for repoman.

		Builds a disposable ebuild repository inside a ResolverPlayground,
		runs ``repoman full`` from several working directories through a
		symlink (to exercise canonical vs. non-canonical path handling),
		and, if a git binary is available, drives ``repoman commit`` in a
		freshly initialized git repository.
		"""
		debug = False

		# Skip gracefully when the environment cannot run repoman.
		skip_reason = self._must_skip()
		if skip_reason:
			self.portage_skip = skip_reason
			self.assertFalse(True, skip_reason)
			return

		copyright_header = """# Copyright 1999-%s Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: $
""" % time.gmtime().tm_year

		repo_configs = {
			"test_repo": {
				"layout.conf":
					(
						"update-changelog = true",
					),
			}
		}

		# (keyword, profile path, stability) tuples, written out later
		# as the repository's profiles.desc.
		profiles = (
			("x86", "default/linux/x86/test_profile", "stable"),
			("x86", "default/linux/x86/test_dev", "dev"),
			("x86", "default/linux/x86/test_exp", "exp"),
		)

		profile = {
			"eapi": ("5",),
			"package.use.stable.mask": ("dev-libs/A flag",)
		}

		ebuilds = {
			"dev-libs/A-0": {
				"COPYRIGHT_HEADER" : copyright_header,
				"DESCRIPTION" : "Desc goes here",
				"EAPI" : "5",
				"HOMEPAGE" : "https://example.com",
				"IUSE" : "flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
			},
			"dev-libs/A-1": {
				"COPYRIGHT_HEADER" : copyright_header,
				"DESCRIPTION" : "Desc goes here",
				"EAPI" : "4",
				"HOMEPAGE" : "https://example.com",
				"IUSE" : "flag",
				"KEYWORDS": "~x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
			},
			"dev-libs/B-1": {
				"COPYRIGHT_HEADER" : copyright_header,
				"DESCRIPTION" : "Desc goes here",
				"EAPI" : "4",
				"HOMEPAGE" : "https://example.com",
				"IUSE" : "flag",
				"KEYWORDS": "~x86",
				"LICENSE": "GPL-2",
			},
			"dev-libs/C-0": {
				"COPYRIGHT_HEADER" : copyright_header,
				"DESCRIPTION" : "Desc goes here",
				"EAPI" : "4",
				"HOMEPAGE" : "https://example.com",
				"IUSE" : "flag",
				# must be unstable, since dev-libs/A[flag] is stable masked
				"KEYWORDS": "~x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "flag? ( dev-libs/A[flag] )",
			},
		}
		licenses = ["GPL-2"]
		arch_list = ["x86"]
		metadata_xsd = os.path.join(REPOMAN_BASE_PATH, "cnf/metadata.xsd")
		metadata_xml_files = (
			(
				"dev-libs/A",
				{
					"flags" : "<flag name='flag' restrict='&gt;=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
				},
			),
			(
				"dev-libs/B",
				{
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
			(
				"dev-libs/C",
				{
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
		)

		use_desc = (
			("flag", "Description of how USE='flag' affects packages"),
		)

		playground = ResolverPlayground(ebuilds=ebuilds,
			profile=profile, repo_configs=repo_configs, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		portdb = playground.trees[playground.eroot]["porttree"].dbapi
		homedir = os.path.join(eroot, "home")
		distdir = os.path.join(eprefix, "distdir")
		test_repo_location = settings.repositories["test_repo"].location
		profiles_dir = os.path.join(test_repo_location, "profiles")
		license_dir = os.path.join(test_repo_location, "licenses")

		repoman_cmd = (portage._python_interpreter, "-b", "-Wd",
			os.path.join(self.bindir, "repoman"))

		git_binary = find_binary("git")
		git_cmd = (git_binary,)

		cp_binary = find_binary("cp")
		self.assertEqual(cp_binary is None, False,
			"cp command not found")
		cp_cmd = (cp_binary,)

		test_ebuild = portdb.findname("dev-libs/A-1")
		self.assertFalse(test_ebuild is None)

		committer_name = "Gentoo Dev"
		committer_email = "*****@*****.**"

		# (cwd relative to the repo root, command) pairs, executed in
		# order when a git binary is available.
		git_test = (
			("", repoman_cmd + ("manifest",)),
			("", git_cmd + ("config", "--global", "user.name", committer_name,)),
			("", git_cmd + ("config", "--global", "user.email", committer_email,)),
			("", git_cmd + ("init-db",)),
			("", git_cmd + ("add", ".")),
			("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
			("", repoman_cmd + ("full", "-d")),
			("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
			("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
			("", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 2")),
			("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")),
			("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")),
			("dev-libs", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 3")),
			("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")),
			("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")),
			("dev-libs/A", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 4")),
		)

		# Minimal controlled environment for the child processes.
		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"DISTDIR" : distdir,
			"GENTOO_COMMITTER_NAME" : committer_name,
			"GENTOO_COMMITTER_EMAIL" : committer_email,
			"HOME" : homedir,
			"PATH" : os.environ["PATH"],
			"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
			"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
		}

		if os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			env["FEATURES"] = "-sandbox -usersandbox"

		dirs = [homedir, license_dir, profiles_dir, distdir]
		try:
			for d in dirs:
				ensure_dirs(d)
			with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f:
				f.write(copyright_header)
			with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f:
				for x in profiles:
					f.write("%s %s %s\n" % x)

			# ResolverPlayground only created the first profile,
			# so create the remaining ones.
			for x in profiles[1:]:
				sub_profile_dir = os.path.join(profiles_dir, x[1])
				ensure_dirs(sub_profile_dir)
				for config_file, lines in profile.items():
					file_name = os.path.join(sub_profile_dir, config_file)
					with open(file_name, "w") as f:
						for line in lines:
							f.write("%s\n" % line)

			for x in licenses:
				open(os.path.join(license_dir, x), 'wb').close()
			with open(os.path.join(profiles_dir, "arch.list"), 'w') as f:
				for x in arch_list:
					f.write("%s\n" % x)
			with open(os.path.join(profiles_dir, "use.desc"), 'w') as f:
				for k, v in use_desc:
					f.write("%s - %s\n" % (k, v))
			for cp, xml_data in metadata_xml_files:
				with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
					f.write(playground.metadata_xml_template % xml_data)
			# Use a symlink to test_repo, in order to trigger bugs
			# involving canonical vs. non-canonical paths.
			test_repo_symlink = os.path.join(eroot, "test_repo_symlink")
			os.symlink(test_repo_location, test_repo_symlink)
			metadata_xsd_dest = os.path.join(test_repo_location, 'metadata/xml-schema/metadata.xsd')
			os.makedirs(os.path.dirname(metadata_xsd_dest))
			os.symlink(metadata_xsd, metadata_xsd_dest)

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			# Run "repoman full" from several cwds; all must succeed.
			for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
				abs_cwd = os.path.join(test_repo_symlink, cwd)
				proc = subprocess.Popen(repoman_cmd + ("full",),
					cwd=abs_cwd, env=env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"repoman failed in %s" % (cwd,))

			if git_binary is not None:
				for cwd, cmd in git_test:
					abs_cwd = os.path.join(test_repo_symlink, cwd)
					proc = subprocess.Popen(cmd,
						cwd=abs_cwd, env=env, stdout=stdout)

					if debug:
						proc.wait()
					else:
						output = proc.stdout.readlines()
						proc.wait()
						proc.stdout.close()
						if proc.returncode != os.EX_OK:
							for line in output:
								sys.stderr.write(_unicode_decode(line))

					self.assertEqual(os.EX_OK, proc.returncode,
						"%s failed in %s" % (cmd, cwd,))
		finally:
			playground.cleanup()
Пример #19
0
def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
	"""
	Create and sanitize the temporary build directory tree
	(PORTAGE_BUILDDIR, HOME, PKG_LOGDIR, T) for the current build.

	@param myroot: ignored, retained for backward compatibility
	@param settings: package config instance (required)
	@param cleanup: when True (and FEATURES=keeptemp is unset), remove
		stale HOME and T contents left over from a previous run
	@return: 1 on a permission-related failure, otherwise None
	"""
	myroot = None

	if settings is None:
		raise TypeError("settings argument is required")

	mysettings = settings
	clean_dirs = [mysettings["HOME"]]

	# We enable cleanup when we want to make sure old cruft (such as the old
	# environment) doesn't interfere with the current phase.
	if cleanup and 'keeptemp' not in mysettings.features:
		clean_dirs.append(mysettings["T"])

	for clean_dir in clean_dirs:
		try:
			shutil.rmtree(clean_dir)
		except OSError as oe:
			if errno.ENOENT == oe.errno:
				# Already gone; nothing to clean.
				pass
			elif errno.EPERM == oe.errno:
				writemsg("%s\n" % oe, noiselevel=-1)
				writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
					clean_dir, noiselevel=-1)
				return 1
			else:
				# Wrap with PermissionDenied if appropriate, so that callers
				# display a short error message without a traceback.
				_raise_exc(oe)

	# NOTE: a dead inner helper named makedirs() was removed here; it was
	# defined but never called anywhere in this function.

	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

	# The two parent directories of PORTAGE_BUILDDIR must exist and be
	# accessible to the portage user/group.
	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
	mydirs.append(os.path.dirname(mydirs[-1]))

	try:
		for mydir in mydirs:
			ensure_dirs(mydir)
			try:
				apply_secpass_permissions(mydir,
					gid=portage_gid, uid=portage_uid, mode=0o700, mask=0)
			except PortageException:
				# Tolerate permission failures as long as the directory
				# actually exists.
				if not os.path.isdir(mydir):
					raise
		for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
			ensure_dirs(mysettings[dir_key], mode=0o755)
			apply_secpass_permissions(mysettings[dir_key],
				uid=portage_uid, gid=portage_gid)
	except PermissionDenied as e:
		writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
		return 1
	except OperationNotPermitted as e:
		writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
		return 1
	except FileNotFound as e:
		writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
		return 1

	# Reset state for things like noauto and keepwork in FEATURES.
	for x in ('.die_hooks',):
		try:
			os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
		except OSError:
			pass

	_prepare_workdir(mysettings)
	if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
		# Avoid spurious permissions adjustments when fetching with
		# a temporary PORTAGE_TMPDIR setting (for fetchonly).
		_prepare_features_dirs(mysettings)
Пример #20
0
def _prepare_workdir(mysettings):
    """Apply WORKDIR permissions and configure build logging.

    Parses PORTAGE_WORKDIR_MODE (falling back to 0o700 when unset or
    invalid), applies it to WORKDIR, prepares PORT_LOGDIR when it is
    configured and writable, and finally points PORTAGE_LOG_FILE at
    either the log directory or a fallback log file inside ${T}.
    """
    workdir_mode = 0o700
    try:
        mode = mysettings["PORTAGE_WORKDIR_MODE"]
        if mode.isdigit():
            parsed_mode = int(mode, 8)
        elif mode == "":
            # An empty value is treated the same as an unset variable.
            raise KeyError()
        else:
            raise ValueError()
        if parsed_mode & 0o7777 != parsed_mode:
            raise ValueError("Invalid file mode: %s" % mode)
        else:
            workdir_mode = parsed_mode
    except KeyError as e:
        writemsg(
            _("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") %
            oct(workdir_mode))
    except ValueError as e:
        if len(str(e)) > 0:
            writemsg("%s\n" % e)
        writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
        (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
    # Normalize the setting to a plain octal string (e.g. "0700").
    mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
    try:
        apply_secpass_permissions(mysettings["WORKDIR"],
                                  uid=portage_uid,
                                  gid=portage_gid,
                                  mode=workdir_mode)
    except FileNotFound:
        pass  # ebuild.sh will create it

    # An empty PORT_LOGDIR means logging is disabled; drop the key.
    if mysettings.get("PORT_LOGDIR", "") == "":
        while "PORT_LOGDIR" in mysettings:
            del mysettings["PORT_LOGDIR"]
    if "PORT_LOGDIR" in mysettings:
        try:
            modified = ensure_dirs(mysettings["PORT_LOGDIR"])
            if modified:
                # Only initialize group/mode if the directory doesn't
                # exist, so that we don't override permissions if they
                # were previously set by the administrator.
                # NOTE: These permissions should be compatible with our
                # default logrotate config as discussed in bug 374287.
                apply_secpass_permissions(mysettings["PORT_LOGDIR"],
                                          uid=portage_uid,
                                          gid=portage_gid,
                                          mode=0o2770)
        except PortageException as e:
            # An unusable PORT_LOGDIR disables logging entirely.
            writemsg("!!! %s\n" % str(e), noiselevel=-1)
            writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
             mysettings["PORT_LOGDIR"], noiselevel=-1)
            writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
            while "PORT_LOGDIR" in mysettings:
                del mysettings["PORT_LOGDIR"]

    compress_log_ext = ''
    if 'compress-build-logs' in mysettings.features:
        compress_log_ext = '.gz'

    logdir_subdir_ok = False
    if "PORT_LOGDIR" in mysettings and \
     os.access(mysettings["PORT_LOGDIR"], os.W_OK):
        logdir = normalize_path(mysettings["PORT_LOGDIR"])
        # The mtime of .logid supplies the timestamp embedded in the
        # log file name for this build.
        logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
        if not os.path.exists(logid_path):
            open(_unicode_encode(logid_path), 'w').close()
        logid_time = _unicode_decode(time.strftime(
            "%Y%m%d-%H%M%S", time.gmtime(os.stat(logid_path).st_mtime)),
                                     encoding=_encodings['content'],
                                     errors='replace')

        if "split-log" in mysettings.features:
            # split-log: one subdirectory per category under build/.
            log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
            mysettings["PORTAGE_LOG_FILE"] = os.path.join(
                log_subdir, "%s:%s.log%s" %
                (mysettings["PF"], logid_time, compress_log_ext))
        else:
            log_subdir = logdir
            mysettings["PORTAGE_LOG_FILE"] = os.path.join(
             logdir, "%s:%s:%s.log%s" % \
             (mysettings["CATEGORY"], mysettings["PF"], logid_time,
             compress_log_ext))

        if log_subdir is logdir:
            logdir_subdir_ok = True
        else:
            try:
                _ensure_log_subdirs(logdir, log_subdir)
            except PortageException as e:
                writemsg("!!! %s\n" % (e, ), noiselevel=-1)

            if os.access(log_subdir, os.W_OK):
                logdir_subdir_ok = True
            else:
                writemsg("!!! %s: %s\n" % (_("Permission Denied"), log_subdir),
                         noiselevel=-1)

    tmpdir_log_path = os.path.join(mysettings["T"],
                                   "build.log%s" % compress_log_ext)
    if not logdir_subdir_ok:
        # NOTE: When sesandbox is enabled, the local SELinux security policies
        # may not allow output to be piped out of the sesandbox domain. The
        # current policy will allow it to work when a pty is available, but
        # not through a normal pipe. See bug #162404.
        mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
    else:
        # Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
        # requested in bug #412865.
        make_new_symlink = False
        try:
            target = os.readlink(tmpdir_log_path)
        except OSError:
            make_new_symlink = True
        else:
            if target != mysettings["PORTAGE_LOG_FILE"]:
                make_new_symlink = True
        if make_new_symlink:
            try:
                os.unlink(tmpdir_log_path)
            except OSError:
                pass
            os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
Пример #21
0
def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
    """
    Create and sanitize the temporary build directory tree
    (PORTAGE_BUILDDIR, HOME, PKG_LOGDIR, T) for the current build.

    @param myroot: ignored, retained for backward compatibility
    @param settings: package config instance (required)
    @param cleanup: when True (and FEATURES=keeptemp is unset), remove
        stale HOME and T contents left over from a previous run
    @return: 1 on a permission-related failure, otherwise None
    """
    myroot = None

    if settings is None:
        raise TypeError("settings argument is required")

    mysettings = settings
    clean_dirs = [mysettings["HOME"]]

    # We enable cleanup when we want to make sure old cruft (such as the old
    # environment) doesn't interfere with the current phase.
    if cleanup and 'keeptemp' not in mysettings.features:
        clean_dirs.append(mysettings["T"])

    for clean_dir in clean_dirs:
        try:
            shutil.rmtree(clean_dir)
        except OSError as oe:
            if errno.ENOENT == oe.errno:
                # Already gone; nothing to clean.
                pass
            elif errno.EPERM == oe.errno:
                writemsg("%s\n" % oe, noiselevel=-1)
                writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
                 clean_dir, noiselevel=-1)
                return 1
            else:
                # Wrap with PermissionDenied if appropriate, so that callers
                # display a short error message without a traceback.
                _raise_exc(oe)

    # NOTE(review): this helper is never called anywhere in this
    # function body; it appears to be dead code — confirm and remove.
    def makedirs(dir_path):
        try:
            os.makedirs(dir_path)
        except OSError as oe:
            if errno.EEXIST == oe.errno:
                pass
            elif errno.EPERM == oe.errno:
                writemsg("%s\n" % oe, noiselevel=-1)
                writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
                 dir_path, noiselevel=-1)
                return False
            else:
                raise
        return True

    mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

    # The two parent directories of PORTAGE_BUILDDIR must exist and be
    # accessible to the portage user/group.
    mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
    mydirs.append(os.path.dirname(mydirs[-1]))

    try:
        for mydir in mydirs:
            ensure_dirs(mydir)
            try:
                apply_secpass_permissions(mydir,
                                          gid=portage_gid,
                                          uid=portage_uid,
                                          mode=0o700,
                                          mask=0)
            except PortageException:
                # Tolerate permission failures as long as the directory
                # actually exists.
                if not os.path.isdir(mydir):
                    raise
        for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
            ensure_dirs(mysettings[dir_key], mode=0o755)
            apply_secpass_permissions(mysettings[dir_key],
                                      uid=portage_uid,
                                      gid=portage_gid)
    except PermissionDenied as e:
        writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
        return 1
    except OperationNotPermitted as e:
        writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
        return 1
    except FileNotFound as e:
        writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
        return 1

    # Reset state for things like noauto and keepwork in FEATURES.
    for x in ('.die_hooks', ):
        try:
            os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
        except OSError:
            pass

    _prepare_workdir(mysettings)
    if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
        # Avoid spurious permissions adjustments when fetching with
        # a temporary PORTAGE_TMPDIR setting (for fetchonly).
        _prepare_features_dirs(mysettings)
Пример #22
0
def _prepare_features_dirs(mysettings):
    """Create and fix permissions on FEATURES-related directories.

    For each enabled feature listed in ``features_dirs`` (ccache,
    distcc), ensure its base directory (and any subdirectories) exists
    and, when needed, recursively apply portage group ownership and
    permissions.  A feature whose directory cannot be set up is removed
    from FEATURES.
    """

    # Use default ABI libdir in accordance with bug #355283.
    # NOTE(review): libdir is computed here but not referenced anywhere
    # below in this function — confirm whether it is still needed.
    libdir = None
    default_abi = mysettings.get("DEFAULT_ABI")
    if default_abi:
        libdir = mysettings.get("LIBDIR_" + default_abi)
    if not libdir:
        libdir = "lib"

    # Per-feature directory configuration: which variable names the base
    # directory, its default location, optional subdirectories, and
    # whether permissions are re-applied on every call.
    features_dirs = {
        "ccache": {
            "basedir_var": "CCACHE_DIR",
            "default_dir": os.path.join(mysettings["PORTAGE_TMPDIR"],
                                        "ccache"),
            "always_recurse": False
        },
        "distcc": {
            "basedir_var": "DISTCC_DIR",
            "default_dir": os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
            "subdirs": ("lock", "state"),
            "always_recurse": True
        }
    }
    dirmode = 0o2070
    filemode = 0o60
    modemask = 0o2
    restrict = mysettings.get("PORTAGE_RESTRICT", "").split()
    # droppriv: the build will run as the portage user, so these
    # directories must be group-accessible to portage.
    droppriv = secpass >= 2 and \
     "userpriv" in mysettings.features and \
     "userpriv" not in restrict
    for myfeature, kwargs in features_dirs.items():
        if myfeature in mysettings.features:
            failure = False
            basedir = mysettings.get(kwargs["basedir_var"])
            if basedir is None or not basedir.strip():
                basedir = kwargs["default_dir"]
                mysettings[kwargs["basedir_var"]] = basedir
            try:
                mydirs = [mysettings[kwargs["basedir_var"]]]
                if "subdirs" in kwargs:
                    for subdir in kwargs["subdirs"]:
                        mydirs.append(os.path.join(basedir, subdir))
                for mydir in mydirs:
                    modified = ensure_dirs(mydir)
                    # Generally, we only want to apply permissions for
                    # initial creation.  Otherwise, we don't know exactly what
                    # permissions the user wants, so should leave them as-is.
                    droppriv_fix = False
                    if droppriv:
                        st = os.stat(mydir)
                        if st.st_gid != portage_gid or \
                         not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
                            droppriv_fix = True
                        if not droppriv_fix:
                            # Check permissions of files in the directory.
                            for filename in os.listdir(mydir):
                                try:
                                    subdir_st = os.lstat(
                                        os.path.join(mydir, filename))
                                except OSError:
                                    # Entry vanished; skip it.
                                    continue
                                if subdir_st.st_gid != portage_gid or \
                                 ((stat.S_ISDIR(subdir_st.st_mode) and \
                                 not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
                                    droppriv_fix = True
                                    break

                    if droppriv_fix:
                        _adjust_perms_msg(mysettings,
                         colorize("WARN", " * ") + \
                         _("Adjusting permissions "
                         "for FEATURES=userpriv: '%s'\n") % mydir)
                    elif modified:
                        _adjust_perms_msg(mysettings,
                         colorize("WARN", " * ") + \
                         _("Adjusting permissions "
                         "for FEATURES=%s: '%s'\n") % (myfeature, mydir))

                    if modified or kwargs["always_recurse"] or droppriv_fix:

                        def onerror(e):
                            raise  # The feature is disabled if a single error
                            # occurs during permissions adjustment.

                        if not apply_recursive_permissions(mydir,
                                                           gid=portage_gid,
                                                           dirmode=dirmode,
                                                           dirmask=modemask,
                                                           filemode=filemode,
                                                           filemask=modemask,
                                                           onerror=onerror):
                            raise OperationNotPermitted(
                                _("Failed to apply recursive permissions for the portage group."
                                  ))

            except DirectoryNotFound as e:
                failure = True
                writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
                 (e,), noiselevel=-1)
                writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
                         noiselevel=-1)

            except PortageException as e:
                failure = True
                writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
                writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
                 (kwargs["basedir_var"], basedir), noiselevel=-1)
                writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
                         noiselevel=-1)

            if failure:
                # Drop the feature and pause so the warning is noticed.
                mysettings.features.remove(myfeature)
                time.sleep(5)
Пример #23
0
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
	writemsg_level):
	"""Regenerate environment files derived from ${EROOT}etc/env.d.

	Collates the numbered files in etc/env.d into /etc/profile.env and
	/etc/csh.env, rewrites /etc/ld.so.conf (and, when prelink is
	available, /etc/prelink.conf.d/portage.conf), and runs ldconfig when
	the library directories appear to have changed.

	@param makelinks: if True, allow ldconfig to regenerate linker
		symlinks/cache when library contents changed
	@param target_root: root path; defaults to portage.settings["ROOT"]
	@param prev_mtimes: mtime cache dict; defaults to
		portage.mtimedb["ldpath"]
	@param contents: package CONTENTS mapping used to detect whether any
		library directory was touched (may be None)
	@param env: config to read variables from; defaults to
		portage.settings
	@param writemsg_level: logging function; defaults to
		portage.util.writemsg_level
	"""
	if writemsg_level is None:
		writemsg_level = portage.util.writemsg_level
	if target_root is None:
		target_root = portage.settings["ROOT"]
	if prev_mtimes is None:
		prev_mtimes = portage.mtimedb["ldpath"]
	if env is None:
		settings = portage.settings
	else:
		settings = env

	eprefix = settings.get("EPREFIX", "")
	eprefix_lstrip = eprefix.lstrip(os.sep)
	eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
	envd_dir = os.path.join(eroot, "etc", "env.d")
	ensure_dirs(envd_dir, mode=0o755)
	fns = listdir(envd_dir, EmptyOnError=1)
	fns.sort()
	templist = []
	for x in fns:
		# Only process files named like "NNname": two leading digits,
		# and skip hidden/backup files.
		if len(x) < 3:
			continue
		if not x[0].isdigit() or not x[1].isdigit():
			continue
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
			continue
		templist.append(x)
	fns = templist
	del templist

	# Variables whose values accumulate across env.d files, joined by
	# space or colon respectively. env.d files may extend these sets
	# via SPACE_SEPARATED / COLON_SEPARATED.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		"PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		"PYTHONPATH", "ROOTPATH"])

	config_list = []

	for x in fns:
		file_path = os.path.join(envd_dir, x)
		try:
			myconfig = getconfig(file_path, expand=False)
		except ParseError as e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			del e
			continue
		if myconfig is None:
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
			continue

		config_list.append(myconfig)
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]

	# Merge cumulative variables across all env.d files, preserving
	# first-seen order and dropping duplicates. "specials" keeps the
	# item lists (e.g. LDPATH entries) for later use.
	env = {}
	specials = {}
	for var in space_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split():
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = " ".join(mylist)
		specials[var] = mylist

	for var in colon_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split(":"):
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = ":".join(mylist)
		specials[var] = mylist

	for myconfig in config_list:
		"""Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
		env.update(myconfig)

	# Read the existing ld.so.conf (skipping comment lines) so we can
	# tell whether it needs rewriting.
	ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
	try:
		myld = io.open(_unicode_encode(ldsoconf_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		myldlines = myld.readlines()
		myld.close()
		oldld = []
		for x in myldlines:
			#each line has at least one char (a newline)
			if x[:1] == "#":
				continue
			oldld.append(x[:-1])
	except (IOError, OSError) as e:
		if e.errno != errno.ENOENT:
			raise
		oldld = None

	newld = specials["LDPATH"]
	if (oldld != newld):
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
			myfd.write(x + "\n")
		myfd.close()

	# Collect lib/lib64/usr/lib* style directories under EROOT,
	# excluding libexec, as eroot-relative paths.
	potential_lib_dirs = set()
	for lib_dir_glob in ('usr/lib*', 'lib*'):
		x = os.path.join(eroot, lib_dir_glob)
		for y in glob.glob(_unicode_encode(x,
			encoding=_encodings['fs'], errors='strict')):
			try:
				y = _unicode_decode(y,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				continue
			if os.path.basename(y) != 'libexec':
				potential_lib_dirs.add(y[len(eroot):])

	# Update prelink.conf if we are prelink-enabled
	if prelink_capable:
		prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
		ensure_dirs(prelink_d)
		newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
			newprelink.write('-l /%s\n' % (x,))
		prelink_paths = set()
		prelink_paths |= set(specials.get('LDPATH', []))
		prelink_paths |= set(specials.get('PATH', []))
		prelink_paths |= set(specials.get('PRELINK_PATH', []))
		prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
		for x in prelink_paths:
			if not x:
				continue
			if x[-1:] != '/':
				x += "/"
			plmasked = 0
			for y in prelink_path_mask:
				if not y:
					continue
				if y[-1] != '/':
					y += "/"
				if y == x[0:len(y)]:
					plmasked = 1
					break
			if not plmasked:
				newprelink.write("-h %s\n" % (x,))
		for x in prelink_path_mask:
			newprelink.write("-b %s\n" % (x,))
		newprelink.close()

		# Migration code path.  If /etc/prelink.conf was generated by us, then
		# point it to the new stuff until the prelink package re-installs.
		prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
		try:
			with open(_unicode_encode(prelink_conf,
				encoding=_encodings['fs'], errors='strict'), 'rb') as f:
				if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
					f = atomic_ofstream(prelink_conf)
					f.write('-c /etc/prelink.conf.d/*.conf\n')
					f.close()
		except IOError as e:
			if e.errno != errno.ENOENT:
				raise

	# int() instead of long(): "long" does not exist in Python 3, and
	# int() behaves identically here on Python 2 as well.
	current_time = int(time.time())
	mtime_changed = False

	lib_dirs = set()
	for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
		x = os.path.join(eroot, lib_dir.lstrip(os.sep))
		try:
			newldpathtime = os.stat(x)[stat.ST_MTIME]
			lib_dirs.add(normalize_path(x))
		except OSError as oe:
			if oe.errno == errno.ENOENT:
				try:
					del prev_mtimes[x]
				except KeyError:
					pass
				# ignore this path because it doesn't exist
				continue
			raise
		if newldpathtime == current_time:
			# Reset mtime to avoid the potential ambiguity of times that
			# differ by less than 1 second.
			newldpathtime -= 1
			os.utime(x, (newldpathtime, newldpathtime))
			prev_mtimes[x] = newldpathtime
			mtime_changed = True
		elif x in prev_mtimes:
			if prev_mtimes[x] == newldpathtime:
				pass
			else:
				prev_mtimes[x] = newldpathtime
				mtime_changed = True
		else:
			prev_mtimes[x] = newldpathtime
			mtime_changed = True

	# If no library directory mtimes changed, consult the package's
	# CONTENTS: only keep makelinks enabled when it actually installed
	# objects or symlinks into one of the library directories.
	if makelinks and \
		not mtime_changed and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.items():
			if mydata[0] not in ("obj", "sym"):
				continue
			head, tail = os.path.split(mypath)
			if head in lib_dirs:
				libdir_contents_changed = True
				break
		if not libdir_contents_changed:
			makelinks = False

	ldconfig = "/sbin/ldconfig"
	if "CHOST" in settings and "CBUILD" in settings and \
		settings["CHOST"] != settings["CBUILD"]:
		# Cross-compiling: use the target's ldconfig if available.
		ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

	# Only run ldconfig as needed
	if makelinks and ldconfig and not eprefix:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype == "Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
				(target_root,))
			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD", "DragonFly"):
			writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
				target_root)
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))

	# LDPATH goes to ld.so.conf only; keep it out of profile.env/csh.env.
	del specials["LDPATH"]

	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice  = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [x for x in env if x != "LDPATH"]
	env_keys.sort()
	for k in env_keys:
		v = env[k]
		if v.startswith('$') and not v.startswith('${'):
			# Preserve shell evaluation of values like $HOME via $'...'.
			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		else:
			outfile.write("export %s='%s'\n" % (k, v))
	outfile.close()

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for x in env_keys:
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
	outfile.close()
# Example #24 (score: 0)
    def testPortdbCache(self):
        """Exercise egencache cache-format selection via layout.conf.

        Builds a throwaway repository, then runs a sequence of checks:
        `egencache --update` must produce the PMS "metadata/cache"
        format by default; setting "cache-formats" in
        metadata/layout.conf must make portdb select the corresponding
        pregenerated auxdb class (pms metadata database vs. md5-dict
        database); and with layout.conf removed but both cache formats
        present, md5-dict must be preferred.  Each portdb probe runs in
        a fresh Python subprocess so portage is re-imported with the
        current on-disk state.
        """
        debug = False

        ebuilds = {
            "dev-libs/A-1": {},
            "dev-libs/A-2": {},
            "sys-apps/B-1": {},
            "sys-apps/B-2": {},
        }

        playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        portdir = settings["PORTDIR"]
        user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
        metadata_dir = os.path.join(portdir, "metadata")
        md5_cache_dir = os.path.join(metadata_dir, "md5-cache")
        pms_cache_dir = os.path.join(metadata_dir, "cache")
        layout_conf_path = os.path.join(metadata_dir, "layout.conf")

        portage_python = portage._python_interpreter
        egencache_cmd = (portage_python, "-Wd",
                         os.path.join(PORTAGE_BIN_PATH, "egencache"))
        python_cmd = (portage_python, "-Wd", "-c")

        # Each entry is either a 1-tuple holding a callable (evaluated
        # in-process as an assertion) or an argv tuple to run as a
        # subprocess; commands execute in order, so later checks depend
        # on the state left behind by earlier ones.
        test_commands = (
            (lambda: not os.path.exists(pms_cache_dir), ),
            (lambda: not os.path.exists(md5_cache_dir), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.porttree_root in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""), ),
            egencache_cmd + ("--update", ),
            (lambda: os.path.exists(pms_cache_dir), ),
            (lambda: not os.path.exists(md5_cache_dir), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.metadata import database as pms_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], pms_database):
					sys.exit(1)
			"""), ),
            (BASH_BINARY, "-c", "echo %s > %s" % tuple(
                map(portage._shell_quote, (
                    "cache-formats = md5-dict pms",
                    layout_conf_path,
                )))),
            egencache_cmd + ("--update", ),
            (lambda: os.path.exists(md5_cache_dir), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.flat_hash import md5_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
					sys.exit(1)
			"""), ),
            (BASH_BINARY, "-c", "echo %s > %s" % tuple(
                map(portage._shell_quote, (
                    "cache-formats = pms md5-dict",
                    layout_conf_path,
                )))),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.metadata import database as pms_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], pms_database):
					sys.exit(1)
			"""), ),

            # Test auto-detection and preference for md5-cache when both
            # cache formats are available but layout.conf is absent.
            (BASH_BINARY, "-c",
             "rm %s" % portage._shell_quote(layout_conf_path)),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.porttree_root not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""), ),
            python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.flat_hash import md5_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.porttree_root], md5_database):
					sys.exit(1)
			"""), ),
        )

        # Disable the sandbox when it can't be used (not capable, or we
        # are already running inside one).
        features = []
        if not portage.process.sandbox_capable or \
         os.environ.get("SANDBOX_ON") == "1":
            features.append("-sandbox")

        make_conf = (
            "FEATURES=\"%s\"\n" % (" ".join(features), ),
            "PORTDIR=\"%s\"\n" % (portdir, ),
            "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"], ),
            "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"], ),
        )

        # Make sure the subprocesses import this portage checkout:
        # PORTAGE_PYM_PATH must come first in PYTHONPATH.
        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        # Minimal, controlled environment for the subprocesses.
        env = {
            "PATH": os.environ.get("PATH", ""),
            "PORTAGE_OVERRIDE_EPREFIX": eprefix,
            "PORTAGE_PYTHON": portage_python,
            "PYTHONPATH": pythonpath,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
             os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

        dirs = [user_config_dir]

        try:
            for d in dirs:
                ensure_dirs(d)
            with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
                for line in make_conf:
                    f.write(line)

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for i, args in enumerate(test_commands):

                if hasattr(args[0], '__call__'):
                    # In-process assertion rather than a subprocess.
                    self.assertTrue(args[0](),
                                    "callable at index %s failed" % (i, ))
                    continue

                proc = subprocess.Popen(args, env=env, stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    # Surface captured output only when the command failed.
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "command failed with args %s" % (args, ))
        finally:
            playground.cleanup()
# Example #25 (score: 0)
def process(mysettings, key, logentries, fulltext):
    """Save the complete elog text for one package to its own log file.

    The file is written under ${PORTAGE_LOGDIR}/elog (or the default
    /var/log/portage/elog under EPREFIX), named from the package name
    and a UTC timestamp, and receives group ownership/permissions
    copied from its parent directory.

    @param mysettings: portage config providing PORTAGE_LOGDIR, EPREFIX
        and FEATURES
    @param key: category/package-version string
    @param logentries: unused here (part of the elog module interface)
    @param fulltext: complete log text to write
    @return: path of the log file that was written
    """
    custom_logdir = mysettings.get("PORTAGE_LOGDIR")
    if custom_logdir:
        logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
    else:
        logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
                              "var", "log", "portage")

    if not os.path.isdir(logdir):
        # Only initialize group/mode if the directory doesn't exist, so
        # that permissions previously set by the administrator are not
        # overridden. These permissions should be compatible with the
        # default logrotate config as discussed in bug 374287.
        owner = portage_uid if portage.data.secpass >= 2 else -1
        ensure_dirs(logdir, uid=owner, gid=portage_gid, mode=0o2770)

    cat, pf = portage.catsplit(key)

    stamp = _unicode_decode(
        time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
        encoding=_encodings['content'], errors='replace')
    elogfilename = pf + ":" + stamp + ".log"

    if "split-elog" in mysettings.features:
        # One subdirectory per category.
        log_subdir = os.path.join(logdir, "elog", cat)
        elogfilename = os.path.join(log_subdir, elogfilename)
    else:
        # Flat layout: category encoded into the filename instead.
        log_subdir = os.path.join(logdir, "elog")
        elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
    _ensure_log_subdirs(logdir, log_subdir)

    try:
        with io.open(_unicode_encode(elogfilename,
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='w',
                     encoding=_encodings['content'],
                     errors='backslashreplace') as elogfile:
            elogfile.write(_unicode_decode(fulltext))
    except IOError as e:
        # Translate the usual permission/filesystem errnos into portage
        # exception types; anything else propagates unchanged.
        func_call = "open('%s', 'w')" % elogfilename
        exc_for_errno = {
            errno.EACCES: portage.exception.PermissionDenied,
            errno.EPERM: portage.exception.OperationNotPermitted,
            errno.EROFS: portage.exception.ReadOnlyFileSystem,
        }
        exc_cls = exc_for_errno.get(e.errno)
        if exc_cls is None:
            raise
        raise exc_cls(func_call)

    # Copy group permission bits from the parent directory.
    subdir_st = os.stat(log_subdir)
    group_bits = subdir_st.st_mode & 0o060

    # Copy the uid from the parent directory if we have privileges to
    # do so, for compatibility with our default logrotate config (see
    # bug 378451). With the "su portage portage" directive and
    # logrotate-3.8.0, logrotate's chown call during the compression
    # phase will only succeed if the log file's uid is portage_uid.
    owner_uid = subdir_st.st_uid if portage.data.secpass >= 2 else -1

    apply_permissions(elogfilename,
                      uid=owner_uid,
                      gid=subdir_st.st_gid,
                      mode=group_bits,
                      mask=0)

    return elogfilename
# Example #26 (score: 0)
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probablility
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetch from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurrious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# no exception?  file exists. let digestcheck() report
							# an appropriately for size or checksum errors

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure to that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probablility of
										# of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests!=None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
Пример #27
0
    def testSyncLocal(self):
        """
        Integration test for repository syncing against a local source.

        Exercises ``emerge --sync`` and ``emaint sync`` over the rsync and
        git sync types, auto-sync enabled/disabled, and the
        ``sync-rsync-extra-opts`` setting at repo level, DEFAULT level,
        repo-level override of DEFAULT, and repo-level cancellation.
        Commands run as subprocesses against a ResolverPlayground.
        """
        # Set True to let subprocesses inherit stdout/stderr for debugging.
        debug = False

        skip_reason = self._must_skip()
        if skip_reason:
            self.portage_skip = skip_reason
            self.assertFalse(True, skip_reason)
            return

        # Template for PORTAGE_REPOSITORIES; rendered by repos_set_conf()
        # below for each sync-type / option combination under test.
        # sync-uri points at repo.location + "_sync" (see rename_repo).
        repos_conf = textwrap.dedent("""
			[DEFAULT]
			%(default_keys)s
			[test_repo]
			location = %(EPREFIX)s/var/repositories/test_repo
			sync-type = %(sync-type)s
			sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
			auto-sync = %(auto-sync)s
			%(repo_extra_keys)s
		""")

        profile = {
            "eapi": ("5", ),
            "package.use.stable.mask": ("dev-libs/A flag", )
        }

        ebuilds = {"dev-libs/A-0": {}}

        user_config = {'make.conf': ('FEATURES="metadata-transfer"', )}

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        profile=profile,
                                        user_config=user_config,
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        homedir = os.path.join(eroot, "home")
        distdir = os.path.join(eprefix, "distdir")
        repo = settings.repositories["test_repo"]
        metadata_dir = os.path.join(repo.location, "metadata")

        # Locate the emerge and emaint scripts (bindir preferred, sbindir
        # as fallback) and invoke them through the current interpreter with
        # -b -Wd so bytes warnings and deprecation warnings are surfaced.
        cmds = {}
        for cmd in ("emerge", "emaint"):
            for bindir in (self.bindir, self.sbindir):
                path = os.path.join(bindir, cmd)
                if os.path.exists(path):
                    cmds[cmd] = (portage._python_interpreter, "-b", "-Wd",
                                 path)
                    break
            else:
                raise AssertionError('%s binary not found in %s or %s' %
                                     (cmd, self.bindir, self.sbindir))

        git_binary = find_binary("git")
        git_cmd = (git_binary, )

        committer_name = "Gentoo Dev"
        committer_email = "*****@*****.**"

        def repos_set_conf(sync_type,
                           dflt_keys=None,
                           xtra_keys=None,
                           auto_sync="yes"):
            # Render repos_conf into the PORTAGE_REPOSITORIES environment
            # variable consumed by the emerge/emaint subprocesses.
            # NOTE: `env` is the dict defined later in this method; this
            # closure is only called after `env` exists.
            env["PORTAGE_REPOSITORIES"] = repos_conf % {\
             "EPREFIX": eprefix, "sync-type": sync_type,
             "auto-sync": auto_sync,
             "default_keys": "" if dflt_keys is None else dflt_keys,
             "repo_extra_keys": "" if xtra_keys is None else xtra_keys}

        def alter_ebuild():
            # Touch an ebuild in the sync-source repo and drop the metadata
            # timestamp so the next sync has a change to transfer.
            with open(
                    os.path.join(repo.location + "_sync", "dev-libs", "A",
                                 "A-0.ebuild"), "a") as f:
                f.write("\n")
            os.unlink(os.path.join(metadata_dir, 'timestamp.chk'))

        # Each scenario below is a sequence of (cwd, cmd) pairs: cmd is
        # either an argv tuple to spawn as a subprocess or a callable to
        # run in-process (config tweaks and assertions).
        sync_cmds = (
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir, lambda: self.assertTrue(
                os.path.exists(os.path.join(repo.location, "dev-libs", "A")),
                "dev-libs/A expected, but missing")),
            (homedir, cmds["emaint"] + ("sync", "-A")),
        )

        # With auto-sync = no, `emerge --sync` must not populate the repo.
        sync_cmds_auto_sync = (
            (homedir, lambda: repos_set_conf("rsync", auto_sync="no")),
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir, lambda: self.assertFalse(
                os.path.exists(os.path.join(repo.location, "dev-libs", "A")),
                "dev-libs/A found, expected missing")),
            (homedir, lambda: repos_set_conf("rsync", auto_sync="yes")),
        )

        # Move the repo aside so it becomes the sync source that the
        # sync-uri in repos_conf points at.
        rename_repo = (
            (homedir,
             lambda: os.rename(repo.location, repo.location + "_sync")), )

        # Repo-level sync-rsync-extra-opts: rsync --backup must create the
        # "_back" directory during sync.
        rsync_opts_repos = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", None,
                "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back"))),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        # Same option supplied via the [DEFAULT] section.
        rsync_opts_repos_default = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back"))),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        # Repo-level setting must override the [DEFAULT] one: the DEFAULT
        # backup dir ("_back_nowhere") must not be used.
        rsync_opts_repos_default_ovr = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back_nowhere"),
                "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back"))),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        # An empty repo-level value must cancel the [DEFAULT] one entirely:
        # no backup directory is created at all.
        rsync_opts_repos_default_cancel = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back_nowhere"
                             ), "sync-rsync-extra-opts = ")),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertFalse(os.path.exists(repo.location + "_back"))
             ),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        delete_sync_repo = ((homedir,
                             lambda: shutil.rmtree(repo.location + "_sync")), )

        # Turn the (already synced) repo into a git repository so the git
        # sync type can be exercised; committer identity goes to the
        # per-test HOME via --global.
        git_repo_create = (
            (repo.location, git_cmd + (
                "config",
                "--global",
                "user.name",
                committer_name,
            )),
            (repo.location, git_cmd + (
                "config",
                "--global",
                "user.email",
                committer_email,
            )),
            (repo.location, git_cmd + ("init-db", )),
            (repo.location, git_cmd + ("add", ".")),
            (repo.location,
             git_cmd + ("commit", "-a", "-m", "add whole repo")),
        )

        sync_type_git = ((homedir, lambda: repos_set_conf("git")), )

        # Make sure PORTAGE_PYM_PATH leads PYTHONPATH so the subprocesses
        # import the portage sources under test, not an installed copy.
        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        # Minimal, controlled environment for all spawned commands;
        # PORTAGE_REPOSITORIES is added by repos_set_conf().
        env = {
            "PORTAGE_OVERRIDE_EPREFIX":
            eprefix,
            "DISTDIR":
            distdir,
            "GENTOO_COMMITTER_NAME":
            committer_name,
            "GENTOO_COMMITTER_EMAIL":
            committer_email,
            "HOME":
            homedir,
            "PATH":
            os.environ["PATH"],
            "PORTAGE_GRPNAME":
            os.environ["PORTAGE_GRPNAME"],
            "PORTAGE_USERNAME":
            os.environ["PORTAGE_USERNAME"],
            "PYTHONDONTWRITEBYTECODE":
            os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH":
            pythonpath,
        }
        repos_set_conf("rsync")

        if os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            env["FEATURES"] = "-sandbox -usersandbox"

        dirs = [homedir, metadata_dir]
        try:
            for d in dirs:
                ensure_dirs(d)

            # Seed a metadata timestamp so the first rsync sees a complete
            # repository layout.
            timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
            with open(timestamp_path, 'w') as f:
                f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT,
                                      time.gmtime()))

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            # Run all scenarios in order; note the scenarios are stateful
            # and order-dependent (rename_repo must precede the syncs,
            # git_repo_create must precede sync_type_git, etc.).
            for cwd, cmd in rename_repo + sync_cmds_auto_sync + sync_cmds + \
             rsync_opts_repos + rsync_opts_repos_default + \
             rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
             delete_sync_repo + git_repo_create + sync_type_git + \
             rename_repo + sync_cmds:

                if hasattr(cmd, '__call__'):
                    cmd()
                    continue

                # NOTE(review): every cwd in the scenarios is absolute
                # (homedir or repo.location), so os.path.join returns cwd
                # unchanged here.
                abs_cwd = os.path.join(repo.location, cwd)
                proc = subprocess.Popen(cmd,
                                        cwd=abs_cwd,
                                        env=env,
                                        stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    # Capture output and replay it to stderr only when the
                    # command fails.
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "%s failed in %s" % (
                                     cmd,
                                     cwd,
                                 ))

        finally:
            playground.cleanup()
Пример #28
0
	def testPortdbCache(self):
		"""
		Verify egencache metadata cache generation and portdbapi's choice
		of pregenerated cache backend (md5-dict vs. the deprecated pms
		format), as controlled by the cache-formats setting in
		metadata/layout.conf — including auto-detection when layout.conf
		is absent. Each check runs portage in a fresh subprocess so the
		cache selection logic is exercised from a cold start.
		"""
		# Set True to let subprocesses inherit stdout/stderr for debugging.
		debug = False

		ebuilds = {
			"dev-libs/A-1": {},
			"dev-libs/A-2": {},
			"sys-apps/B-1": {},
			"sys-apps/B-2": {},
		}

		playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		test_repo_location = settings.repositories["test_repo"].location
		user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
		metadata_dir = os.path.join(test_repo_location, "metadata")
		md5_cache_dir = os.path.join(metadata_dir, "md5-cache")
		pms_cache_dir = os.path.join(metadata_dir, "cache")
		layout_conf_path = os.path.join(metadata_dir, "layout.conf")

		# Invoke egencache and inline python snippets through the current
		# interpreter with -b -Wd so warnings are surfaced.
		portage_python = portage._python_interpreter
		egencache_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "egencache"),
			"--repo", "test_repo",
			"--repositories-configuration", settings.repositories.config_string())
		python_cmd = (portage_python, "-b", "-Wd", "-c")

		# Each entry is either a 1-tuple holding an in-process callable
		# (asserted truthy) or an argv tuple to spawn (asserted EX_OK).
		test_commands = (
			# Initially neither cache exists and no pregen auxdb is used.
			(lambda: not os.path.exists(pms_cache_dir),),
			(lambda: not os.path.exists(md5_cache_dir),),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.repositories['test_repo'].location in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""),),

			# Default egencache --update generates only the md5-cache,
			# which portdbapi then picks up as an md5_database.
			egencache_cmd + ("--update",),
			(lambda: not os.path.exists(pms_cache_dir),),
			(lambda: os.path.exists(md5_cache_dir),),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""),),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.flat_hash import md5_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], md5_database):
					sys.exit(1)
			"""),),

			# With cache-formats listing md5-dict first, md5 is preferred.
			(BASH_BINARY, "-c", "echo %s > %s" %
				tuple(map(portage._shell_quote,
				("cache-formats = md5-dict pms", layout_conf_path,)))),
			egencache_cmd + ("--update",),
			(lambda: os.path.exists(md5_cache_dir),),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""),),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.flat_hash import md5_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], md5_database):
					sys.exit(1)
			"""),),

			# Disable DeprecationWarnings, since the pms format triggers them
			# in portdbapi._create_pregen_cache().
			(BASH_BINARY, "-c", "echo %s > %s" %
				tuple(map(portage._shell_quote,
				("cache-formats = pms md5-dict", layout_conf_path,)))),
			(portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""),),
			(portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.metadata import database as pms_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], pms_database):
					sys.exit(1)
			"""),),

			# Test auto-detection and preference for md5-cache when both
			# cache formats are available but layout.conf is absent.
			(BASH_BINARY, "-c", "rm %s" % portage._shell_quote(layout_conf_path)),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
					sys.exit(1)
			"""),),
			python_cmd + (textwrap.dedent("""
				import os, sys, portage
				from portage.cache.flat_hash import md5_database
				if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], md5_database):
					sys.exit(1)
			"""),),
		)

		# Make sure PORTAGE_PYM_PATH leads PYTHONPATH so the subprocesses
		# import the portage sources under test, not an installed copy.
		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		# Minimal, controlled environment for the spawned commands.
		env = {
			"PATH" : os.environ.get("PATH", ""),
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PYTHONPATH" : pythonpath,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		dirs = [user_config_dir]

		try:
			for d in dirs:
				ensure_dirs(d)

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			# Run the commands strictly in order; the sequence is stateful
			# (layout.conf edits affect the subsequent checks).
			for i, args in enumerate(test_commands):

				if hasattr(args[0], '__call__'):
					self.assertTrue(args[0](),
						"callable at index %s failed" % (i,))
					continue

				proc = subprocess.Popen(args,
					env=env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					# Capture output and replay it to stderr only when the
					# command fails.
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"command %d failed with args %s" % (i, args,))
		finally:
			playground.cleanup()
Пример #29
0
    def updateItems(self, repoid):
        """
        Figure out which news items from the repo's news directory are both
        unread and relevant to the user (according to the GLEP 42 standards
        of relevancy), then add those items to the news.repoid.unread file.

        @param repoid: identifier of the repository whose news is scanned
        """

        # Ensure that the unread path exists and is writable.

        try:
            ensure_dirs(self.unread_path,
                        uid=self._uid,
                        gid=self._gid,
                        mode=self._dir_mode,
                        mask=self._mode_mask)
        except (OperationNotPermitted, PermissionDenied):
            return

        if not os.access(self.unread_path, os.W_OK):
            return

        # A missing news directory simply means there is nothing to do.
        news_dir = self._news_dir(repoid)
        try:
            news = _os.listdir(
                _unicode_encode(news_dir,
                                encoding=_encodings['fs'],
                                errors='strict'))
        except OSError:
            return

        skip_filename = self._skip_filename(repoid)
        unread_filename = self._unread_filename(repoid)
        # Hold the lock for the whole read-modify-write cycle so that
        # concurrent processes cannot clobber each other's updates.
        unread_lock = lockfile(unread_filename, wantnewlockfile=1)
        try:
            try:
                unread = set(grabfile(unread_filename))
                unread_orig = unread.copy()
                skip = set(grabfile(skip_filename))
                skip_orig = skip.copy()
            except PermissionDenied:
                return

            for itemid in news:
                try:
                    itemid = _unicode_decode(itemid,
                                             encoding=_encodings['fs'],
                                             errors='strict')
                except UnicodeDecodeError:
                    # Re-decode with replacement characters purely so the
                    # name can appear in the warning; the item is skipped.
                    itemid = _unicode_decode(itemid,
                                             encoding=_encodings['fs'],
                                             errors='replace')
                    writemsg_level(
                     _("!!! Invalid encoding in news item name: '%s'\n") % \
                     itemid, level=logging.ERROR, noiselevel=-1)
                    continue

                if itemid in skip:
                    continue
                filename = os.path.join(
                    news_dir, itemid, itemid + "." + self.language_id + ".txt")
                if not os.path.isfile(filename):
                    continue
                item = NewsItem(filename, itemid)
                if not item.isValid():
                    continue
                if item.isRelevant(profile=self._profile_path,
                                   config=self.config,
                                   vardb=self.vdb):
                    # Record as unread, and also as seen (skip) so the
                    # item is not re-evaluated on subsequent runs.
                    unread.add(item.name)
                    skip.add(item.name)

            # Only rewrite the files whose contents actually changed.
            if unread != unread_orig:
                write_atomic(unread_filename,
                             "".join("%s\n" % x for x in sorted(unread)))
                apply_secpass_permissions(unread_filename,
                                          uid=self._uid,
                                          gid=self._gid,
                                          mode=self._file_mode,
                                          mask=self._mode_mask)

            if skip != skip_orig:
                write_atomic(skip_filename,
                             "".join("%s\n" % x for x in sorted(skip)))
                apply_secpass_permissions(skip_filename,
                                          uid=self._uid,
                                          gid=self._gid,
                                          mode=self._file_mode,
                                          mask=self._mode_mask)

        finally:
            unlockfile(unread_lock)
Пример #30
0
	def testIpcDaemon(self):
		"""
		Exercise EbuildIpcDaemon communication over the build directory's
		FIFO pair: first verify that exit codes sent via 'ebuild-ipc exit N'
		are relayed back to the ExitCommand handler, then verify that a
		short scheduler timeout cancels a long-running job cleanly.
		"""
		event_loop = global_event_loop()
		tmpdir = tempfile.mkdtemp()
		build_dir = None
		try:
			env = {}

			# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
			# need to be inherited by ebuild subprocesses.
			if 'PORTAGE_USERNAME' in os.environ:
				env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
			if 'PORTAGE_GRPNAME' in os.environ:
				env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

			env['PORTAGE_PYTHON'] = _python_interpreter
			env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
			env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
			env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')

			if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
				env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
					os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

			build_dir = EbuildBuildDir(
				scheduler=event_loop,
				settings=env)
			build_dir.lock()
			ensure_dirs(env['PORTAGE_BUILDDIR'])

			# The daemon and the ebuild-ipc client communicate through
			# this FIFO pair inside PORTAGE_BUILDDIR.
			input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
			output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
			os.mkfifo(input_fifo)
			os.mkfifo(output_fifo)

			# Each requested exit code should round-trip through the
			# daemon back to the ExitCommand handler.
			for exitcode in (0, 1, 2):
				exit_command = ExitCommand()
				commands = {'exit' : exit_command}
				daemon = EbuildIpcDaemon(commands=commands,
					input_fifo=input_fifo,
					output_fifo=output_fifo)
				proc = SpawnProcess(
					args=[BASH_BINARY, "-c",
					'"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
					env=env)
				task_scheduler = TaskScheduler(iter([daemon, proc]),
					max_jobs=2, event_loop=event_loop)

				self.received_command = False
				def exit_command_callback():
					# Stop the scheduler as soon as the command arrives.
					self.received_command = True
					task_scheduler.cancel()

				exit_command.reply_hook = exit_command_callback
				start_time = time.time()
				self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)

				# Remove any hardlink locks left under the build dir.
				hardlock_cleanup(env['PORTAGE_BUILDDIR'],
					remove_all_locks=True)

				self.assertEqual(self.received_command, True,
					"command not received after %d seconds" % \
					(time.time() - start_time,))
				self.assertEqual(proc.isAlive(), False)
				self.assertEqual(daemon.isAlive(), False)
				self.assertEqual(exit_command.exitcode, exitcode)

			# Intentionally short timeout test for EventLoop/AsyncScheduler.
			# Use a ridiculously long sleep_time_s in case the user's
			# system is heavily loaded (see bug #436334).
			sleep_time_s = 600     # 600.000 seconds
			short_timeout_ms = 10  #   0.010 seconds

			for i in range(3):
				exit_command = ExitCommand()
				commands = {'exit' : exit_command}
				daemon = EbuildIpcDaemon(commands=commands,
					input_fifo=input_fifo,
					output_fifo=output_fifo)
				proc = SleepProcess(seconds=sleep_time_s)
				task_scheduler = TaskScheduler(iter([daemon, proc]),
					max_jobs=2, event_loop=event_loop)

				self.received_command = False
				def exit_command_callback():
					self.received_command = True
					task_scheduler.cancel()

				exit_command.reply_hook = exit_command_callback
				start_time = time.time()
				# NOTE(review): a sibling version of this test passes the
				# timeout in seconds (0.010) — confirm _run()'s expected
				# unit here.
				self._run(event_loop, task_scheduler, short_timeout_ms)

				hardlock_cleanup(env['PORTAGE_BUILDDIR'],
					remove_all_locks=True)

				# The timeout must fire long before the sleep completes:
				# no command is received and the sleeping process is
				# cancelled, so its returncode is not os.EX_OK.
				self.assertEqual(self.received_command, False,
					"command received after %d seconds" % \
					(time.time() - start_time,))
				self.assertEqual(proc.isAlive(), False)
				self.assertEqual(daemon.isAlive(), False)
				self.assertEqual(proc.returncode == os.EX_OK, False)

		finally:
			if build_dir is not None:
				build_dir.unlock()
			shutil.rmtree(tmpdir)
Пример #31
0
    def testIpcDaemon(self):
        """
        Exercise EbuildIpcDaemon communication over the build directory's
        FIFO pair: first verify that exit codes sent via 'ebuild-ipc exit N'
        are relayed back to the ExitCommand handler, then verify that a
        short scheduler timeout cancels a long-running job cleanly.
        """
        event_loop = global_event_loop()
        tmpdir = tempfile.mkdtemp()
        build_dir = None
        try:
            env = {}

            # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
            # need to be inherited by ebuild subprocesses.
            if "PORTAGE_USERNAME" in os.environ:
                env["PORTAGE_USERNAME"] = os.environ["PORTAGE_USERNAME"]
            if "PORTAGE_GRPNAME" in os.environ:
                env["PORTAGE_GRPNAME"] = os.environ["PORTAGE_GRPNAME"]

            env["PORTAGE_PYTHON"] = _python_interpreter
            env["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
            env["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
            env["PORTAGE_BUILDDIR"] = os.path.join(tmpdir, "cat", "pkg-1")
            env["PYTHONDONTWRITEBYTECODE"] = os.environ.get(
                "PYTHONDONTWRITEBYTECODE", "")

            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                    "__PORTAGE_TEST_HARDLINK_LOCKS"]

            build_dir = EbuildBuildDir(scheduler=event_loop, settings=env)
            event_loop.run_until_complete(build_dir.async_lock())
            ensure_dirs(env["PORTAGE_BUILDDIR"])

            # The daemon and the ebuild-ipc client communicate through
            # this FIFO pair inside PORTAGE_BUILDDIR.
            input_fifo = os.path.join(env["PORTAGE_BUILDDIR"], ".ipc_in")
            output_fifo = os.path.join(env["PORTAGE_BUILDDIR"], ".ipc_out")
            os.mkfifo(input_fifo)
            os.mkfifo(output_fifo)

            # Each requested exit code should round-trip through the
            # daemon back to the ExitCommand handler.
            for exitcode in (0, 1, 2):
                exit_command = ExitCommand()
                commands = {"exit": exit_command}
                daemon = EbuildIpcDaemon(commands=commands,
                                         input_fifo=input_fifo,
                                         output_fifo=output_fifo)
                proc = SpawnProcess(
                    args=[
                        BASH_BINARY,
                        "-c",
                        '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode,
                    ],
                    env=env,
                )
                task_scheduler = TaskScheduler(iter([daemon, proc]),
                                               max_jobs=2,
                                               event_loop=event_loop)

                self.received_command = False

                def exit_command_callback():
                    # Stop the scheduler as soon as the command arrives.
                    self.received_command = True
                    task_scheduler.cancel()

                exit_command.reply_hook = exit_command_callback
                start_time = time.time()
                self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)

                # Remove any hardlink locks left under the build dir.
                hardlock_cleanup(env["PORTAGE_BUILDDIR"],
                                 remove_all_locks=True)

                self.assertEqual(
                    self.received_command,
                    True,
                    "command not received after %d seconds" %
                    (time.time() - start_time, ),
                )
                self.assertEqual(proc.isAlive(), False)
                self.assertEqual(daemon.isAlive(), False)
                self.assertEqual(exit_command.exitcode, exitcode)

            # Intentionally short timeout test for EventLoop/AsyncScheduler.
            # Use a ridiculously long sleep_time_s in case the user's
            # system is heavily loaded (see bug #436334).
            sleep_time_s = 600  # seconds
            short_timeout_s = 0.010  # seconds

            for i in range(3):
                exit_command = ExitCommand()
                commands = {"exit": exit_command}
                daemon = EbuildIpcDaemon(commands=commands,
                                         input_fifo=input_fifo,
                                         output_fifo=output_fifo)
                proc = SleepProcess(seconds=sleep_time_s)
                task_scheduler = TaskScheduler(iter([daemon, proc]),
                                               max_jobs=2,
                                               event_loop=event_loop)

                self.received_command = False

                def exit_command_callback():
                    self.received_command = True
                    task_scheduler.cancel()

                exit_command.reply_hook = exit_command_callback
                start_time = time.time()
                self._run(event_loop, task_scheduler, short_timeout_s)

                hardlock_cleanup(env["PORTAGE_BUILDDIR"],
                                 remove_all_locks=True)

                # The timeout must fire long before the sleep completes:
                # no command is received and the sleeping process is
                # cancelled, so its returncode is not os.EX_OK.
                self.assertEqual(
                    self.received_command,
                    False,
                    "command received after %d seconds" %
                    (time.time() - start_time, ),
                )
                self.assertEqual(proc.isAlive(), False)
                self.assertEqual(daemon.isAlive(), False)
                self.assertEqual(proc.returncode == os.EX_OK, False)

        finally:
            if build_dir is not None:
                event_loop.run_until_complete(build_dir.async_unlock())
            shutil.rmtree(tmpdir)
Пример #32
0
	def testMoveEnt(self):
		"""Verify that a 'move' package update is applied to both the
		installed-package (vardb) and binary-package (bindb) databases,
		while a repository that provides its own empty profiles/updates
		directory is left untouched."""

		ebuilds = {
			"dev-libs/A-2::dont_apply_updates": {"EAPI": "4", "SLOT": "2"},
		}

		installed = {
			"dev-libs/A-1::test_repo": {"EAPI": "4"},
			"dev-libs/A-2::dont_apply_updates": {"EAPI": "4", "SLOT": "2"},
		}

		binpkgs = {
			"dev-libs/A-1::test_repo": {"EAPI": "4"},
			"dev-libs/A-2::dont_apply_updates": {"EAPI": "4", "SLOT": "2"},
		}

		updates = textwrap.dedent("""
			move dev-libs/A dev-libs/A-moved
		""")

		playground = ResolverPlayground(
			binpkgs=binpkgs, ebuilds=ebuilds, installed=installed)

		cfg = playground.settings
		trees = playground.trees
		root = cfg["EROOT"]
		portdb = trees[root]["porttree"].dbapi
		vardb = trees[root]["vartree"].dbapi
		bindb = trees[root]["bintree"].dbapi
		updates_dir = os.path.join(cfg["PORTDIR"], "profiles", "updates")

		try:
			ensure_dirs(updates_dir)
			with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
				f.write(updates)

			# Give the opt-out repo an empty updates directory of its
			# own, so it does not inherit updates from the main repo.
			ensure_dirs(os.path.join(
				portdb.getRepositoryPath("dont_apply_updates"),
				"profiles", "updates"))

			# Silence update chatter while the global updates run.
			saved_noiselimit = portage.util.noiselimit
			portage.util.noiselimit = -2
			try:
				_do_global_updates(trees, {})
			finally:
				portage.util.noiselimit = saved_noiselimit

			# Cache validation can misbehave on filesystems with
			# 1-second timestamp precision, so drop the cache directly.
			vardb._clear_cache()

			# A -> A-moved must have been applied in both databases.
			for db in (vardb, bindb):
				self.assertRaises(KeyError,
					db.aux_get, "dev-libs/A-1", ["EAPI"])
				db.aux_get("dev-libs/A-moved-1", ["EAPI"])

			# The dont_apply_updates instance must be unchanged.
			for db in (vardb, bindb):
				self.assertRaises(KeyError,
					db.aux_get, "dev-libs/A-moved-2", ["EAPI"])
				db.aux_get("dev-libs/A-2", ["EAPI"])

		finally:
			playground.cleanup()
Пример #33
0
 def _ensure_dirs(self):
     """Create the parent directory of the target file if needed,
     group-owned by portage with setgid, group rwx and no world access
     (mode 0o2750, masking world bits)."""
     parent_dir = os.path.dirname(self._filename)
     ensure_dirs(parent_dir, gid=portage_gid, mode=0o2750, mask=0o2)
Пример #34
0
	def testSimple(self):
		"""
		Smoke-test the principal portage command-line tools (env-update,
		emerge, ebuild, egencache, emaint, fixpackages, portageq,
		quickpkg, regenworld) inside a ResolverPlayground, asserting
		that every command exits with os.EX_OK.
		"""

		# When True, subprocesses inherit stdout/stderr so their output
		# is visible for debugging.
		debug = False

		# Ebuild fragment shared by the test packages. It exercises
		# doins/dosym (including latin-1 encoded names, bug #381629)
		# and defines pkg_* phase functions invoked by commands below.
		install_something = """
S="${WORKDIR}"

pkg_pretend() {
	einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
	einfo "installing something..."
	insinto /usr/lib/${P}
	echo "blah blah blah" > "${T}"/regular-file
	doins "${T}"/regular-file
	dosym regular-file /usr/lib/${P}/symlink || die

	# Test code for bug #381629, using a copyright symbol encoded with latin-1.
	# We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
	# works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
	# some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
	# running tests for Python 3.2 (even though it's bash that is ultimately
	# responsible for performing the transformation).
	local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
	insinto "${latin_1_dir}"
	echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
	doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
	dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
}

pkg_config() {
	einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
	einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
	einfo "called pkg_preinst for $CATEGORY/$PF"

	# Test that has_version and best_version work correctly with
	# prefix (involves internal ROOT -> EROOT calculation in order
	# to support ROOT override via the environment with EAPIs 3
	# and later which support prefix).
	if has_version $CATEGORY/$PN:$SLOT ; then
		einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
		einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
	else
		einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
	fi
}

"""

		# Repository ebuilds made available to the playground.
		ebuilds = {
			"dev-libs/A-1": {
				"EAPI" : "4",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": install_something,
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
			},
			"dev-libs/B-1": {
				"EAPI" : "4",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": install_something,
			},
			"virtual/foo-0": {
				"EAPI" : "4",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
			},
		}

		# Packages pre-installed into the playground's fake vdb; the
		# depclean-me packages give --depclean something to remove.
		installed = {
			"dev-libs/A-1": {
				"EAPI" : "4",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
				"USE": "flag",
			},
			"dev-libs/B-1": {
				"EAPI" : "4",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"USE": "flag",
			},
			"dev-libs/depclean-me-1": {
				"EAPI" : "4",
				"IUSE" : "",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"USE": "",
			},
			"app-misc/depclean-me-1": {
				"EAPI" : "4",
				"IUSE" : "",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "dev-libs/depclean-me",
				"USE": "",
			},
		}

		# metadata.xml fixtures, used by egencache --update-use-local-desc.
		metadata_xml_files = (
			(
				"dev-libs/A",
				{
					"herd" : "base-system",
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
			(
				"dev-libs/B",
				{
					"herd" : "no-herd",
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
		)

		playground = ResolverPlayground(
			ebuilds=ebuilds, installed=installed, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		trees = playground.trees
		portdb = trees[eroot]["porttree"].dbapi
		portdir = settings["PORTDIR"]
		# cachedir holds runtime metadata cache; cachedir_pregen holds
		# the pregenerated cache inside the repository itself.
		var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
		cachedir = os.path.join(var_cache_edb, "dep")
		cachedir_pregen = os.path.join(portdir, "metadata", "cache")

		# Build argv prefixes for each tool, run through the current
		# python with warnings enabled (-Wd).
		portage_python = portage._python_interpreter
		ebuild_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "ebuild"))
		egencache_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "egencache"))
		emerge_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "emerge"))
		emaint_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "emaint"))
		env_update_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "env-update"))
		fixpackages_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "fixpackages"))
		portageq_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "portageq"))
		quickpkg_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "quickpkg"))
		regenworld_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "regenworld"))

		rm_binary = find_binary("rm")
		self.assertEqual(rm_binary is None, False,
			"rm command not found")
		rm_cmd = (rm_binary,)

		egencache_extra_args = []
		if self._have_python_xml():
			egencache_extra_args.append("--update-use-local-desc")

		test_ebuild = portdb.findname("dev-libs/A-1")
		self.assertFalse(test_ebuild is None)

		# Each entry is an argv tuple; a leading dict supplies extra
		# environment overrides applied for just that command.
		test_commands = (
			env_update_cmd,
			emerge_cmd + ("--version",),
			emerge_cmd + ("--info",),
			emerge_cmd + ("--info", "--verbose"),
			emerge_cmd + ("--list-sets",),
			emerge_cmd + ("--check-news",),
			rm_cmd + ("-rf", cachedir),
			rm_cmd + ("-rf", cachedir_pregen),
			emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer parse-eapi-ebuild-head"},) + \
				emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			egencache_cmd + ("--update",) + tuple(egencache_extra_args),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--metadata",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--metadata",),
			emerge_cmd + ("--metadata",),
			rm_cmd + ("-rf", cachedir),
			emerge_cmd + ("--oneshot", "virtual/foo"),
			emerge_cmd + ("--pretend", "dev-libs/A"),
			ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
			emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
			emerge_cmd + ("-p", "dev-libs/B"),
			emerge_cmd + ("-B", "dev-libs/B",),
			emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),

			# trigger clean prior to pkg_pretend as in bug #390711
			ebuild_cmd + (test_ebuild, "unpack"), 
			emerge_cmd + ("--oneshot", "dev-libs/A",),

			emerge_cmd + ("--noreplace", "dev-libs/A",),
			emerge_cmd + ("--config", "dev-libs/A",),
			emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
			emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
			emerge_cmd + ("--pretend", "--depclean",),
			emerge_cmd + ("--depclean",),
			quickpkg_cmd + ("dev-libs/A",),
			emerge_cmd + ("--usepkgonly", "dev-libs/A"),
			emaint_cmd + ("--check", "all"),
			emaint_cmd + ("--fix", "all"),
			fixpackages_cmd,
			regenworld_cmd,
			portageq_cmd + ("match", eroot, "dev-libs/A"),
			portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
			portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
			portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
			portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
			portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
			portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
			portageq_cmd + ("owners", eroot, eroot + "usr"),
			emerge_cmd + ("-p", eroot + "usr"),
			emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
			emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
			emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
		)

		distdir = os.path.join(eprefix, "distdir")
		pkgdir = os.path.join(eprefix, "pkgdir")
		fake_bin = os.path.join(eprefix, "bin")
		portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
		profile_path = settings.profile_path
		# NOTE(review): the leading os.sep forces an absolute path even
		# if eprefix were relative — confirm that is the intent.
		user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

		features = []
		if not portage.process.sandbox_capable or \
			os.environ.get("SANDBOX_ON") == "1":
			features.append("-sandbox")

		# Since egencache ignores settings from the calling environment,
		# configure it via make.conf.
		make_conf = (
			"FEATURES=\"%s\"\n" % (" ".join(features),),
			"PORTDIR=\"%s\"\n" % (portdir,),
			"PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"],),
			"PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
		)

		# Prepend fake_bin so the chown/chgrp stubs created below are
		# found before the real binaries.
		path =  os.environ.get("PATH")
		if path is not None and not path.strip():
			path = None
		if path is None:
			path = ""
		else:
			path = ":" + path
		path = fake_bin + path

		# Make sure PORTAGE_PYM_PATH is the first PYTHONPATH entry so
		# subprocesses import this copy of portage.
		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		# Base environment for every spawned command.
		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"CLEAN_DELAY" : "0",
			"DISTDIR" : distdir,
			"EMERGE_WARNING_DELAY" : "0",
			"INFODIR" : "",
			"INFOPATH" : "",
			"PATH" : path,
			"PKGDIR" : pkgdir,
			"PORTAGE_INST_GID" : str(portage.data.portage_gid),
			"PORTAGE_INST_UID" : str(portage.data.portage_uid),
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_TMPDIR" : portage_tmpdir,
			"PYTHONPATH" : pythonpath,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		updates_dir = os.path.join(portdir, "profiles", "updates")
		dirs = [cachedir, cachedir_pregen, distdir, fake_bin,
			portage_tmpdir, updates_dir,
			user_config_dir, var_cache_edb]
		# chown/chgrp are stubbed with "true" so they always succeed.
		true_symlinks = ["chown", "chgrp"]
		true_binary = find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")
		try:
			for d in dirs:
				ensure_dirs(d)
			with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
				for line in make_conf:
					f.write(line)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))
			# Seed the package counter file.
			with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
				f.write(b"100")
			# non-empty system set keeps --depclean quiet
			with open(os.path.join(profile_path, "packages"), 'w') as f:
				f.write("*dev-libs/token-system-pkg")
			for cp, xml_data in metadata_xml_files:
				with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
					f.write(playground.metadata_xml_template % xml_data)
			# Sample update entries exercised by fixpackages/emaint.
			with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
				f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for args in test_commands:

				# A leading dict supplies per-command env overrides.
				if isinstance(args[0], dict):
					local_env = env.copy()
					local_env.update(args[0])
					args = args[1:]
				else:
					local_env = env

				proc = subprocess.Popen(args,
					env=local_env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					# Surface the failed command's output on stderr.
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"emerge failed with args %s" % (args,))
		finally:
			playground.cleanup()
Пример #35
0
    def _create_profile(self, ebuilds, installed, profile, repo_configs,
                        user_config, sets):

        user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

        try:
            os.makedirs(user_config_dir)
        except os.error:
            pass

        for repo in self._repositories:
            if repo == "DEFAULT":
                continue

            repo_dir = self._get_repo_dir(repo)
            profile_dir = os.path.join(repo_dir, "profiles")
            metadata_dir = os.path.join(repo_dir, "metadata")
            os.makedirs(metadata_dir)

            #Create $REPO/profiles/categories
            categories = set()
            for cpv in ebuilds:
                ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
                if ebuilds_repo is None:
                    ebuilds_repo = "test_repo"
                if ebuilds_repo == repo:
                    categories.add(catsplit(cpv)[0])

            categories_file = os.path.join(profile_dir, "categories")
            with open(categories_file, "w") as f:
                for cat in categories:
                    f.write(cat + "\n")

            #Create $REPO/profiles/license_groups
            license_file = os.path.join(profile_dir, "license_groups")
            with open(license_file, "w") as f:
                f.write("EULA TEST\n")

            repo_config = repo_configs.get(repo)
            if repo_config:
                for config_file, lines in repo_config.items():
                    if config_file not in self.config_files and not any(
                            fnmatch.fnmatch(config_file, os.path.join(x, "*"))
                            for x in self.config_files):
                        raise ValueError("Unknown config file: '%s'" %
                                         config_file)

                    if config_file in ("layout.conf", ):
                        file_name = os.path.join(repo_dir, "metadata",
                                                 config_file)
                    else:
                        file_name = os.path.join(profile_dir, config_file)
                        if "/" in config_file and not os.path.isdir(
                                os.path.dirname(file_name)):
                            os.makedirs(os.path.dirname(file_name))
                    with open(file_name, "w") as f:
                        for line in lines:
                            f.write("%s\n" % line)
                        # Temporarily write empty value of masters until it becomes default.
                        # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
                        if config_file == "layout.conf" and not any(
                                line.startswith(("masters =",
                                                 "# use implicit masters"))
                                for line in lines):
                            f.write("masters =\n")

            #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
            os.makedirs(os.path.join(repo_dir, "eclass"))

            # Temporarily write empty value of masters until it becomes default.
            if not repo_config or "layout.conf" not in repo_config:
                layout_conf_path = os.path.join(repo_dir, "metadata",
                                                "layout.conf")
                with open(layout_conf_path, "w") as f:
                    f.write("masters =\n")

            if repo == "test_repo":
                #Create a minimal profile in /var/db/repos/gentoo
                sub_profile_dir = os.path.join(profile_dir, "default", "linux",
                                               "x86", "test_profile")
                os.makedirs(sub_profile_dir)

                if not (profile and "eapi" in profile):
                    eapi_file = os.path.join(sub_profile_dir, "eapi")
                    with open(eapi_file, "w") as f:
                        f.write("0\n")

                make_defaults_file = os.path.join(sub_profile_dir,
                                                  "make.defaults")
                with open(make_defaults_file, "w") as f:
                    f.write("ARCH=\"x86\"\n")
                    f.write("ACCEPT_KEYWORDS=\"x86\"\n")

                use_force_file = os.path.join(sub_profile_dir, "use.force")
                with open(use_force_file, "w") as f:
                    f.write("x86\n")

                parent_file = os.path.join(sub_profile_dir, "parent")
                with open(parent_file, "w") as f:
                    f.write("..\n")

                if profile:
                    for config_file, lines in profile.items():
                        if config_file not in self.config_files:
                            raise ValueError("Unknown config file: '%s'" %
                                             config_file)

                        file_name = os.path.join(sub_profile_dir, config_file)
                        with open(file_name, "w") as f:
                            for line in lines:
                                f.write("%s\n" % line)

                #Create profile symlink
                os.symlink(sub_profile_dir,
                           os.path.join(user_config_dir, "make.profile"))

        make_conf = {
            "ACCEPT_KEYWORDS": "x86",
            "CLEAN_DELAY": "0",
            "DISTDIR": self.distdir,
            "EMERGE_WARNING_DELAY": "0",
            "PKGDIR": self.pkgdir,
            "PORTAGE_INST_GID": str(portage.data.portage_gid),
            "PORTAGE_INST_UID": str(portage.data.portage_uid),
            "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
        }

        if os.environ.get("NOCOLOR"):
            make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

        # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
        # need to be inherited by ebuild subprocesses.
        if 'PORTAGE_USERNAME' in os.environ:
            make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
        if 'PORTAGE_GRPNAME' in os.environ:
            make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

        make_conf_lines = []
        for k_v in make_conf.items():
            make_conf_lines.append('%s="%s"' % k_v)

        if "make.conf" in user_config:
            make_conf_lines.extend(user_config["make.conf"])

        if not portage.process.sandbox_capable or \
         os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            make_conf_lines.append(
                'FEATURES="${FEATURES} -sandbox -usersandbox"')

        configs = user_config.copy()
        configs["make.conf"] = make_conf_lines

        for config_file, lines in configs.items():
            if config_file not in self.config_files:
                raise ValueError("Unknown config file: '%s'" % config_file)

            file_name = os.path.join(user_config_dir, config_file)
            with open(file_name, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)

        #Create /usr/share/portage/config/make.globals
        make_globals_path = os.path.join(self.eroot,
                                         GLOBAL_CONFIG_PATH.lstrip(os.sep),
                                         "make.globals")
        ensure_dirs(os.path.dirname(make_globals_path))
        os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path)

        #Create /usr/share/portage/config/sets/portage.conf
        default_sets_conf_dir = os.path.join(self.eroot,
                                             "usr/share/portage/config/sets")

        try:
            os.makedirs(default_sets_conf_dir)
        except os.error:
            pass

        provided_sets_portage_conf = (os.path.join(cnf_path, "sets",
                                                   "portage.conf"))
        os.symlink(provided_sets_portage_conf,
                   os.path.join(default_sets_conf_dir, "portage.conf"))

        set_config_dir = os.path.join(user_config_dir, "sets")

        try:
            os.makedirs(set_config_dir)
        except os.error:
            pass

        for sets_file, lines in sets.items():
            file_name = os.path.join(set_config_dir, sets_file)
            with open(file_name, "w") as f:
                for line in lines:
                    f.write("%s\n" % line)

        if cnf_path_repoman is not None:
            #Create /usr/share/repoman
            repoman_share_dir = os.path.join(self.eroot, 'usr', 'share',
                                             'repoman')
            os.symlink(cnf_path_repoman, repoman_share_dir)
Пример #36
0
	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):

		user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for repo in self.repo_dirs:
			repo_dir = self._get_repo_dir(repo)
			profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
			metadata_dir = os.path.join(repo_dir, "metadata")
			os.makedirs(metadata_dir)

			#Create $REPO/profiles/categories
			categories = set()
			for cpv in ebuilds:
				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
				if ebuilds_repo is None:
					ebuilds_repo = "test_repo"
				if ebuilds_repo == repo:
					categories.add(catsplit(cpv)[0])

			categories_file = os.path.join(profile_dir, "categories")
			f = open(categories_file, "w")
			for cat in categories:
				f.write(cat + "\n")
			f.close()
			
			#Create $REPO/profiles/license_groups
			license_file = os.path.join(profile_dir, "license_groups")
			f = open(license_file, "w")
			f.write("EULA TEST\n")
			f.close()

			repo_config = repo_configs.get(repo) 
			if repo_config:
				for config_file, lines in repo_config.items():
					if config_file not in self.config_files:
						raise ValueError("Unknown config file: '%s'" % config_file)

					if config_file in ("layout.conf",):
						file_name = os.path.join(repo_dir, "metadata", config_file)
					else:
						file_name = os.path.join(profile_dir, config_file)
					f = open(file_name, "w")
					for line in lines:
						f.write("%s\n" % line)
					f.close()

			#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
			os.makedirs(os.path.join(repo_dir, "eclass"))

			if repo == "test_repo":
				#Create a minimal profile in /usr/portage
				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
				os.makedirs(sub_profile_dir)

				if not (profile and "eapi" in profile):
					eapi_file = os.path.join(sub_profile_dir, "eapi")
					f = open(eapi_file, "w")
					f.write("0\n")
					f.close()

				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
				f = open(make_defaults_file, "w")
				f.write("ARCH=\"x86\"\n")
				f.write("ACCEPT_KEYWORDS=\"x86\"\n")
				f.close()

				use_force_file = os.path.join(sub_profile_dir, "use.force")
				f = open(use_force_file, "w")
				f.write("x86\n")
				f.close()

				parent_file = os.path.join(sub_profile_dir, "parent")
				f = open(parent_file, "w")
				f.write("..\n")
				f.close()

				if profile:
					for config_file, lines in profile.items():
						if config_file not in self.config_files:
							raise ValueError("Unknown config file: '%s'" % config_file)

						file_name = os.path.join(sub_profile_dir, config_file)
						f = open(file_name, "w")
						for line in lines:
							f.write("%s\n" % line)
						f.close()

				#Create profile symlink
				os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))

				#Create minimal herds.xml
				herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
				with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
					f.write(herds_xml)

		# Write empty entries for each repository, in order to exercise
		# RepoConfigLoader's repos.conf processing.
		repos_conf_file = os.path.join(user_config_dir, "repos.conf")		
		f = open(repos_conf_file, "w")
		for repo in sorted(self.repo_dirs.keys()):
			f.write("[%s]\n" % repo)
			f.write("\n")
		f.close()

		portdir_overlay = []
		for repo_name in sorted(self.repo_dirs):
			path = self.repo_dirs[repo_name]
			if path != self.portdir:
				portdir_overlay.append(path)

		make_conf = {
			"ACCEPT_KEYWORDS": "x86",
			"CLEAN_DELAY": "0",
			"DISTDIR" : self.distdir,
			"EMERGE_WARNING_DELAY": "0",
			"PKGDIR": self.pkgdir,
			"PORTDIR": self.portdir,
			"PORTAGE_INST_GID": str(portage.data.portage_gid),
			"PORTAGE_INST_UID": str(portage.data.portage_uid),
			"PORTDIR_OVERLAY": " ".join("'%s'" % x for x in portdir_overlay),
			"PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
		}

		if os.environ.get("NOCOLOR"):
			make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
		# need to be inherited by ebuild subprocesses.
		if 'PORTAGE_USERNAME' in os.environ:
			make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
		if 'PORTAGE_GRPNAME' in os.environ:
			make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

		make_conf_lines = []
		for k_v in make_conf.items():
			make_conf_lines.append('%s="%s"' % k_v)

		if "make.conf" in user_config:
			make_conf_lines.extend(user_config["make.conf"])

		if not portage.process.sandbox_capable or \
			os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			make_conf_lines.append('FEATURES="${FEATURES} -sandbox"')

		configs = user_config.copy()
		configs["make.conf"] = make_conf_lines

		for config_file, lines in configs.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()

		#Create /usr/share/portage/config/make.globals
		make_globals_path = os.path.join(self.eroot,
			GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
		ensure_dirs(os.path.dirname(make_globals_path))
		os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"),
			make_globals_path)

		#Create /usr/share/portage/config/sets/portage.conf
		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
		
		try:
			os.makedirs(default_sets_conf_dir)
		except os.error:
			pass

		provided_sets_portage_conf = \
			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))

		set_config_dir = os.path.join(user_config_dir, "sets")

		try:
			os.makedirs(set_config_dir)
		except os.error:
			pass

		for sets_file, lines in sets.items():
			file_name = os.path.join(set_config_dir, sets_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()
Пример #37
0
    def __init__(self, ebuilds=None, binpkgs=None, installed=None,
                 profile=None, repo_configs=None, user_config=None,
                 sets=None, world=None, world_sets=None, distfiles=None,
                 eprefix=None, targetroot=False, debug=False):
        """
        ebuilds: cpv -> metadata mapping simulating available ebuilds.
        installed: cpv -> metadata mapping simulating installed packages.
            If a metadata key is missing, it gets a default value.
        profile: settings defined by the profile.

        All container parameters default to None rather than mutable
        literals (the shared-default-argument pitfall) and are
        normalized to fresh empty containers below.
        """
        # Normalize None defaults to empty containers so the rest of
        # the setup code can treat them uniformly.
        ebuilds = {} if ebuilds is None else ebuilds
        binpkgs = {} if binpkgs is None else binpkgs
        installed = {} if installed is None else installed
        profile = {} if profile is None else profile
        repo_configs = {} if repo_configs is None else repo_configs
        user_config = {} if user_config is None else user_config
        sets = {} if sets is None else sets
        world = [] if world is None else world
        world_sets = [] if world_sets is None else world_sets
        distfiles = {} if distfiles is None else distfiles

        self.debug = debug
        if eprefix is None:
            self.eprefix = normalize_path(tempfile.mkdtemp())

            # EPREFIX/bin is used by fake true_binaries. Real binaries goes into EPREFIX/usr/bin
            eubin = os.path.join(self.eprefix, "usr", "bin")
            ensure_dirs(eubin)
            essential_binaries = (
                "awk",
                "basename",
                "bzip2",
                "cat",
                "chgrp",
                "chmod",
                "chown",
                "cp",
                "egrep",
                "env",
                "find",
                "grep",
                "head",
                "install",
                "ln",
                "mkdir",
                "mkfifo",
                "mktemp",
                "mv",
                "readlink",
                "rm",
                "sed",
                "sort",
                "tar",
                "tr",
                "uname",
                "uniq",
                "xargs",
            )
            # Exclude internal wrappers from PATH lookup, then symlink
            # the real system binaries into the playground's usr/bin.
            orig_path = os.environ['PATH']
            included_paths = []
            for path in orig_path.split(':'):
                if path and not fnmatch.fnmatch(path,
                                                '*/portage/*/ebuild-helpers*'):
                    included_paths.append(path)
            try:
                os.environ['PATH'] = ':'.join(included_paths)
                for x in essential_binaries:
                    path = find_binary(x)
                    if path is None:
                        raise portage.exception.CommandNotFound(x)
                    os.symlink(path, os.path.join(eubin, x))
            finally:
                # Always restore the caller's PATH, even on failure.
                os.environ['PATH'] = orig_path
        else:
            self.eprefix = normalize_path(eprefix)

        # Tests may override portage.const.EPREFIX in order to
        # simulate a prefix installation. It's reasonable to do
        # this because tests should be self-contained such that
        # the "real" value of portage.const.EPREFIX is entirely
        # irrelevant (see bug #492932).
        portage.const.EPREFIX = self.eprefix.rstrip(os.sep)

        self.eroot = self.eprefix + os.sep
        if targetroot:
            self.target_root = os.path.join(self.eroot, 'target_root')
        else:
            self.target_root = os.sep
        self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
        self.pkgdir = os.path.join(self.eprefix, "pkgdir")
        self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
        os.makedirs(self.vdbdir)

        if not debug:
            portage.util.noiselimit = -2

        self._repositories = {}
        #Make sure the main repo is always created
        self._get_repo_dir("test_repo")

        self._create_distfiles(distfiles)
        self._create_ebuilds(ebuilds)
        self._create_binpkgs(binpkgs)
        self._create_installed(installed)
        self._create_profile(ebuilds, installed, profile, repo_configs,
                             user_config, sets)
        self._create_world(world, world_sets)

        self.settings, self.trees = self._load_config()

        self._create_ebuild_manifests(ebuilds)

        portage.util.noiselimit = 0
	def testBlockerFileCollision(self):
		"""
		Verify that two packages which block each other and install the
		same file (/usr/lib/file-collision) can replace one another:
		after each emerge the file's content matches the most recently
		merged package's ${PN}, and unmerging removes the file.  The
		emerge commands run as subprocesses against a
		ResolverPlayground environment.
		"""

		debug = False

		# Ebuild fragment shared by both packages: each one writes its
		# own ${PN} into the same installed path /usr/lib/file-collision.
		install_something = """
S="${WORKDIR}"

src_install() {
	einfo "installing something..."
	insinto /usr/lib
	echo "${PN}" > "${T}/file-collision"
	doins "${T}/file-collision"
}
"""

		# A and B declare each other as blockers via RDEPEND.
		ebuilds = {
			"dev-libs/A-1" : {
				"EAPI": "6",
				"MISC_CONTENT": install_something,
				"RDEPEND":  "!dev-libs/B",
			},
			"dev-libs/B-1" : {
				"EAPI": "6",
				"MISC_CONTENT": install_something,
				"RDEPEND":  "!dev-libs/A",
			},
		}

		playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
		user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)

		portage_python = portage._python_interpreter
		emerge_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "emerge"))

		file_collision = os.path.join(eroot, 'usr/lib/file-collision')

		# Each entry is either an emerge command tuple, a (callable,)
		# in-process assertion about the installed file, or an
		# (env-override dict,) prefixed command.
		test_commands = (
			emerge_cmd + ("--oneshot", "dev-libs/A",),
			(lambda: portage.util.grablines(file_collision) == ["A\n"],),
			emerge_cmd + ("--oneshot", "dev-libs/B",),
			(lambda: portage.util.grablines(file_collision) == ["B\n"],),
			emerge_cmd + ("--oneshot", "dev-libs/A",),
			(lambda: portage.util.grablines(file_collision) == ["A\n"],),
			({"FEATURES":"parallel-install"},) + emerge_cmd + ("--oneshot", "dev-libs/B",),
			(lambda: portage.util.grablines(file_collision) == ["B\n"],),
			({"FEATURES":"parallel-install"},) + emerge_cmd + ("-Cq", "dev-libs/B",),
			(lambda: not os.path.exists(file_collision),),
		)

		fake_bin = os.path.join(eprefix, "bin")
		portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
		profile_path = settings.profile_path

		# Prepend the playground's fake bin dir to PATH; an
		# empty/whitespace-only PATH is treated the same as unset.
		path =  os.environ.get("PATH")
		if path is not None and not path.strip():
			path = None
		if path is None:
			path = ""
		else:
			path = ":" + path
		path = fake_bin + path

		# Ensure PORTAGE_PYM_PATH is the first PYTHONPATH entry so the
		# subprocesses import this portage instance, not an installed one.
		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		# Minimal, explicit environment for the emerge subprocesses.
		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"PATH" : path,
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
			"PYTHONPATH" : pythonpath,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		dirs = [playground.distdir, fake_bin, portage_tmpdir,
			user_config_dir, var_cache_edb]
		# chown/chgrp are replaced by symlinks to "true" so merges
		# don't require root privileges.
		true_symlinks = ["chown", "chgrp"]
		true_binary = find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")
		try:
			for d in dirs:
				ensure_dirs(d)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))
			with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
				f.write(b"100")
			# non-empty system set keeps --unmerge quiet
			with open(os.path.join(profile_path, "packages"), 'w') as f:
				f.write("*dev-libs/token-system-pkg")

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for i, args in enumerate(test_commands):

				# A bare callable is run in-process as an assertion.
				if hasattr(args[0], '__call__'):
					self.assertTrue(args[0](),
						"callable at index %s failed" % (i,))
					continue

				# A leading dict supplies extra environment variables
				# for this command only.
				if isinstance(args[0], dict):
					local_env = env.copy()
					local_env.update(args[0])
					args = args[1:]
				else:
					local_env = env

				proc = subprocess.Popen(args,
					env=local_env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					# Buffer the subprocess output and replay it on
					# stderr only if the command failed.
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"emerge failed with args %s" % (args,))
		finally:
			# Always restore playground state and remove temp dirs.
			playground.debug = False
			playground.cleanup()
Пример #39
0
    def testSimple(self):

        debug = False

        install_something = """
S="${WORKDIR}"

pkg_pretend() {
	einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
	einfo "installing something..."
	insinto /usr/lib/${P}
	echo "blah blah blah" > "${T}"/regular-file
	doins "${T}"/regular-file
	dosym regular-file /usr/lib/${P}/symlink || die

	# Test CONFIG_PROTECT
	insinto /etc
	newins "${T}"/regular-file ${PN}-${SLOT%/*}

	# Test code for bug #381629, using a copyright symbol encoded with latin-1.
	# We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
	# works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
	# some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
	# running tests for Python 3.2 (even though it's bash that is ultimately
	# responsible for performing the transformation).
	local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
	insinto "${latin_1_dir}"
	echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
	doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
	dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
}

pkg_config() {
	einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
	einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
	einfo "called pkg_preinst for $CATEGORY/$PF"

	# Test that has_version and best_version work correctly with
	# prefix (involves internal ROOT -> EROOT calculation in order
	# to support ROOT override via the environment with EAPIs 3
	# and later which support prefix).
	if has_version $CATEGORY/$PN:$SLOT ; then
		einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
		einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
	else
		einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
	fi
	if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
		if has_version --host-root $CATEGORY/$PN:$SLOT ; then
			einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT"
			einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
		else
			einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT"
		fi
	fi
}

"""

        ebuilds = {
            "dev-libs/A-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "MISC_CONTENT": install_something,
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/B-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "MISC_CONTENT": install_something,
            },
            "virtual/foo-0": {
                "EAPI": "5",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
            },
        }

        installed = {
            "dev-libs/A-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
                "USE": "flag",
            },
            "dev-libs/B-1": {
                "EAPI": "5",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "USE": "flag",
            },
            "dev-libs/depclean-me-1": {
                "EAPI": "5",
                "IUSE": "",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "USE": "",
            },
            "app-misc/depclean-me-1": {
                "EAPI": "5",
                "IUSE": "",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "dev-libs/depclean-me",
                "USE": "",
            },
        }

        metadata_xml_files = (
            (
                "dev-libs/A",
                {
                    "herd":
                    "base-system",
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/B",
                {
                    "herd":
                    "no-herd",
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
        )

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        installed=installed,
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        trees = playground.trees
        portdb = trees[eroot]["porttree"].dbapi
        test_repo_location = settings.repositories["test_repo"].location
        var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
        cachedir = os.path.join(var_cache_edb, "dep")
        cachedir_pregen = os.path.join(test_repo_location, "metadata",
                                       "md5-cache")

        portage_python = portage._python_interpreter
        dispatch_conf_cmd = (portage_python, "-b", "-Wd",
                             os.path.join(self.sbindir, "dispatch-conf"))
        ebuild_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "ebuild"))
        egencache_cmd = (portage_python, "-b", "-Wd",
                         os.path.join(self.bindir, "egencache"), "--repo",
                         "test_repo", "--repositories-configuration",
                         settings.repositories.config_string())
        emerge_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "emerge"))
        emaint_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.sbindir, "emaint"))
        env_update_cmd = (portage_python, "-b", "-Wd",
                          os.path.join(self.sbindir, "env-update"))
        etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir,
                                                    "etc-update"))
        fixpackages_cmd = (portage_python, "-b", "-Wd",
                           os.path.join(self.sbindir, "fixpackages"))
        portageq_cmd = (portage_python, "-b", "-Wd",
                        os.path.join(self.bindir, "portageq"))
        quickpkg_cmd = (portage_python, "-b", "-Wd",
                        os.path.join(self.bindir, "quickpkg"))
        regenworld_cmd = (portage_python, "-b", "-Wd",
                          os.path.join(self.sbindir, "regenworld"))

        rm_binary = find_binary("rm")
        self.assertEqual(rm_binary is None, False, "rm command not found")
        rm_cmd = (rm_binary, )

        egencache_extra_args = []
        if self._have_python_xml():
            egencache_extra_args.append("--update-use-local-desc")

        test_ebuild = portdb.findname("dev-libs/A-1")
        self.assertFalse(test_ebuild is None)

        cross_prefix = os.path.join(eprefix, "cross_prefix")
        cross_root = os.path.join(eprefix, "cross_root")
        cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))

        test_commands = (
         env_update_cmd,
         portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
          "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
         etc_update_cmd,
         dispatch_conf_cmd,
         emerge_cmd + ("--version",),
         emerge_cmd + ("--info",),
         emerge_cmd + ("--info", "--verbose"),
         emerge_cmd + ("--list-sets",),
         emerge_cmd + ("--check-news",),
         rm_cmd + ("-rf", cachedir),
         rm_cmd + ("-rf", cachedir_pregen),
         emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         egencache_cmd + ("--update",) + tuple(egencache_extra_args),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--metadata",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--metadata",),
         emerge_cmd + ("--metadata",),
         rm_cmd + ("-rf", cachedir),
         emerge_cmd + ("--oneshot", "virtual/foo"),
         lambda: self.assertFalse(os.path.exists(
          os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
         ({"FEATURES" : "unmerge-backup"},) + \
          emerge_cmd + ("--unmerge", "virtual/foo"),
         lambda: self.assertTrue(os.path.exists(
          os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
         emerge_cmd + ("--pretend", "dev-libs/A"),
         ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
         emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
         emerge_cmd + ("-p", "dev-libs/B"),
         emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
         emerge_cmd + ("-B", "dev-libs/B",),
         emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),

         # trigger clean prior to pkg_pretend as in bug #390711
         ebuild_cmd + (test_ebuild, "unpack"),
         emerge_cmd + ("--oneshot", "dev-libs/A",),

         emerge_cmd + ("--noreplace", "dev-libs/A",),
         emerge_cmd + ("--config", "dev-libs/A",),
         emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
         emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
         emerge_cmd + ("--pretend", "--depclean",),
         emerge_cmd + ("--depclean",),
         quickpkg_cmd + ("--include-config", "y", "dev-libs/A",),
         # Test bug #523684, where a file renamed or removed by the
         # admin forces replacement files to be merged with config
         # protection.
         lambda: self.assertEqual(0,
          len(list(find_updated_config_files(eroot,
          shlex_split(settings["CONFIG_PROTECT"]))))),
         lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
         emerge_cmd + ("--usepkgonly", "dev-libs/A"),
         lambda: self.assertEqual(1,
          len(list(find_updated_config_files(eroot,
          shlex_split(settings["CONFIG_PROTECT"]))))),
         emaint_cmd + ("--check", "all"),
         emaint_cmd + ("--fix", "all"),
         fixpackages_cmd,
         regenworld_cmd,
         portageq_cmd + ("match", eroot, "dev-libs/A"),
         portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
         portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
         portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
         portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
         portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
         portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
         portageq_cmd + ("owners", eroot, eroot + "usr"),
         emerge_cmd + ("-p", eroot + "usr"),
         emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
         emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
         emerge_cmd + ("-C", "--quiet", "dev-libs/B"),

         # Test cross-prefix usage, including chpathtool for binpkgs.
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("--usepkgonly", "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("dev-libs/A",),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),

         # Test ROOT support
         ({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/B",),
         portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
        )

        distdir = playground.distdir
        pkgdir = playground.pkgdir
        fake_bin = os.path.join(eprefix, "bin")
        portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
        profile_path = settings.profile_path
        user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

        path = os.environ.get("PATH")
        if path is not None and not path.strip():
            path = None
        if path is None:
            path = ""
        else:
            path = ":" + path
        path = fake_bin + path

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX":
            eprefix,
            "CLEAN_DELAY":
            "0",
            "DISTDIR":
            distdir,
            "EMERGE_WARNING_DELAY":
            "0",
            "INFODIR":
            "",
            "INFOPATH":
            "",
            "PATH":
            path,
            "PKGDIR":
            pkgdir,
            "PORTAGE_INST_GID":
            str(portage.data.portage_gid),
            "PORTAGE_INST_UID":
            str(portage.data.portage_uid),
            "PORTAGE_PYTHON":
            portage_python,
            "PORTAGE_REPOSITORIES":
            settings.repositories.config_string(),
            "PORTAGE_TMPDIR":
            portage_tmpdir,
            "PYTHONDONTWRITEBYTECODE":
            os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH":
            pythonpath,
            "__PORTAGE_TEST_PATH_OVERRIDE":
            fake_bin,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
             os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

        updates_dir = os.path.join(test_repo_location, "profiles", "updates")
        dirs = [
            cachedir, cachedir_pregen, cross_eroot, cross_prefix, distdir,
            fake_bin, portage_tmpdir, updates_dir, user_config_dir,
            var_cache_edb
        ]
        etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
        # Override things that may be unavailable, or may have portability
        # issues when running tests in exotic environments.
        #   prepstrip - bug #447810 (bash read builtin EINTR problem)
        true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
        true_binary = find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")
        try:
            for d in dirs:
                ensure_dirs(d)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))
            for x in etc_symlinks:
                os.symlink(os.path.join(self.cnf_etc_path, x),
                           os.path.join(eprefix, "etc", x))
            with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
                f.write(b"100")
            # non-empty system set keeps --depclean quiet
            with open(os.path.join(profile_path, "packages"), 'w') as f:
                f.write("*dev-libs/token-system-pkg")
            for cp, xml_data in metadata_xml_files:
                with open(os.path.join(test_repo_location, cp, "metadata.xml"),
                          'w') as f:
                    f.write(playground.metadata_xml_template % xml_data)
            with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
                f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for args in test_commands:

                if hasattr(args, '__call__'):
                    args()
                    continue

                if isinstance(args[0], dict):
                    local_env = env.copy()
                    local_env.update(args[0])
                    args = args[1:]
                else:
                    local_env = env

                proc = subprocess.Popen(args, env=local_env, stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "emerge failed with args %s" % (args, ))
        finally:
            playground.cleanup()
Пример #40
0
	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):

		user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for repo in self.repo_dirs:
			repo_dir = self._get_repo_dir(repo)
			profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
			metadata_dir = os.path.join(repo_dir, "metadata")
			os.makedirs(metadata_dir)

			#Create $REPO/profiles/categories
			categories = set()
			for cpv in ebuilds:
				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
				if ebuilds_repo is None:
					ebuilds_repo = "test_repo"
				if ebuilds_repo == repo:
					categories.add(catsplit(cpv)[0])

			categories_file = os.path.join(profile_dir, "categories")
			f = open(categories_file, "w")
			for cat in categories:
				f.write(cat + "\n")
			f.close()
			
			#Create $REPO/profiles/license_groups
			license_file = os.path.join(profile_dir, "license_groups")
			f = open(license_file, "w")
			f.write("EULA TEST\n")
			f.close()

			repo_config = repo_configs.get(repo) 
			if repo_config:
				for config_file, lines in repo_config.items():
					if config_file not in self.config_files:
						raise ValueError("Unknown config file: '%s'" % config_file)

					if config_file in ("layout.conf",):
						file_name = os.path.join(repo_dir, "metadata", config_file)
					else:
						file_name = os.path.join(profile_dir, config_file)
					f = open(file_name, "w")
					for line in lines:
						f.write("%s\n" % line)
					f.close()

			#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
			os.makedirs(os.path.join(repo_dir, "eclass"))

			if repo == "test_repo":
				#Create a minimal profile in /usr/portage
				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
				os.makedirs(sub_profile_dir)

				eapi_file = os.path.join(sub_profile_dir, "eapi")
				f = open(eapi_file, "w")
				f.write("0\n")
				f.close()

				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
				f = open(make_defaults_file, "w")
				f.write("ARCH=\"x86\"\n")
				f.write("ACCEPT_KEYWORDS=\"x86\"\n")
				f.close()

				use_force_file = os.path.join(sub_profile_dir, "use.force")
				f = open(use_force_file, "w")
				f.write("x86\n")
				f.close()

				parent_file = os.path.join(sub_profile_dir, "parent")
				f = open(parent_file, "w")
				f.write("..\n")
				f.close()

				if profile:
					for config_file, lines in profile.items():
						if config_file not in self.config_files:
							raise ValueError("Unknown config file: '%s'" % config_file)

						file_name = os.path.join(sub_profile_dir, config_file)
						f = open(file_name, "w")
						for line in lines:
							f.write("%s\n" % line)
						f.close()

				#Create profile symlink
				os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))

				#Create minimal herds.xml
				herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
				with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
					f.write(herds_xml)

		# Write empty entries for each repository, in order to exercise
		# RepoConfigLoader's repos.conf processing.
		repos_conf_file = os.path.join(user_config_dir, "repos.conf")		
		f = open(repos_conf_file, "w")
		for repo in sorted(self.repo_dirs.keys()):
			f.write("[%s]\n" % repo)
			f.write("\n")
		f.close()

		for config_file, lines in user_config.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()

		#Create /usr/share/portage/config/make.globals
		make_globals_path = os.path.join(self.eroot,
			GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
		ensure_dirs(os.path.dirname(make_globals_path))
		os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"),
			make_globals_path)

		#Create /usr/share/portage/config/sets/portage.conf
		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
		
		try:
			os.makedirs(default_sets_conf_dir)
		except os.error:
			pass

		provided_sets_portage_conf = \
			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))

		set_config_dir = os.path.join(user_config_dir, "sets")

		try:
			os.makedirs(set_config_dir)
		except os.error:
			pass

		for sets_file, lines in sets.items():
			file_name = os.path.join(set_config_dir, sets_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()

		user_config_dir = os.path.join(self.eroot, "etc", "portage")

		try:
			os.makedirs(user_config_dir)
		except os.error:
			pass

		for config_file, lines in user_config.items():
			if config_file not in self.config_files:
				raise ValueError("Unknown config file: '%s'" % config_file)

			file_name = os.path.join(user_config_dir, config_file)
			f = open(file_name, "w")
			for line in lines:
				f.write("%s\n" % line)
			f.close()
Пример #41
0
    def testSimple(self):
        """Build a small test repository with ResolverPlayground, then run
        ``repoman full`` from several working directories inside a symlink
        to the repo, and — when a git binary is available — exercise
        ``repoman manifest``/``commit`` inside a git checkout.
        """
        debug = False

        # Skip early if prerequisites are missing (self._must_skip
        # returns a human-readable reason or a falsy value).
        skip_reason = self._must_skip()
        if skip_reason:
            self.portage_skip = skip_reason
            self.assertFalse(True, skip_reason)
            return

        # Header expected by repoman's copyright checks; the year is
        # kept current so the check does not go stale.
        copyright_header = """# Copyright 1999-%s Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: $
""" % time.gmtime().tm_year

        # Enable ChangeLog updates so the repoman commit path that
        # rewrites ChangeLogs is exercised.
        repo_configs = {
            "test_repo": {
                "layout.conf": ("update-changelog = true", ),
            }
        }

        # (arch, profile path, status) triples for profiles.desc.
        profiles = (
            ("x86", "default/linux/x86/test_profile", "stable"),
            ("x86", "default/linux/x86/test_dev", "dev"),
            ("x86", "default/linux/x86/test_exp", "exp"),
        )

        profile = {
            "eapi": ("5", ),
            "package.use.stable.mask": ("dev-libs/A flag", )
        }

        # Ebuilds spanning stable/unstable keywords and a USE-conditional
        # dependency chain (A -> B, C -> A) to give repoman something to
        # validate.
        ebuilds = {
            "dev-libs/A-0": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "5",
                "HOMEPAGE": "http://example.com",
                "IUSE": "flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/A-1": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "4",
                "HOMEPAGE": "http://example.com",
                "IUSE": "flag",
                "KEYWORDS": "~x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/B-1": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "4",
                "HOMEPAGE": "http://example.com",
                "IUSE": "flag",
                "KEYWORDS": "~x86",
                "LICENSE": "GPL-2",
            },
            "dev-libs/C-0": {
                "COPYRIGHT_HEADER": copyright_header,
                "DESCRIPTION": "Desc goes here",
                "EAPI": "4",
                "HOMEPAGE": "http://example.com",
                "IUSE": "flag",
                # must be unstable, since dev-libs/A[flag] is stable masked
                "KEYWORDS": "~x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/A[flag] )",
            },
        }
        licenses = ["GPL-2"]
        arch_list = ["x86"]
        metadata_dtd = os.path.join(PORTAGE_BASE_PATH, "cnf/metadata.dtd")
        # Per-package metadata.xml contents (herd + USE flag descriptions)
        # rendered through playground.metadata_xml_template below.
        metadata_xml_files = (
            (
                "dev-libs/A",
                {
                    "herd":
                    "base-system",
                    "flags":
                    "<flag name='flag' restrict='&gt;=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/B",
                {
                    "herd":
                    "no-herd",
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/C",
                {
                    "herd":
                    "no-herd",
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
        )

        use_desc = (("flag",
                     "Description of how USE='flag' affects packages"), )

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        profile=profile,
                                        repo_configs=repo_configs,
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        portdb = playground.trees[playground.eroot]["porttree"].dbapi
        homedir = os.path.join(eroot, "home")
        distdir = os.path.join(eprefix, "distdir")
        test_repo_location = settings.repositories["test_repo"].location
        profiles_dir = os.path.join(test_repo_location, "profiles")
        license_dir = os.path.join(test_repo_location, "licenses")

        # Run repoman under the same interpreter, with warnings enabled
        # (-Wd) so deprecation warnings surface on stderr.
        repoman_cmd = (portage._python_interpreter, "-b", "-Wd",
                       os.path.join(self.bindir, "repoman"))

        # git is optional: the git-based tests below are skipped when
        # git_binary is None.
        git_binary = find_binary("git")
        git_cmd = (git_binary, )

        cp_binary = find_binary("cp")
        self.assertEqual(cp_binary is None, False, "cp command not found")
        cp_cmd = (cp_binary, )

        test_ebuild = portdb.findname("dev-libs/A-1")
        self.assertFalse(test_ebuild is None)

        committer_name = "Gentoo Dev"
        # NOTE(review): this literal looks mangled (scraping artifact?) —
        # confirm against the upstream source.
        committer_email = "*****@*****.**"

        # (cwd relative to the repo, command) pairs: init a git repo,
        # commit everything, then repeatedly copy dev-libs/A-1 to new
        # versions and commit via repoman from different directories.
        # test_ebuild[:-8] strips the "1.ebuild" suffix.
        git_test = (
            ("", repoman_cmd + ("manifest", )),
            ("", git_cmd + (
                "config",
                "--global",
                "user.name",
                committer_name,
            )),
            ("", git_cmd + (
                "config",
                "--global",
                "user.email",
                committer_email,
            )),
            ("", git_cmd + ("init-db", )),
            ("", git_cmd + ("add", ".")),
            ("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
            ("", repoman_cmd + ("full", "-d")),
            ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
            ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
            ("", repoman_cmd + ("commit", "-m", "bump to version 2")),
            ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")),
            ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")),
            ("dev-libs", repoman_cmd + ("commit", "-m", "bump to version 3")),
            ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")),
            ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")),
            ("dev-libs/A",
             repoman_cmd + ("commit", "-m", "bump to version 4")),
        )

        # Ensure PORTAGE_PYM_PATH is first on PYTHONPATH so subprocesses
        # import this portage checkout rather than an installed one.
        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        # Minimal, controlled environment for the subprocesses.
        env = {
            "PORTAGE_OVERRIDE_EPREFIX":
            eprefix,
            "DISTDIR":
            distdir,
            "GENTOO_COMMITTER_NAME":
            committer_name,
            "GENTOO_COMMITTER_EMAIL":
            committer_email,
            "HOME":
            homedir,
            "PATH":
            os.environ["PATH"],
            "PORTAGE_GRPNAME":
            os.environ["PORTAGE_GRPNAME"],
            "PORTAGE_USERNAME":
            os.environ["PORTAGE_USERNAME"],
            "PORTAGE_REPOSITORIES":
            settings.repositories.config_string(),
            "PYTHONDONTWRITEBYTECODE":
            os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH":
            pythonpath,
        }

        if os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            env["FEATURES"] = "-sandbox -usersandbox"

        dirs = [homedir, license_dir, profiles_dir, distdir]
        try:
            # Lay out the on-disk fixtures repoman expects: skel.ChangeLog,
            # profiles.desc, the extra profiles, licenses, arch.list,
            # use.desc and per-package metadata.xml files.
            for d in dirs:
                ensure_dirs(d)
            with open(os.path.join(test_repo_location, "skel.ChangeLog"),
                      'w') as f:
                f.write(copyright_header)
            with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f:
                for x in profiles:
                    f.write("%s %s %s\n" % x)

            # ResolverPlayground only created the first profile,
            # so create the remaining ones.
            for x in profiles[1:]:
                sub_profile_dir = os.path.join(profiles_dir, x[1])
                ensure_dirs(sub_profile_dir)
                for config_file, lines in profile.items():
                    file_name = os.path.join(sub_profile_dir, config_file)
                    with open(file_name, "w") as f:
                        for line in lines:
                            f.write("%s\n" % line)

            # License files only need to exist; content is irrelevant.
            for x in licenses:
                open(os.path.join(license_dir, x), 'wb').close()
            with open(os.path.join(profiles_dir, "arch.list"), 'w') as f:
                for x in arch_list:
                    f.write("%s\n" % x)
            with open(os.path.join(profiles_dir, "use.desc"), 'w') as f:
                for k, v in use_desc:
                    f.write("%s - %s\n" % (k, v))
            for cp, xml_data in metadata_xml_files:
                with open(os.path.join(test_repo_location, cp, "metadata.xml"),
                          'w') as f:
                    f.write(playground.metadata_xml_template % xml_data)
            # Use a symlink to test_repo, in order to trigger bugs
            # involving canonical vs. non-canonical paths.
            test_repo_symlink = os.path.join(eroot, "test_repo_symlink")
            os.symlink(test_repo_location, test_repo_symlink)
            # repoman checks metadata.dtd for recent CTIME, so copy the file in
            # order to ensure that the CTIME is current
            # NOTE: if we don't have the file around, let repoman try to fetch it.
            if os.path.exists(metadata_dtd):
                shutil.copyfile(metadata_dtd,
                                os.path.join(distdir, "metadata.dtd"))

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            # Run "repoman full" from the repo root and from category and
            # package subdirectories, all through the symlinked path.
            for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
                abs_cwd = os.path.join(test_repo_symlink, cwd)
                proc = subprocess.Popen(repoman_cmd + ("full", ),
                                        cwd=abs_cwd,
                                        env=env,
                                        stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    # Captured output is replayed to stderr only on failure,
                    # to keep passing runs quiet.
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "repoman failed in %s" % (cwd, ))

            if git_binary is not None:
                for cwd, cmd in git_test:
                    abs_cwd = os.path.join(test_repo_symlink, cwd)
                    proc = subprocess.Popen(cmd,
                                            cwd=abs_cwd,
                                            env=env,
                                            stdout=stdout)

                    if debug:
                        proc.wait()
                    else:
                        output = proc.stdout.readlines()
                        proc.wait()
                        proc.stdout.close()
                        if proc.returncode != os.EX_OK:
                            for line in output:
                                sys.stderr.write(_unicode_decode(line))

                    self.assertEqual(os.EX_OK, proc.returncode,
                                     "%s failed in %s" % (
                                         cmd,
                                         cwd,
                                     ))
        finally:
            # Always remove the playground's temporary tree.
            playground.cleanup()
Пример #42
0
    def _async_test_simple(self, playground, metadata_xml_files, loop=None):

        debug = playground.debug
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        trees = playground.trees
        portdb = trees[eroot]["porttree"].dbapi
        test_repo_location = settings.repositories["test_repo"].location
        var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
        cachedir = os.path.join(var_cache_edb, "dep")
        cachedir_pregen = os.path.join(test_repo_location, "metadata",
                                       "md5-cache")

        portage_python = portage._python_interpreter
        dispatch_conf_cmd = (portage_python, "-b", "-Wd",
                             os.path.join(self.sbindir, "dispatch-conf"))
        ebuild_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "ebuild"))
        egencache_cmd = (portage_python, "-b", "-Wd",
                         os.path.join(self.bindir, "egencache"), "--repo",
                         "test_repo", "--repositories-configuration",
                         settings.repositories.config_string())
        emerge_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "emerge"))
        emaint_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.sbindir, "emaint"))
        env_update_cmd = (portage_python, "-b", "-Wd",
                          os.path.join(self.sbindir, "env-update"))
        etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir,
                                                    "etc-update"))
        fixpackages_cmd = (portage_python, "-b", "-Wd",
                           os.path.join(self.sbindir, "fixpackages"))
        portageq_cmd = (portage_python, "-b", "-Wd",
                        os.path.join(self.bindir, "portageq"))
        quickpkg_cmd = (portage_python, "-b", "-Wd",
                        os.path.join(self.bindir, "quickpkg"))
        regenworld_cmd = (portage_python, "-b", "-Wd",
                          os.path.join(self.sbindir, "regenworld"))

        rm_binary = find_binary("rm")
        self.assertEqual(rm_binary is None, False, "rm command not found")
        rm_cmd = (rm_binary, )

        egencache_extra_args = []
        if self._have_python_xml():
            egencache_extra_args.append("--update-use-local-desc")

        test_ebuild = portdb.findname("dev-libs/A-1")
        self.assertFalse(test_ebuild is None)

        cross_prefix = os.path.join(eprefix, "cross_prefix")
        cross_root = os.path.join(eprefix, "cross_root")
        cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))

        binhost_dir = os.path.join(eprefix, "binhost")
        binhost_address = '127.0.0.1'
        binhost_remote_path = '/binhost'
        binhost_server = AsyncHTTPServer(
            binhost_address, BinhostContentMap(binhost_remote_path,
                                               binhost_dir), loop).__enter__()
        binhost_uri = 'http://{address}:{port}{path}'.format(
            address=binhost_address,
            port=binhost_server.server_port,
            path=binhost_remote_path)

        test_commands = (
         emerge_cmd + ("--usepkgonly", "--root", cross_root, "--quickpkg-direct=y", "--quickpkg-direct-root", "/", "dev-libs/A"),
         emerge_cmd + ("--usepkgonly", "--quickpkg-direct=y", "--quickpkg-direct-root", cross_root, "dev-libs/A"),
         env_update_cmd,
         portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
          "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
         etc_update_cmd,
         dispatch_conf_cmd,
         emerge_cmd + ("--version",),
         emerge_cmd + ("--info",),
         emerge_cmd + ("--info", "--verbose"),
         emerge_cmd + ("--list-sets",),
         emerge_cmd + ("--check-news",),
         rm_cmd + ("-rf", cachedir),
         rm_cmd + ("-rf", cachedir_pregen),
         emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         egencache_cmd + ("--update",) + tuple(egencache_extra_args),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--metadata",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--metadata",),
         emerge_cmd + ("--metadata",),
         rm_cmd + ("-rf", cachedir),
         emerge_cmd + ("--oneshot", "virtual/foo"),
         lambda: self.assertFalse(os.path.exists(
          os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
         ({"FEATURES" : "unmerge-backup"},) + \
          emerge_cmd + ("--unmerge", "virtual/foo"),
         lambda: self.assertTrue(os.path.exists(
          os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
         emerge_cmd + ("--pretend", "dev-libs/A"),
         ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
         emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
         emerge_cmd + ("-p", "dev-libs/B"),
         emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
         emerge_cmd + ("-B", "dev-libs/B",),
         emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),

         # trigger clean prior to pkg_pretend as in bug #390711
         ebuild_cmd + (test_ebuild, "unpack"),
         emerge_cmd + ("--oneshot", "dev-libs/A",),

         emerge_cmd + ("--noreplace", "dev-libs/A",),
         emerge_cmd + ("--config", "dev-libs/A",),
         emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
         emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
         emerge_cmd + ("--pretend", "--depclean",),
         emerge_cmd + ("--depclean",),
         quickpkg_cmd + ("--include-config", "y", "dev-libs/A",),
         # Test bug #523684, where a file renamed or removed by the
         # admin forces replacement files to be merged with config
         # protection.
         lambda: self.assertEqual(0,
          len(list(find_updated_config_files(eroot,
          shlex_split(settings["CONFIG_PROTECT"]))))),
         lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
         emerge_cmd + ("--usepkgonly", "dev-libs/A"),
         lambda: self.assertEqual(1,
          len(list(find_updated_config_files(eroot,
          shlex_split(settings["CONFIG_PROTECT"]))))),
         emaint_cmd + ("--check", "all"),
         emaint_cmd + ("--fix", "all"),
         fixpackages_cmd,
         regenworld_cmd,
         portageq_cmd + ("match", eroot, "dev-libs/A"),
         portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
         portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
         portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
         portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
         portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
         portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
         portageq_cmd + ("owners", eroot, eroot + "usr"),
         emerge_cmd + ("-p", eroot + "usr"),
         emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
         emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
         emerge_cmd + ("-C", "--quiet", "dev-libs/B"),

         # If EMERGE_DEFAULT_OPTS contains --autounmask=n, then --autounmask
         # must be specified with --autounmask-continue.
         ({"EMERGE_DEFAULT_OPTS" : "--autounmask=n"},) + \
          emerge_cmd + ("--autounmask", "--autounmask-continue", "dev-libs/C",),
         # Verify that the above --autounmask-continue command caused
         # USE=flag to be applied correctly to dev-libs/D.
         portageq_cmd + ("match", eroot, "dev-libs/D[flag]"),

         # Test cross-prefix usage, including chpathtool for binpkgs.
         # EAPI 7
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("dev-libs/C",),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/C"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/D"),
         ({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/D",),
         portageq_cmd + ("has_version", cross_eroot, "dev-libs/D"),
         # EAPI 5
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("--usepkgonly", "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          emerge_cmd + ("dev-libs/A",),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
         ({"EPREFIX" : cross_prefix},) + \
          portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),

         # Test ROOT support
         ({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/B",),
         portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
        )

        # Test binhost support if FETCHCOMMAND is available.
        binrepos_conf_file = os.path.join(os.sep, eprefix, BINREPOS_CONF_FILE)
        with open(binrepos_conf_file, 'wt') as f:
            f.write('[test-binhost]\n')
            f.write('sync-uri = {}\n'.format(binhost_uri))
        fetchcommand = portage.util.shlex_split(
            playground.settings['FETCHCOMMAND'])
        fetch_bin = portage.process.find_binary(fetchcommand[0])
        if fetch_bin is not None:
            test_commands = test_commands + (
             lambda: os.rename(pkgdir, binhost_dir),
             emerge_cmd + ("-e", "--getbinpkgonly", "dev-libs/A"),
             lambda: shutil.rmtree(pkgdir),
             lambda: os.rename(binhost_dir, pkgdir),
             # Remove binrepos.conf and test PORTAGE_BINHOST.
             lambda: os.unlink(binrepos_conf_file),
             lambda: os.rename(pkgdir, binhost_dir),
             ({"PORTAGE_BINHOST": binhost_uri},) + \
              emerge_cmd + ("-fe", "--getbinpkgonly", "dev-libs/A"),
             lambda: shutil.rmtree(pkgdir),
             lambda: os.rename(binhost_dir, pkgdir),
            )

        distdir = playground.distdir
        pkgdir = playground.pkgdir
        fake_bin = os.path.join(eprefix, "bin")
        portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
        profile_path = settings.profile_path
        user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

        path = os.environ.get("PATH")
        if path is not None and not path.strip():
            path = None
        if path is None:
            path = ""
        else:
            path = ":" + path
        path = fake_bin + path

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX":
            eprefix,
            "CLEAN_DELAY":
            "0",
            "DISTDIR":
            distdir,
            "EMERGE_WARNING_DELAY":
            "0",
            "INFODIR":
            "",
            "INFOPATH":
            "",
            "PATH":
            path,
            "PKGDIR":
            pkgdir,
            "PORTAGE_INST_GID":
            str(portage.data.portage_gid),
            "PORTAGE_INST_UID":
            str(portage.data.portage_uid),
            "PORTAGE_PYTHON":
            portage_python,
            "PORTAGE_REPOSITORIES":
            settings.repositories.config_string(),
            "PORTAGE_TMPDIR":
            portage_tmpdir,
            "PORTAGE_LOGDIR":
            portage_tmpdir,
            "PYTHONDONTWRITEBYTECODE":
            os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH":
            pythonpath,
            "__PORTAGE_TEST_PATH_OVERRIDE":
            fake_bin,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
             os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

        updates_dir = os.path.join(test_repo_location, "profiles", "updates")
        dirs = [
            cachedir, cachedir_pregen, cross_eroot, cross_prefix, distdir,
            fake_bin, portage_tmpdir, updates_dir, user_config_dir,
            var_cache_edb
        ]
        etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
        # Override things that may be unavailable, or may have portability
        # issues when running tests in exotic environments.
        #   prepstrip - bug #447810 (bash read builtin EINTR problem)
        true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
        true_binary = find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")
        try:
            for d in dirs:
                ensure_dirs(d)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))
            for x in etc_symlinks:
                os.symlink(os.path.join(self.cnf_etc_path, x),
                           os.path.join(eprefix, "etc", x))
            with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
                f.write(b"100")
            # non-empty system set keeps --depclean quiet
            with open(os.path.join(profile_path, "packages"), 'w') as f:
                f.write("*dev-libs/token-system-pkg")
            for cp, xml_data in metadata_xml_files:
                with open(os.path.join(test_repo_location, cp, "metadata.xml"),
                          'w') as f:
                    f.write(playground.metadata_xml_template % xml_data)
            with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
                f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for args in test_commands:

                if hasattr(args, '__call__'):
                    args()
                    continue

                if isinstance(args[0], dict):
                    local_env = env.copy()
                    local_env.update(args[0])
                    args = args[1:]
                else:
                    local_env = env

                proc = yield asyncio.create_subprocess_exec(*args,
                                                            env=local_env,
                                                            stderr=None,
                                                            stdout=stdout,
                                                            loop=loop)

                if debug:
                    yield proc.wait()
                else:
                    output, _err = yield proc.communicate()
                    yield proc.wait()
                    if proc.returncode != os.EX_OK:
                        portage.writemsg(output)

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "emerge failed with args %s" % (args, ))
        finally:
            binhost_server.__exit__(None, None, None)
            playground.cleanup()
Пример #43
0
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
	writemsg_level):
	"""
	Regenerate the environment files derived from /etc/env.d.

	Reads all numbered files under ${target_root}${EPREFIX}/etc/env.d,
	merges their variables (honoring SPACE_SEPARATED / COLON_SEPARATED
	declarations found in the files themselves), and rewrites:

	 * etc/ld.so.conf (running ldconfig when LDPATH entries changed)
	 * etc/prelink.conf (when prelink is available)
	 * etc/profile.env (bash) and etc/csh.env ((t)csh)

	@param makelinks: when true, ldconfig may regenerate library symlinks
	@param target_root: root to operate on; defaults to
		portage.settings["ROOT"]
	@param prev_mtimes: mtime cache used to detect changed library dirs;
		defaults to portage.mtimedb["ldpath"]
	@param contents: optional package CONTENTS dict used to skip ldconfig
		when no library files were touched
	@param env: optional config/dict used in place of portage.settings
	@param writemsg_level: optional message output function
	"""
	# Fall back to global portage state for any unspecified parameters.
	if writemsg_level is None:
		writemsg_level = portage.util.writemsg_level
	if target_root is None:
		target_root = portage.settings["ROOT"]
	if prev_mtimes is None:
		prev_mtimes = portage.mtimedb["ldpath"]
	if env is None:
		settings = portage.settings
	else:
		settings = env

	eprefix = settings.get("EPREFIX", "")
	eprefix_lstrip = eprefix.lstrip(os.sep)
	envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
	ensure_dirs(envd_dir, mode=0o755)
	fns = listdir(envd_dir, EmptyOnError=1)
	fns.sort()
	# Only process files named like "NN*", skipping hidden and backup files.
	templist = []
	for x in fns:
		if len(x) < 3:
			continue
		if not x[0].isdigit() or not x[1].isdigit():
			continue
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
			continue
		templist.append(x)
	fns = templist
	del templist

	# Variables merged cumulatively across env.d files, rather than
	# last-one-wins.
	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		  "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		  "PYTHONPATH", "ROOTPATH"])

	config_list = []

	for x in fns:
		file_path = os.path.join(envd_dir, x)
		try:
			myconfig = getconfig(file_path, expand=False)
		except ParseError as e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			del e
			continue
		if myconfig is None:
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
			continue

		config_list.append(myconfig)
		# env.d files may declare additional cumulative variables.
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]

	# From here on, "env" is the merged environment, not the parameter.
	env = {}
	specials = {}
	for var in space_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split():
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = " ".join(mylist)
		specials[var] = mylist

	for var in colon_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split(":"):
					if item and not item in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = ":".join(mylist)
		specials[var] = mylist

	for myconfig in config_list:
		# Cumulative variables have already been deleted from myconfig so
		# that they won't be overwritten by this dict.update call.
		env.update(myconfig)

	# Read the existing ld.so.conf (comment lines excluded) so we can tell
	# whether it needs rewriting.
	ldsoconf_path = os.path.join(
		target_root, eprefix_lstrip, "etc", "ld.so.conf")
	try:
		myld = io.open(_unicode_encode(ldsoconf_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		myldlines=myld.readlines()
		myld.close()
		oldld=[]
		for x in myldlines:
			#each line has at least one char (a newline)
			if x[:1] == "#":
				continue
			oldld.append(x[:-1])
	except (IOError, OSError) as e:
		if e.errno != errno.ENOENT:
			raise
		oldld = None

	newld = specials["LDPATH"]
	if oldld != newld:
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
			myfd.write(x + "\n")
		myfd.close()

	# Update prelink.conf if we are prelink-enabled
	if prelink_capable:
		newprelink = atomic_ofstream(os.path.join(
			target_root, eprefix_lstrip, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l %s\n" % (x,))
		prelink_paths = []
		prelink_paths += specials.get("LDPATH", [])
		prelink_paths += specials.get("PATH", [])
		prelink_paths += specials.get("PRELINK_PATH", [])
		prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
		for x in prelink_paths:
			if not x:
				continue
			if x[-1:] != '/':
				x += "/"
			plmasked = 0
			for y in prelink_path_mask:
				if not y:
					continue
				if y[-1] != '/':
					y += "/"
				if y == x[0:len(y)]:
					plmasked = 1
					break
			if not plmasked:
				newprelink.write("-h %s\n" % (x,))
		for x in prelink_path_mask:
			newprelink.write("-b %s\n" % (x,))
		newprelink.close()

	# NOTE: was long(time.time()); the "long" builtin does not exist in
	# Python 3 (PEP 237), so use int.
	current_time = int(time.time())
	mtime_changed = False
	lib_dirs = set()
	# Compare library dir mtimes against the cache to decide whether
	# ldconfig actually needs to run.
	for lib_dir in set(specials["LDPATH"] + \
		['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
		try:
			newldpathtime = os.stat(x)[stat.ST_MTIME]
			lib_dirs.add(normalize_path(x))
		except OSError as oe:
			if oe.errno == errno.ENOENT:
				try:
					del prev_mtimes[x]
				except KeyError:
					pass
				# ignore this path because it doesn't exist
				continue
			raise
		if newldpathtime == current_time:
			# Reset mtime to avoid the potential ambiguity of times that
			# differ by less than 1 second.
			newldpathtime -= 1
			os.utime(x, (newldpathtime, newldpathtime))
			prev_mtimes[x] = newldpathtime
			mtime_changed = True
		elif x in prev_mtimes:
			if prev_mtimes[x] == newldpathtime:
				pass
			else:
				prev_mtimes[x] = newldpathtime
				mtime_changed = True
		else:
			prev_mtimes[x] = newldpathtime
			mtime_changed = True

	# If no library dir mtime changed, consult the package CONTENTS to see
	# whether any obj/sym was installed into a library dir.
	if makelinks and \
		not mtime_changed and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.items():
			if mydata[0] not in ("obj", "sym"):
				continue
			head, tail = os.path.split(mypath)
			if head in lib_dirs:
				libdir_contents_changed = True
				break
		if not libdir_contents_changed:
			makelinks = False

	ldconfig = "/sbin/ldconfig"
	if "CHOST" in settings and "CBUILD" in settings and \
		settings["CHOST"] != settings["CBUILD"]:
		# Cross-compiling: use the target's ldconfig if one can be found.
		ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

	# Only run ldconfig as needed
	if makelinks and ldconfig and not eprefix:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype == "Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
				(target_root,))
			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
				target_root)
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))

	# LDPATH goes to ld.so.conf only, never to the shell profiles.
	del specials["LDPATH"]

	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice  = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(
		target_root, eprefix_lstrip, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [ x for x in env if x != "LDPATH" ]
	env_keys.sort()
	for k in env_keys:
		v = env[k]
		# Values like "$FOO" (but not "${FOO}") get bash $'...' quoting.
		if v.startswith('$') and not v.startswith('${'):
			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		else:
			outfile.write("export %s='%s'\n" % (k, v))
	outfile.close()

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(
		target_root, eprefix_lstrip, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for x in env_keys:
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
	outfile.close()
Пример #44
0
def _prepare_features_dirs(mysettings):
	"""
	Create and fix permissions on directories needed by optional FEATURES
	(currently ccache and distcc). For each enabled feature, its base
	directory (and any required subdirectories) is created, and when
	FEATURES=userpriv applies, group ownership/permissions are adjusted
	recursively for the portage group. On any failure the feature is
	removed from mysettings.features and a warning is emitted.

	@param mysettings: build-time config object; read for FEATURES,
		PORTAGE_TMPDIR, BUILD_PREFIX, PORTAGE_RESTRICT and the per-feature
		*_DIR variables (which are set here when unset)
	"""

	# Use default ABI libdir in accordance with bug #355283.
	libdir = None
	default_abi = mysettings.get("DEFAULT_ABI")
	if default_abi:
		libdir = mysettings.get("LIBDIR_" + default_abi)
	if not libdir:
		libdir = "lib"

	# Per-feature directory layout: which settings variable names the base
	# directory, its default location, required subdirs, and whether perms
	# must be re-applied recursively even when nothing was created.
	features_dirs = {
		"ccache":{
			"basedir_var":"CCACHE_DIR",
			"default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
			"always_recurse":False},
		"distcc":{
			"basedir_var":"DISTCC_DIR",
			"default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
			"subdirs":("lock", "state"),
			"always_recurse":True}
	}
	# Group-writable dir/file modes (setgid on dirs), masked by modemask.
	dirmode  = 0o2070
	filemode =   0o60
	modemask =    0o2
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	# droppriv: the build will run as the portage user, so these dirs must
	# be accessible to the portage group.
	droppriv = secpass >= 2 and \
		"userpriv" in mysettings.features and \
		"userpriv" not in restrict
	for myfeature, kwargs in features_dirs.items():
		if myfeature in mysettings.features:
			failure = False
			basedir = mysettings.get(kwargs["basedir_var"])
			if basedir is None or not basedir.strip():
				# Fall back to the default location and record it in
				# mysettings so ebuild processes see the same path.
				basedir = kwargs["default_dir"]
				mysettings[kwargs["basedir_var"]] = basedir
			try:
				mydirs = [mysettings[kwargs["basedir_var"]]]
				if "subdirs" in kwargs:
					for subdir in kwargs["subdirs"]:
						mydirs.append(os.path.join(basedir, subdir))
				for mydir in mydirs:
					modified = ensure_dirs(mydir)
					# Generally, we only want to apply permissions for
					# initial creation.  Otherwise, we don't know exactly what
					# permissions the user wants, so should leave them as-is.
					droppriv_fix = False
					if droppriv:
						st = os.stat(mydir)
						# Wrong group or missing group bits on the dir
						# itself forces a permissions fix.
						if st.st_gid != portage_gid or \
							not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
							droppriv_fix = True
						if not droppriv_fix:
							# Check permissions of files in the directory.
							for filename in os.listdir(mydir):
								try:
									subdir_st = os.lstat(
										os.path.join(mydir, filename))
								except OSError:
									continue
								if subdir_st.st_gid != portage_gid or \
									((stat.S_ISDIR(subdir_st.st_mode) and \
									not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
									droppriv_fix = True
									break

					if droppriv_fix:
						_adjust_perms_msg(mysettings,
							colorize("WARN", " * ") + \
							_("Adjusting permissions "
							"for FEATURES=userpriv: '%s'\n") % mydir)
					elif modified:
						_adjust_perms_msg(mysettings,
							colorize("WARN", " * ") + \
							_("Adjusting permissions "
							"for FEATURES=%s: '%s'\n") % (myfeature, mydir))

					if modified or kwargs["always_recurse"] or droppriv_fix:
						def onerror(e):
							raise	# The feature is disabled if a single error
									# occurs during permissions adjustment.
						if not apply_recursive_permissions(mydir,
						gid=portage_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
							raise OperationNotPermitted(
								_("Failed to apply recursive permissions for the portage group."))

			except DirectoryNotFound as e:
				failure = True
				writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
					(e,), noiselevel=-1)
				writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
					noiselevel=-1)

			except PortageException as e:
				failure = True
				writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
					(kwargs["basedir_var"], basedir), noiselevel=-1)
				writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
					noiselevel=-1)

			if failure:
				# Disable the feature so the build can proceed without it;
				# sleep so the user has a chance to notice the warning.
				mysettings.features.remove(myfeature)
				time.sleep(5)
Пример #45
0
    def testIpcDaemon(self):
        """
        Exercise EbuildIpcDaemon end to end: a SpawnProcess runs
        ebuild-ipc over a fifo pair and the daemon's 'exit' command must
        report each exit code back. A second phase runs the scheduler
        with a deliberately tiny timeout to verify that no command is
        received when the subprocess cannot finish in time.
        """
        tmpdir = tempfile.mkdtemp()
        build_dir = None
        try:
            env = {}

            # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
            # need to be inherited by ebuild subprocesses.
            if 'PORTAGE_USERNAME' in os.environ:
                env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
            if 'PORTAGE_GRPNAME' in os.environ:
                env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

            env['PORTAGE_PYTHON'] = _python_interpreter
            env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
            env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
            env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')

            if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
                env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
                 os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

            task_scheduler = TaskScheduler(max_jobs=2)
            build_dir = EbuildBuildDir(scheduler=task_scheduler.sched_iface,
                                       settings=env)
            build_dir.lock()
            ensure_dirs(env['PORTAGE_BUILDDIR'])

            # The daemon and the ebuild-ipc client communicate over this
            # fifo pair inside the build dir.
            input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
            output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
            os.mkfifo(input_fifo)
            os.mkfifo(output_fifo)

            # Phase 1: each exit code must round-trip through the daemon.
            for exitcode in (0, 1, 2):
                exit_command = ExitCommand()
                commands = {'exit': exit_command}
                daemon = EbuildIpcDaemon(commands=commands,
                                         input_fifo=input_fifo,
                                         output_fifo=output_fifo,
                                         scheduler=task_scheduler.sched_iface)
                proc = SpawnProcess(args=[
                    BASH_BINARY, "-c",
                    '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode
                ],
                                    env=env,
                                    scheduler=task_scheduler.sched_iface)

                self.received_command = False

                def exit_command_callback():
                    # Stop the scheduler as soon as the command arrives.
                    self.received_command = True
                    task_scheduler.clear()
                    task_scheduler.wait()

                exit_command.reply_hook = exit_command_callback
                start_time = time.time()
                task_scheduler.add(daemon)
                task_scheduler.add(proc)
                task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
                task_scheduler.clear()
                task_scheduler.wait()
                # Clean up any locks left behind by the subprocess.
                hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                                 remove_all_locks=True)

                self.assertEqual(self.received_command, True,
                 "command not received after %d seconds" % \
                 (time.time() - start_time,))
                self.assertEqual(proc.isAlive(), False)
                self.assertEqual(daemon.isAlive(), False)
                self.assertEqual(exit_command.exitcode, exitcode)

            # Intentionally short timeout test for QueueScheduler.run()
            sleep_time_s = 10  # 10.000 seconds
            short_timeout_ms = 10  #  0.010 seconds

            # Phase 2: the subprocess sleeps far longer than the scheduler
            # timeout, so no command should ever be received.
            for i in range(3):
                exit_command = ExitCommand()
                commands = {'exit': exit_command}
                daemon = EbuildIpcDaemon(commands=commands,
                                         input_fifo=input_fifo,
                                         output_fifo=output_fifo,
                                         scheduler=task_scheduler.sched_iface)
                proc = SpawnProcess(
                    args=[BASH_BINARY, "-c",
                          'exec sleep %d' % sleep_time_s],
                    env=env,
                    scheduler=task_scheduler.sched_iface)

                self.received_command = False

                def exit_command_callback():
                    self.received_command = True
                    task_scheduler.clear()
                    task_scheduler.wait()

                exit_command.reply_hook = exit_command_callback
                start_time = time.time()
                task_scheduler.add(daemon)
                task_scheduler.add(proc)
                task_scheduler.run(timeout=short_timeout_ms)
                task_scheduler.clear()
                task_scheduler.wait()
                hardlock_cleanup(env['PORTAGE_BUILDDIR'],
                                 remove_all_locks=True)

                self.assertEqual(self.received_command, False,
                 "command received after %d seconds" % \
                 (time.time() - start_time,))
                self.assertEqual(proc.isAlive(), False)
                self.assertEqual(daemon.isAlive(), False)
                # The killed subprocess must not report success.
                self.assertEqual(proc.returncode == os.EX_OK, False)

        finally:
            if build_dir is not None:
                build_dir.unlock()
            shutil.rmtree(tmpdir)
Пример #46
0
def _prepare_workdir(mysettings):
	"""
	Prepare WORKDIR permissions and configure build logging.

	Applies PORTAGE_WORKDIR_MODE (defaulting to 0o700 when unset or
	unparseable) to WORKDIR, then decides where the build log goes:
	under PORTAGE_LOGDIR when it is set and writable (optionally split
	per category via FEATURES=split-log), otherwise ${T}/build.log.
	When a logdir is used, ${T}/build.log becomes a symlink to the real
	log file (bug #412865).

	@param mysettings: build-time config object; PORTAGE_WORKDIR_MODE,
		PORTAGE_LOGDIR and PORTAGE_LOG_FILE may be modified in place
	"""
	workdir_mode = 0o700
	try:
		# Parse PORTAGE_WORKDIR_MODE as an octal string; an empty value
		# behaves like an unset one (KeyError), anything else is invalid.
		mode = mysettings["PORTAGE_WORKDIR_MODE"]
		if mode.isdigit():
			parsed_mode = int(mode, 8)
		elif mode == "":
			raise KeyError()
		else:
			raise ValueError()
		if parsed_mode & 0o7777 != parsed_mode:
			raise ValueError("Invalid file mode: %s" % mode)
		else:
			workdir_mode = parsed_mode
	except KeyError as e:
		writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
	except ValueError as e:
		if len(str(e)) > 0:
			writemsg("%s\n" % e)
		writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
		(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
	# Normalize back to a plain octal string (strip the Python "0o" prefix).
	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
	try:
		apply_secpass_permissions(mysettings["WORKDIR"],
		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
	except FileNotFound:
		pass # ebuild.sh will create it

	# The config is a stack of dicts, so delete repeatedly until the key
	# is gone from every layer.
	if mysettings.get("PORTAGE_LOGDIR", "") == "":
		while "PORTAGE_LOGDIR" in mysettings:
			del mysettings["PORTAGE_LOGDIR"]
	if "PORTAGE_LOGDIR" in mysettings:
		try:
			modified = ensure_dirs(mysettings["PORTAGE_LOGDIR"])
			if modified:
				# Only initialize group/mode if the directory doesn't
				# exist, so that we don't override permissions if they
				# were previously set by the administrator.
				# NOTE: These permissions should be compatible with our
				# default logrotate config as discussed in bug 374287.
				apply_secpass_permissions(mysettings["PORTAGE_LOGDIR"],
					uid=portage_uid, gid=portage_gid, mode=0o2770)
		except PortageException as e:
			writemsg("!!! %s\n" % str(e), noiselevel=-1)
			writemsg(_("!!! Permission issues with PORTAGE_LOGDIR='%s'\n") % \
				mysettings["PORTAGE_LOGDIR"], noiselevel=-1)
			writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
			while "PORTAGE_LOGDIR" in mysettings:
				del mysettings["PORTAGE_LOGDIR"]

	compress_log_ext = ''
	if 'compress-build-logs' in mysettings.features:
		compress_log_ext = '.gz'

	logdir_subdir_ok = False
	if "PORTAGE_LOGDIR" in mysettings and \
		os.access(mysettings["PORTAGE_LOGDIR"], os.W_OK):
		logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
		# .logid is an empty marker file whose mtime timestamps this build
		# and becomes part of the log file name.
		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
		if not os.path.exists(logid_path):
			open(_unicode_encode(logid_path), 'w').close()
		logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
			time.gmtime(os.stat(logid_path).st_mtime)),
			encoding=_encodings['content'], errors='replace')

		if "split-log" in mysettings.features:
			# One subdirectory per category under ${logdir}/build.
			log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				log_subdir, "%s:%s.log%s" %
				(mysettings["PF"], logid_time, compress_log_ext))
		else:
			log_subdir = logdir
			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
				logdir, "%s:%s:%s.log%s" % \
				(mysettings["CATEGORY"], mysettings["PF"], logid_time,
				compress_log_ext))

		if log_subdir is logdir:
			logdir_subdir_ok = True
		else:
			try:
				_ensure_log_subdirs(logdir, log_subdir)
			except PortageException as e:
				writemsg("!!! %s\n" % (e,), noiselevel=-1)

			if os.access(log_subdir, os.W_OK):
				logdir_subdir_ok = True
			else:
				writemsg("!!! %s: %s\n" %
					(_("Permission Denied"), log_subdir), noiselevel=-1)

	tmpdir_log_path = os.path.join(
		mysettings["T"], "build.log%s" % compress_log_ext)
	if not logdir_subdir_ok:
		# NOTE: When sesandbox is enabled, the local SELinux security policies
		# may not allow output to be piped out of the sesandbox domain. The
		# current policy will allow it to work when a pty is available, but
		# not through a normal pipe. See bug #162404.
		mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
	else:
		# Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
		# requested in bug #412865.
		make_new_symlink = False
		try:
			target = os.readlink(tmpdir_log_path)
		except OSError:
			make_new_symlink = True
		else:
			if target != mysettings["PORTAGE_LOG_FILE"]:
				make_new_symlink = True
		if make_new_symlink:
			try:
				os.unlink(tmpdir_log_path)
			except OSError:
				pass
			os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
Пример #47
0
	def testConfigProtect(self):
		"""
		Demonstrates many different scenarios. For example:

		 * regular file replaces regular file
		 * regular file replaces symlink
		 * regular file replaces directory
		 * symlink replaces symlink
		 * symlink replaces regular file
		 * symlink replaces directory
		 * directory replaces regular file
		 * directory replaces symlink
		"""

		debug = False

		content_A_1 = """
S="${WORKDIR}"

src_install() {
	insinto /etc/A
	keepdir /etc/A/dir_a
	keepdir /etc/A/symlink_replaces_dir
	keepdir /etc/A/regular_replaces_dir
	echo regular_a_1 > "${T}"/regular_a
	doins "${T}"/regular_a
	echo regular_b_1 > "${T}"/regular_b
	doins "${T}"/regular_b
	dosym regular_a /etc/A/regular_replaces_symlink
	dosym regular_b /etc/A/symlink_replaces_symlink
	echo regular_replaces_regular_1 > \
		"${T}"/regular_replaces_regular
	doins "${T}"/regular_replaces_regular
	echo symlink_replaces_regular > \
		"${T}"/symlink_replaces_regular
	doins "${T}"/symlink_replaces_regular
}

"""

		content_A_2 = """
S="${WORKDIR}"

src_install() {
	insinto /etc/A
	keepdir /etc/A/dir_a
	dosym dir_a /etc/A/symlink_replaces_dir
	echo regular_replaces_dir > "${T}"/regular_replaces_dir
	doins "${T}"/regular_replaces_dir
	echo regular_a_2 > "${T}"/regular_a
	doins "${T}"/regular_a
	echo regular_b_2 > "${T}"/regular_b
	doins "${T}"/regular_b
	echo regular_replaces_symlink > \
		"${T}"/regular_replaces_symlink
	doins "${T}"/regular_replaces_symlink
	dosym regular_b /etc/A/symlink_replaces_symlink
	echo regular_replaces_regular_2 > \
		"${T}"/regular_replaces_regular
	doins "${T}"/regular_replaces_regular
	dosym regular_a /etc/A/symlink_replaces_regular
}

"""

		ebuilds = {
			"dev-libs/A-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": content_A_1,
			},
			"dev-libs/A-2": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": content_A_2,
			},
		}

		playground = ResolverPlayground(
			ebuilds=ebuilds, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")

		portage_python = portage._python_interpreter
		dispatch_conf_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "dispatch-conf"))
		emerge_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "emerge"))
		etc_update_cmd = (BASH_BINARY,
			os.path.join(self.sbindir, "etc-update"))
		etc_update_auto = etc_update_cmd + ("--automode", "-5",)

		config_protect = "/etc"

		def modify_files(dir_path):
			for name in os.listdir(dir_path):
				path = os.path.join(dir_path, name)
				st = os.lstat(path)
				if stat.S_ISREG(st.st_mode):
					with io.open(path, mode='a',
						encoding=_encodings["stdio"]) as f:
						f.write("modified at %d\n" % time.time())
				elif stat.S_ISLNK(st.st_mode):
					old_dest = os.readlink(path)
					os.unlink(path)
					os.symlink(old_dest +
						" modified at %d" % time.time(), path)

		def updated_config_files(count):
			self.assertEqual(count,
				sum(len(x[1]) for x in find_updated_config_files(eroot,
				shlex_split(config_protect))))

		test_commands = (
			etc_update_cmd,
			dispatch_conf_cmd,
			emerge_cmd + ("-1", "=dev-libs/A-1"),
			partial(updated_config_files, 0),
			emerge_cmd + ("-1", "=dev-libs/A-2"),
			partial(updated_config_files, 2),
			etc_update_auto,
			partial(updated_config_files, 0),
			emerge_cmd + ("-1", "=dev-libs/A-2"),
			partial(updated_config_files, 0),
			# Test bug #523684, where a file renamed or removed by the
			# admin forces replacement files to be merged with config
			# protection.
			partial(shutil.rmtree,
				os.path.join(eprefix, "etc", "A")),
			emerge_cmd + ("-1", "=dev-libs/A-2"),
			partial(updated_config_files, 8),
			etc_update_auto,
			partial(updated_config_files, 0),
			# Modify some config files, and verify that it triggers
			# config protection.
			partial(modify_files,
				os.path.join(eroot, "etc", "A")),
			emerge_cmd + ("-1", "=dev-libs/A-2"),
			partial(updated_config_files, 6),
			etc_update_auto,
			partial(updated_config_files, 0),
			# Modify some config files, downgrade to A-1, and verify
			# that config protection works properly when the file
			# types are changing.
			partial(modify_files,
				os.path.join(eroot, "etc", "A")),
			emerge_cmd + ("-1", "--noconfmem", "=dev-libs/A-1"),
			partial(updated_config_files, 6),
			etc_update_auto,
			partial(updated_config_files, 0),
		)

		distdir = playground.distdir
		fake_bin = os.path.join(eprefix, "bin")
		portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")

		path =  os.environ.get("PATH")
		if path is not None and not path.strip():
			path = None
		if path is None:
			path = ""
		else:
			path = ":" + path
		path = fake_bin + path

		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"CLEAN_DELAY" : "0",
			"CONFIG_PROTECT": config_protect,
			"DISTDIR" : distdir,
			"EMERGE_DEFAULT_OPTS": "-v",
			"EMERGE_WARNING_DELAY" : "0",
			"INFODIR" : "",
			"INFOPATH" : "",
			"PATH" : path,
			"PORTAGE_INST_GID" : str(portage.data.portage_gid),
			"PORTAGE_INST_UID" : str(portage.data.portage_uid),
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PORTAGE_TMPDIR" : portage_tmpdir,
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
			"PYTHONPATH" : pythonpath,
			"__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		dirs = [distdir, fake_bin, portage_tmpdir,
			var_cache_edb]
		etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
		# Override things that may be unavailable, or may have portability
		# issues when running tests in exotic environments.
		#   prepstrip - bug #447810 (bash read builtin EINTR problem)
		true_symlinks = ["prepstrip", "scanelf"]
		true_binary = find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")
		try:
			for d in dirs:
				ensure_dirs(d)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))
			for x in etc_symlinks:
				os.symlink(os.path.join(self.cnf_etc_path, x),
					os.path.join(eprefix, "etc", x))
			with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
				f.write(b"100")

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for args in test_commands:

				if hasattr(args, '__call__'):
					args()
					continue

				if isinstance(args[0], dict):
					local_env = env.copy()
					local_env.update(args[0])
					args = args[1:]
				else:
					local_env = env

				proc = subprocess.Popen(args,
					env=local_env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"emerge failed with args %s" % (args,))
		finally:
			playground.cleanup()
Пример #48
0
	def updateItems(self, repoid):
		"""
		Figure out which news items from NEWS_PATH are both unread and relevant to
		the user (according to the GLEP 42 standards of relevancy).  Then add these
		items into the news.repoid.unread file.

		@param repoid: identifier of the repository whose news directory
			should be scanned for new items
		"""

		# Ensure that the unread path exists and is writable.

		try:
			ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
				mode=self._dir_mode, mask=self._mode_mask)
		except (OperationNotPermitted, PermissionDenied):
			# Insufficient privileges are not fatal; news tracking is
			# simply skipped in that case.
			return

		if not os.access(self.unread_path, os.W_OK):
			return

		news_dir = self._news_dir(repoid)
		try:
			news = _os.listdir(_unicode_encode(news_dir,
				encoding=_encodings['fs'], errors='strict'))
		except OSError:
			# No news directory for this repo (or it is unreadable).
			return

		skip_filename = self._skip_filename(repoid)
		unread_filename = self._unread_filename(repoid)
		# Hold the lock across read-modify-write of the unread/skip
		# files so concurrent updaters do not lose items.
		unread_lock = lockfile(unread_filename, wantnewlockfile=1)
		try:
			try:
				unread = set(grabfile(unread_filename))
				unread_orig = unread.copy()
				skip = set(grabfile(skip_filename))
				skip_orig = skip.copy()
			except PermissionDenied:
				return

			for itemid in news:
				try:
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					# Re-decode with replacement characters purely to
					# produce a readable error message, then skip the
					# badly-named item.
					itemid = _unicode_decode(itemid,
						encoding=_encodings['fs'], errors='replace')
					writemsg_level(
						_("!!! Invalid encoding in news item name: '%s'\n") % \
						itemid, level=logging.ERROR, noiselevel=-1)
					continue

				if itemid in skip:
					continue
				filename = os.path.join(news_dir, itemid,
					itemid + "." + self.language_id + ".txt")
				if not os.path.isfile(filename):
					continue
				item = NewsItem(filename, itemid)
				if not item.isValid():
					continue
				if item.isRelevant(profile=self._profile_path,
					config=self.config, vardb=self.vdb):
					unread.add(item.name)
					# Mark as processed so this item is not re-evaluated
					# on subsequent updateItems() calls.
					skip.add(item.name)

			# Only rewrite files whose contents actually changed.
			if unread != unread_orig:
				write_atomic(unread_filename,
					"".join("%s\n" % x for x in sorted(unread)))
				apply_secpass_permissions(unread_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

			if skip != skip_orig:
				write_atomic(skip_filename,
					"".join("%s\n" % x for x in sorted(skip)))
				apply_secpass_permissions(skip_filename,
					uid=self._uid, gid=self._gid,
					mode=self._file_mode, mask=self._mode_mask)

		finally:
			unlockfile(unread_lock)
Пример #49
0
	def testSyncLocal(self):
		"""
		Drive ``emerge --sync`` and ``emaint sync`` as subprocesses
		against a local test repository, covering the rsync and git
		sync-types, auto-sync on/off, and sync-rsync-extra-opts
		(default, per-repo override, and cancellation).
		"""
		debug = False

		skip_reason = self._must_skip()
		if skip_reason:
			self.portage_skip = skip_reason
			self.assertFalse(True, skip_reason)
			return

		# repos.conf template; the placeholders are filled in by
		# repos_set_conf() below for each sync-type under test.
		repos_conf = textwrap.dedent("""
			[DEFAULT]
			%(default_keys)s
			[test_repo]
			location = %(EPREFIX)s/var/repositories/test_repo
			sync-type = %(sync-type)s
			sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
			auto-sync = %(auto-sync)s
			%(repo_extra_keys)s
		""")

		profile = {
			"eapi": ("5",),
			"package.use.stable.mask": ("dev-libs/A flag",)
		}

		ebuilds = {
			"dev-libs/A-0": {}
		}

		user_config = {
			'make.conf': ('FEATURES="metadata-transfer"',)
		}

		playground = ResolverPlayground(ebuilds=ebuilds,
			profile=profile, user_config=user_config, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		homedir = os.path.join(eroot, "home")
		distdir = os.path.join(eprefix, "distdir")
		repo = settings.repositories["test_repo"]
		metadata_dir = os.path.join(repo.location, "metadata")

		# Run the portage scripts with the same python interpreter that
		# runs this test, with warnings enabled (-Wd).
		cmds = {}
		for cmd in ("emerge", "emaint"):
			path = os.path.join(self.bindir, cmd)
			assert os.path.exists(path)
			cmds[cmd] =  (portage._python_interpreter,
				"-b", "-Wd", path)

		git_binary = find_binary("git")
		git_cmd = (git_binary,)

		committer_name = "Gentoo Dev"
		committer_email = "*****@*****.**"

		def repos_set_conf(sync_type, dflt_keys=None, xtra_keys=None,
			auto_sync="yes"):
			# Regenerate PORTAGE_REPOSITORIES in the subprocess env
			# from the repos_conf template above.
			env["PORTAGE_REPOSITORIES"] = repos_conf % {\
				"EPREFIX": eprefix, "sync-type": sync_type,
				"auto-sync": auto_sync,
				"default_keys": "" if dflt_keys is None else dflt_keys,
				"repo_extra_keys": "" if xtra_keys is None else xtra_keys}

		def alter_ebuild():
			# Touch an ebuild in the sync source and invalidate the
			# metadata timestamp so the next sync has work to do.
			with open(os.path.join(repo.location + "_sync",
				"dev-libs", "A", "A-0.ebuild"), "a") as f:
				f.write("\n")
			os.unlink(os.path.join(metadata_dir, 'timestamp.chk'))

		# Each scenario below is a sequence of (cwd, command-or-callable)
		# pairs; callables perform in-process assertions or setup.
		sync_cmds = (
			(homedir, cmds["emerge"] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(
				os.path.join(repo.location, "dev-libs", "A")
				), "dev-libs/A expected, but missing")),
			(homedir, cmds["emaint"] + ("sync", "-A")),
		)

		sync_cmds_auto_sync = (
			(homedir, lambda: repos_set_conf("rsync", auto_sync="no")),
			(homedir, cmds["emerge"] + ("--sync",)),
			(homedir, lambda: self.assertFalse(os.path.exists(
				os.path.join(repo.location, "dev-libs", "A")
				), "dev-libs/A found, expected missing")),
			(homedir, lambda: repos_set_conf("rsync", auto_sync="yes")),
		)

		rename_repo = (
			(homedir, lambda: os.rename(repo.location,
				repo.location + "_sync")),
		)

		rsync_opts_repos = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync", None,
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back"))),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(
				repo.location + "_back"))),
			(homedir, lambda: shutil.rmtree(repo.location + "_back")),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		rsync_opts_repos_default = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync",
					"sync-rsync-extra-opts = --backup --backup-dir=%s" %
					_shell_quote(repo.location+"_back"))),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
			(homedir, lambda: shutil.rmtree(repo.location + "_back")),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		# Per-repo extra-opts must override the [DEFAULT] value.
		rsync_opts_repos_default_ovr = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync",
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back_nowhere"),
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back"))),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
			(homedir, lambda: shutil.rmtree(repo.location + "_back")),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		# An empty per-repo value must cancel the [DEFAULT] extra-opts.
		rsync_opts_repos_default_cancel = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync",
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back_nowhere"),
				"sync-rsync-extra-opts = ")),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertFalse(os.path.exists(repo.location + "_back"))),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		delete_sync_repo = (
			(homedir, lambda: shutil.rmtree(
				repo.location + "_sync")),
		)

		git_repo_create = (
			(repo.location, git_cmd +
				("config", "--global", "user.name", committer_name,)),
			(repo.location, git_cmd +
				("config", "--global", "user.email", committer_email,)),
			(repo.location, git_cmd + ("init-db",)),
			(repo.location, git_cmd + ("add", ".")),
			(repo.location, git_cmd +
				("commit", "-a", "-m", "add whole repo")),
		)

		sync_type_git = (
			(homedir, lambda: repos_set_conf("git")),
		)

		# Make sure subprocesses import the portage code under test by
		# putting PORTAGE_PYM_PATH first on PYTHONPATH.
		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"DISTDIR" : distdir,
			"GENTOO_COMMITTER_NAME" : committer_name,
			"GENTOO_COMMITTER_EMAIL" : committer_email,
			"HOME" : homedir,
			"PATH" : os.environ["PATH"],
			"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
			"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
			"PYTHONPATH" : pythonpath,
		}
		repos_set_conf("rsync")

		if os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			env["FEATURES"] = "-sandbox -usersandbox"

		dirs = [homedir, metadata_dir]
		try:
			for d in dirs:
				ensure_dirs(d)

			# Seed a metadata timestamp so that rsync-style timestamp
			# checks have something to compare against.
			timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
			with open(timestamp_path, 'w') as f:
				f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for cwd, cmd in rename_repo + sync_cmds_auto_sync + sync_cmds + \
				rsync_opts_repos + rsync_opts_repos_default + \
				rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
				delete_sync_repo + git_repo_create + sync_type_git + \
				rename_repo + sync_cmds:

				# Callables are in-process assertions/setup steps.
				if hasattr(cmd, '__call__'):
					cmd()
					continue

				abs_cwd = os.path.join(repo.location, cwd)
				proc = subprocess.Popen(cmd,
					cwd=abs_cwd, env=env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					# Capture stdout and replay it only on failure.
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"%s failed in %s" % (cmd, cwd,))


		finally:
			playground.cleanup()
Пример #50
0
	def testSimple(self):
		"""
		Smoke-test the portage command-line tool suite (emerge, ebuild,
		egencache, emaint, env-update, portageq, quickpkg, etc.) inside
		a ResolverPlayground environment.  Each tool is run as a
		subprocess and must exit with os.EX_OK.
		"""

		debug = False

		# Shared ebuild body (via MISC_CONTENT) exercising install
		# phases, CONFIG_PROTECT, latin-1 filenames (bug #381629), and
		# has_version/best_version prefix handling.
		install_something = """
S="${WORKDIR}"

pkg_pretend() {
	einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
	einfo "installing something..."
	insinto /usr/lib/${P}
	echo "blah blah blah" > "${T}"/regular-file
	doins "${T}"/regular-file
	dosym regular-file /usr/lib/${P}/symlink || die

	# Test CONFIG_PROTECT
	insinto /etc
	newins "${T}"/regular-file ${PN}-${SLOT%/*}

	# Test code for bug #381629, using a copyright symbol encoded with latin-1.
	# We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
	# works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
	# some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
	# running tests for Python 3.2 (even though it's bash that is ultimately
	# responsible for performing the transformation).
	local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
	insinto "${latin_1_dir}"
	echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
	doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
	dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
}

pkg_config() {
	einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
	einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
	einfo "called pkg_preinst for $CATEGORY/$PF"

	# Test that has_version and best_version work correctly with
	# prefix (involves internal ROOT -> EROOT calculation in order
	# to support ROOT override via the environment with EAPIs 3
	# and later which support prefix).
	if has_version $CATEGORY/$PN:$SLOT ; then
		einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
		einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
	else
		einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
	fi
	if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
		if has_version --host-root $CATEGORY/$PN:$SLOT ; then
			einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT"
			einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
		else
			einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT"
		fi
	fi
}

"""

		ebuilds = {
			"dev-libs/A-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": install_something,
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
			},
			"dev-libs/B-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": install_something,
			},
			"dev-libs/C-1": {
				"EAPI" : "6",
				"KEYWORDS": "~x86",
				"RDEPEND": "dev-libs/D[flag]",
			},
			"dev-libs/D-1": {
				"EAPI" : "6",
				"KEYWORDS": "~x86",
				"IUSE" : "flag",
			},
			"virtual/foo-0": {
				"EAPI" : "5",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
			},
		}

		installed = {
			"dev-libs/A-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
				"USE": "flag",
			},
			"dev-libs/B-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"USE": "flag",
			},
			"dev-libs/depclean-me-1": {
				"EAPI" : "5",
				"IUSE" : "",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"USE": "",
			},
			"app-misc/depclean-me-1": {
				"EAPI" : "5",
				"IUSE" : "",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "dev-libs/depclean-me",
				"USE": "",
			},
		}

		metadata_xml_files = (
			(
				"dev-libs/A",
				{
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
			(
				"dev-libs/B",
				{
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
		)

		playground = ResolverPlayground(
			ebuilds=ebuilds, installed=installed, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		trees = playground.trees
		portdb = trees[eroot]["porttree"].dbapi
		test_repo_location = settings.repositories["test_repo"].location
		var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
		cachedir = os.path.join(var_cache_edb, "dep")
		cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache")

		# Command tuples for each tool, run under the same python
		# interpreter as this test, with warnings enabled (-Wd).
		portage_python = portage._python_interpreter
		dispatch_conf_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "dispatch-conf"))
		ebuild_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "ebuild"))
		egencache_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "egencache"),
			"--repo", "test_repo",
			"--repositories-configuration", settings.repositories.config_string())
		emerge_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "emerge"))
		emaint_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "emaint"))
		env_update_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "env-update"))
		etc_update_cmd = (BASH_BINARY,
			os.path.join(self.sbindir, "etc-update"))
		fixpackages_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "fixpackages"))
		portageq_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "portageq"))
		quickpkg_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "quickpkg"))
		regenworld_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "regenworld"))

		rm_binary = find_binary("rm")
		self.assertEqual(rm_binary is None, False,
			"rm command not found")
		rm_cmd = (rm_binary,)

		egencache_extra_args = []
		if self._have_python_xml():
			egencache_extra_args.append("--update-use-local-desc")

		test_ebuild = portdb.findname("dev-libs/A-1")
		self.assertFalse(test_ebuild is None)

		cross_prefix = os.path.join(eprefix, "cross_prefix")
		cross_root = os.path.join(eprefix, "cross_root")
		cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))

		# Each entry is either an argv tuple (optionally prefixed with
		# an env-override dict) or a callable run in-process; the loop
		# at the bottom of this method dispatches on that.  Note that
		# pkgdir is assigned after this tuple; the lambdas referencing
		# it only evaluate it when invoked (late binding).
		test_commands = (
			env_update_cmd,
			portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
				"PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
			etc_update_cmd,
			dispatch_conf_cmd,
			emerge_cmd + ("--version",),
			emerge_cmd + ("--info",),
			emerge_cmd + ("--info", "--verbose"),
			emerge_cmd + ("--list-sets",),
			emerge_cmd + ("--check-news",),
			rm_cmd + ("-rf", cachedir),
			rm_cmd + ("-rf", cachedir_pregen),
			emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			egencache_cmd + ("--update",) + tuple(egencache_extra_args),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--metadata",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--metadata",),
			emerge_cmd + ("--metadata",),
			rm_cmd + ("-rf", cachedir),
			emerge_cmd + ("--oneshot", "virtual/foo"),
			lambda: self.assertFalse(os.path.exists(
				os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
			({"FEATURES" : "unmerge-backup"},) + \
				emerge_cmd + ("--unmerge", "virtual/foo"),
			lambda: self.assertTrue(os.path.exists(
				os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
			emerge_cmd + ("--pretend", "dev-libs/A"),
			ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
			emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
			emerge_cmd + ("-p", "dev-libs/B"),
			emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
			emerge_cmd + ("-B", "dev-libs/B",),
			emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),

			# trigger clean prior to pkg_pretend as in bug #390711
			ebuild_cmd + (test_ebuild, "unpack"),
			emerge_cmd + ("--oneshot", "dev-libs/A",),

			emerge_cmd + ("--noreplace", "dev-libs/A",),
			emerge_cmd + ("--config", "dev-libs/A",),
			emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
			emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
			emerge_cmd + ("--pretend", "--depclean",),
			emerge_cmd + ("--depclean",),
			quickpkg_cmd + ("--include-config", "y", "dev-libs/A",),
			# Test bug #523684, where a file renamed or removed by the
			# admin forces replacement files to be merged with config
			# protection.
			lambda: self.assertEqual(0,
				len(list(find_updated_config_files(eroot,
				shlex_split(settings["CONFIG_PROTECT"]))))),
			lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
			emerge_cmd + ("--usepkgonly", "dev-libs/A"),
			lambda: self.assertEqual(1,
				len(list(find_updated_config_files(eroot,
				shlex_split(settings["CONFIG_PROTECT"]))))),
			emaint_cmd + ("--check", "all"),
			emaint_cmd + ("--fix", "all"),
			fixpackages_cmd,
			regenworld_cmd,
			portageq_cmd + ("match", eroot, "dev-libs/A"),
			portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
			portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
			portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
			portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
			portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
			portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
			portageq_cmd + ("owners", eroot, eroot + "usr"),
			emerge_cmd + ("-p", eroot + "usr"),
			emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
			emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
			emerge_cmd + ("-C", "--quiet", "dev-libs/B"),

			emerge_cmd + ("--autounmask-continue", "dev-libs/C",),
			# Verify that the above --autounmask-continue command caused
			# USE=flag to be applied correctly to dev-libs/D.
			portageq_cmd + ("match", eroot, "dev-libs/D[flag]"),

			# Test cross-prefix usage, including chpathtool for binpkgs.
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("--usepkgonly", "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("dev-libs/A",),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),

			# Test ROOT support
			({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/B",),
			portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
		)

		distdir = playground.distdir
		pkgdir = playground.pkgdir
		fake_bin = os.path.join(eprefix, "bin")
		portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
		profile_path = settings.profile_path
		user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

		# Put fake_bin first on PATH so the tool overrides below win.
		path =  os.environ.get("PATH")
		if path is not None and not path.strip():
			path = None
		if path is None:
			path = ""
		else:
			path = ":" + path
		path = fake_bin + path

		# Make sure subprocesses import the portage code under test by
		# putting PORTAGE_PYM_PATH first on PYTHONPATH.
		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"CLEAN_DELAY" : "0",
			"DISTDIR" : distdir,
			"EMERGE_WARNING_DELAY" : "0",
			"INFODIR" : "",
			"INFOPATH" : "",
			"PATH" : path,
			"PKGDIR" : pkgdir,
			"PORTAGE_INST_GID" : str(portage.data.portage_gid),
			"PORTAGE_INST_UID" : str(portage.data.portage_uid),
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PORTAGE_TMPDIR" : portage_tmpdir,
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
			"PYTHONPATH" : pythonpath,
			"__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		updates_dir = os.path.join(test_repo_location, "profiles", "updates")
		dirs = [cachedir, cachedir_pregen, cross_eroot, cross_prefix,
			distdir, fake_bin, portage_tmpdir, updates_dir,
			user_config_dir, var_cache_edb]
		etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
		# Override things that may be unavailable, or may have portability
		# issues when running tests in exotic environments.
		#   prepstrip - bug #447810 (bash read builtin EINTR problem)
		true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
		true_binary = find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")
		try:
			for d in dirs:
				ensure_dirs(d)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))
			for x in etc_symlinks:
				os.symlink(os.path.join(self.cnf_etc_path, x),
					os.path.join(eprefix, "etc", x))
			with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
				f.write(b"100")
			# non-empty system set keeps --depclean quiet
			with open(os.path.join(profile_path, "packages"), 'w') as f:
				f.write("*dev-libs/token-system-pkg")
			for cp, xml_data in metadata_xml_files:
				with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
					f.write(playground.metadata_xml_template % xml_data)
			# Seed profiles/updates so fixpackages has work to do.
			with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
				f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for args in test_commands:

				# Callables are in-process assertions/setup steps.
				if hasattr(args, '__call__'):
					args()
					continue

				# A leading dict supplies per-command env overrides.
				if isinstance(args[0], dict):
					local_env = env.copy()
					local_env.update(args[0])
					args = args[1:]
				else:
					local_env = env

				proc = subprocess.Popen(args,
					env=local_env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					# Capture stdout and replay it only on failure.
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"emerge failed with args %s" % (args,))
		finally:
			playground.cleanup()
Пример #51
0
    def testSimple(self):

        debug = False

        install_something = """
S="${WORKDIR}"

pkg_pretend() {
	einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
	einfo "installing something..."
	insinto /usr/lib/${P}
	echo "blah blah blah" > "${T}"/regular-file
	doins "${T}"/regular-file
	dosym regular-file /usr/lib/${P}/symlink || die

	# Test code for bug #381629, using a copyright symbol encoded with latin-1.
	# We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
	# works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
	# some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
	# running tests for Python 3.2 (even though it's bash that is ultimately
	# responsible for performing the transformation).
	local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
	insinto "${latin_1_dir}"
	echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
	doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
	dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
}

pkg_config() {
	einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
	einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
	einfo "called pkg_preinst for $CATEGORY/$PF"

	# Test that has_version and best_version work correctly with
	# prefix (involves internal ROOT -> EROOT calculation in order
	# to support ROOT override via the environment with EAPIs 3
	# and later which support prefix).
	if has_version $CATEGORY/$PN:$SLOT ; then
		einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
		einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
	else
		einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
	fi
}

"""

        ebuilds = {
            "dev-libs/A-1": {
                "EAPI": "4",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "MISC_CONTENT": install_something,
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
            },
            "dev-libs/B-1": {
                "EAPI": "4",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "MISC_CONTENT": install_something,
            },
            "virtual/foo-0": {
                "EAPI": "4",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
            },
        }

        installed = {
            "dev-libs/A-1": {
                "EAPI": "4",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "flag? ( dev-libs/B[flag] )",
                "USE": "flag",
            },
            "dev-libs/B-1": {
                "EAPI": "4",
                "IUSE": "+flag",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "USE": "flag",
            },
            "dev-libs/depclean-me-1": {
                "EAPI": "4",
                "IUSE": "",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "USE": "",
            },
            "app-misc/depclean-me-1": {
                "EAPI": "4",
                "IUSE": "",
                "KEYWORDS": "x86",
                "LICENSE": "GPL-2",
                "RDEPEND": "dev-libs/depclean-me",
                "USE": "",
            },
        }

        metadata_xml_files = (
            (
                "dev-libs/A",
                {
                    "herd":
                    "base-system",
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
            (
                "dev-libs/B",
                {
                    "herd":
                    "no-herd",
                    "flags":
                    "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
                },
            ),
        )

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        installed=installed,
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        trees = playground.trees
        portdb = trees[eroot]["porttree"].dbapi
        portdir = settings["PORTDIR"]
        var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
        cachedir = os.path.join(var_cache_edb, "dep")
        cachedir_pregen = os.path.join(portdir, "metadata", "cache")

        portage_python = portage._python_interpreter
        ebuild_cmd = (portage_python, "-Wd",
                      os.path.join(PORTAGE_BIN_PATH, "ebuild"))
        egencache_cmd = (portage_python, "-Wd",
                         os.path.join(PORTAGE_BIN_PATH, "egencache"))
        emerge_cmd = (portage_python, "-Wd",
                      os.path.join(PORTAGE_BIN_PATH, "emerge"))
        emaint_cmd = (portage_python, "-Wd",
                      os.path.join(PORTAGE_BIN_PATH, "emaint"))
        env_update_cmd = (portage_python, "-Wd",
                          os.path.join(PORTAGE_BIN_PATH, "env-update"))
        fixpackages_cmd = (portage_python, "-Wd",
                           os.path.join(PORTAGE_BIN_PATH, "fixpackages"))
        portageq_cmd = (portage_python, "-Wd",
                        os.path.join(PORTAGE_BIN_PATH, "portageq"))
        quickpkg_cmd = (portage_python, "-Wd",
                        os.path.join(PORTAGE_BIN_PATH, "quickpkg"))
        regenworld_cmd = (portage_python, "-Wd",
                          os.path.join(PORTAGE_BIN_PATH, "regenworld"))

        rm_binary = find_binary("rm")
        self.assertEqual(rm_binary is None, False, "rm command not found")
        rm_cmd = (rm_binary, )

        egencache_extra_args = []
        if self._have_python_xml():
            egencache_extra_args.append("--update-use-local-desc")

        test_ebuild = portdb.findname("dev-libs/A-1")
        self.assertFalse(test_ebuild is None)

        test_commands = (
         env_update_cmd,
         emerge_cmd + ("--version",),
         emerge_cmd + ("--info",),
         emerge_cmd + ("--info", "--verbose"),
         emerge_cmd + ("--list-sets",),
         emerge_cmd + ("--check-news",),
         rm_cmd + ("-rf", cachedir),
         rm_cmd + ("-rf", cachedir_pregen),
         emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--regen",),
         rm_cmd + ("-rf", cachedir),
         egencache_cmd + ("--update",) + tuple(egencache_extra_args),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--metadata",),
         rm_cmd + ("-rf", cachedir),
         ({"FEATURES" : "metadata-transfer"},) + \
          emerge_cmd + ("--metadata",),
         emerge_cmd + ("--metadata",),
         rm_cmd + ("-rf", cachedir),
         emerge_cmd + ("--oneshot", "virtual/foo"),
         emerge_cmd + ("--pretend", "dev-libs/A"),
         ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
         emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
         emerge_cmd + ("-p", "dev-libs/B"),
         emerge_cmd + ("-B", "dev-libs/B",),
         emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),

         # trigger clean prior to pkg_pretend as in bug #390711
         ebuild_cmd + (test_ebuild, "unpack"),
         emerge_cmd + ("--oneshot", "dev-libs/A",),

         emerge_cmd + ("--noreplace", "dev-libs/A",),
         emerge_cmd + ("--config", "dev-libs/A",),
         emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
         emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
         emerge_cmd + ("--pretend", "--depclean",),
         emerge_cmd + ("--depclean",),
         quickpkg_cmd + ("dev-libs/A",),
         emerge_cmd + ("--usepkgonly", "dev-libs/A"),
         emaint_cmd + ("--check", "all"),
         emaint_cmd + ("--fix", "all"),
         fixpackages_cmd,
         regenworld_cmd,
         portageq_cmd + ("match", eroot, "dev-libs/A"),
         portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
         portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
         portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
         portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
         portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
         portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
         portageq_cmd + ("owners", eroot, eroot + "usr"),
         emerge_cmd + ("-p", eroot + "usr"),
         emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
         emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
         emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
        )

        distdir = playground.distdir
        pkgdir = playground.pkgdir
        fake_bin = os.path.join(eprefix, "bin")
        portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
        profile_path = settings.profile_path
        user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

        features = []
        if not portage.process.sandbox_capable or \
         os.environ.get("SANDBOX_ON") == "1":
            features.append("-sandbox")

        # Since egencache ignores settings from the calling environment,
        # configure it via make.conf.
        make_conf = (
            "FEATURES=\"%s\"\n" % (" ".join(features), ),
            "PORTDIR=\"%s\"\n" % (portdir, ),
            "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"], ),
            "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"], ),
        )

        path = os.environ.get("PATH")
        if path is not None and not path.strip():
            path = None
        if path is None:
            path = ""
        else:
            path = ":" + path
        path = fake_bin + path

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX": eprefix,
            "CLEAN_DELAY": "0",
            "DISTDIR": distdir,
            "EMERGE_WARNING_DELAY": "0",
            "INFODIR": "",
            "INFOPATH": "",
            "PATH": path,
            "PKGDIR": pkgdir,
            "PORTAGE_INST_GID": str(portage.data.portage_gid),
            "PORTAGE_INST_UID": str(portage.data.portage_uid),
            "PORTAGE_PYTHON": portage_python,
            "PORTAGE_TMPDIR": portage_tmpdir,
            "PYTHONPATH": pythonpath,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
             os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

        updates_dir = os.path.join(portdir, "profiles", "updates")
        dirs = [
            cachedir, cachedir_pregen, distdir, fake_bin, portage_tmpdir,
            updates_dir, user_config_dir, var_cache_edb
        ]
        true_symlinks = ["chown", "chgrp"]
        true_binary = find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")
        try:
            for d in dirs:
                ensure_dirs(d)
            with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
                for line in make_conf:
                    f.write(line)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))
            with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
                f.write(b"100")
            # non-empty system set keeps --depclean quiet
            with open(os.path.join(profile_path, "packages"), 'w') as f:
                f.write("*dev-libs/token-system-pkg")
            for cp, xml_data in metadata_xml_files:
                with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
                    f.write(playground.metadata_xml_template % xml_data)
            with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
                f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for args in test_commands:

                if isinstance(args[0], dict):
                    local_env = env.copy()
                    local_env.update(args[0])
                    args = args[1:]
                else:
                    local_env = env

                proc = subprocess.Popen(args, env=local_env, stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "emerge failed with args %s" % (args, ))
        finally:
            playground.cleanup()
Пример #52
0
	def testSlotAbiEmerge(self):
		"""End-to-end test of EAPI "4-slot-abi" slot-operator deps.

		Runs emerge in subprocesses to update dev-libs/glib, then checks
		(in-process) that the ":=" dep recorded in the installed
		dbus-glib's RDEPEND tracks the glib sub-slot actually merged,
		both before and after >=dev-libs/glib-2.32 is masked.
		"""

		# When True, subprocesses inherit stdout/stderr for debugging.
		debug = False

		ebuilds = {
			"dev-libs/glib-1.2.10" : {
				"SLOT": "1"
			},
			"dev-libs/glib-2.30.2" : {
				"EAPI": "4-slot-abi",
				"SLOT": "2/2.30"
			},
			"dev-libs/glib-2.32.3" : {
				"EAPI": "4-slot-abi",
				"SLOT": "2/2.32"
			},
			"dev-libs/dbus-glib-0.98" : {
				"EAPI": "4-slot-abi",
				"DEPEND":  "dev-libs/glib:2=",
				"RDEPEND": "dev-libs/glib:2="
			},
		}
		# Initial installed state: glib:2/2.30 with dbus-glib built
		# against it (sub-slot ":2/2.30=" recorded in its *DEPEND).
		installed = {
			"dev-libs/glib-1.2.10" : {
				"EAPI": "4-slot-abi",
				"SLOT": "1"
			},
			"dev-libs/glib-2.30.2" : {
				"EAPI": "4-slot-abi",
				"SLOT": "2/2.30"
			},
			"dev-libs/dbus-glib-0.98" : {
				"EAPI": "4-slot-abi",
				"DEPEND":  "dev-libs/glib:2/2.30=",
				"RDEPEND": "dev-libs/glib:2/2.30="
			},
		}

		world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]

		playground = ResolverPlayground(ebuilds=ebuilds,
			installed=installed, world=world, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		trees = playground.trees
		portdb = trees[eroot]["porttree"].dbapi
		vardb = trees[eroot]["vartree"].dbapi
		var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
		user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
		package_mask_path = os.path.join(user_config_dir, "package.mask")

		# Command tuples for spawning portage tools with this
		# interpreter, with -Wd so warnings surface on stderr.
		portage_python = portage._python_interpreter
		ebuild_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "ebuild"))
		emerge_cmd = (portage_python, "-Wd",
			os.path.join(PORTAGE_BIN_PATH, "emerge"))

		test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
		self.assertFalse(test_ebuild is None)

		# Commands run in order; tuples whose first element is a
		# callable are evaluated in-process as assertions instead of
		# being spawned as subprocesses.
		test_commands = (
			emerge_cmd + ("--oneshot", "dev-libs/glib",),
			(lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
			(BASH_BINARY, "-c", "echo %s >> %s" %
				tuple(map(portage._shell_quote,
				(">=dev-libs/glib-2.32", package_mask_path,)))),
			emerge_cmd + ("--oneshot", "dev-libs/glib",),
			(lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
		)

		distdir = playground.distdir
		pkgdir = playground.pkgdir
		fake_bin = os.path.join(eprefix, "bin")
		portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
		profile_path = settings.profile_path

		# Prepend fake_bin so the chown/chgrp symlinks to "true"
		# (created below) shadow the real commands.
		path =  os.environ.get("PATH")
		if path is not None and not path.strip():
			path = None
		if path is None:
			path = ""
		else:
			path = ":" + path
		path = fake_bin + path

		# Put PORTAGE_PYM_PATH first in PYTHONPATH so subprocesses
		# import this portage instance rather than an installed one.
		pythonpath =  os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		# Minimal, controlled environment for the spawned commands.
		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"PATH" : path,
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PYTHONPATH" : pythonpath,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		dirs = [distdir, fake_bin, portage_tmpdir,
			user_config_dir, var_cache_edb]
		true_symlinks = ["chown", "chgrp"]
		true_binary = find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")
		try:
			for d in dirs:
				ensure_dirs(d)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))
			with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
				f.write(b"100")
			# non-empty system set keeps --depclean quiet
			with open(os.path.join(profile_path, "packages"), 'w') as f:
				f.write("*dev-libs/token-system-pkg")

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for i, args in enumerate(test_commands):

				# In-process assertion entries (lambdas) are called
				# directly instead of spawned.
				if hasattr(args[0], '__call__'):
					self.assertTrue(args[0](),
						"callable at index %s failed" % (i,))
					continue

				proc = subprocess.Popen(args,
					env=env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					# Drain captured stdout and replay it on stderr
					# only when the command fails.
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"emerge failed with args %s" % (args,))
		finally:
			playground.cleanup()
Пример #53
0
    def testMoveEnt(self):

        ebuilds = {
            "dev-libs/A-2::dont_apply_updates": {
                "EAPI": "4",
                "SLOT": "2",
            },
        }

        installed = {
            "dev-libs/A-1::test_repo": {
                "EAPI": "4",
            },
            "dev-libs/A-2::dont_apply_updates": {
                "EAPI": "4",
                "SLOT": "2",
            },
        }

        binpkgs = {
            "dev-libs/A-1::test_repo": {
                "EAPI": "4",
            },
            "dev-libs/A-2::dont_apply_updates": {
                "EAPI": "4",
                "SLOT": "2",
            },
        }

        updates = textwrap.dedent("""
			move dev-libs/A dev-libs/A-moved
		""")

        playground = ResolverPlayground(binpkgs=binpkgs,
                                        ebuilds=ebuilds,
                                        installed=installed)

        settings = playground.settings
        trees = playground.trees
        eroot = settings["EROOT"]
        test_repo_location = settings.repositories["test_repo"].location
        portdb = trees[eroot]["porttree"].dbapi
        vardb = trees[eroot]["vartree"].dbapi
        bindb = trees[eroot]["bintree"].dbapi

        updates_dir = os.path.join(test_repo_location, "profiles", "updates")

        try:
            ensure_dirs(updates_dir)
            with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
                f.write(updates)

            # Create an empty updates directory, so that this
            # repo doesn't inherit updates from the main repo.
            ensure_dirs(
                os.path.join(portdb.getRepositoryPath("dont_apply_updates"),
                             "profiles", "updates"))

            global_noiselimit = portage.util.noiselimit
            portage.util.noiselimit = -2
            try:
                _do_global_updates(trees, {})
            finally:
                portage.util.noiselimit = global_noiselimit

            # Workaround for cache validation not working
            # correctly when filesystem has timestamp precision
            # of 1 second.
            vardb._clear_cache()

            # A -> A-moved
            self.assertRaises(KeyError, vardb.aux_get, "dev-libs/A-1",
                              ["EAPI"])
            vardb.aux_get("dev-libs/A-moved-1", ["EAPI"])
            self.assertRaises(KeyError, bindb.aux_get, "dev-libs/A-1",
                              ["EAPI"])
            bindb.aux_get("dev-libs/A-moved-1", ["EAPI"])

            # dont_apply_updates
            self.assertRaises(KeyError, vardb.aux_get, "dev-libs/A-moved-2",
                              ["EAPI"])
            vardb.aux_get("dev-libs/A-2", ["EAPI"])
            self.assertRaises(KeyError, bindb.aux_get, "dev-libs/A-moved-2",
                              ["EAPI"])
            bindb.aux_get("dev-libs/A-2", ["EAPI"])

        finally:
            playground.cleanup()
Пример #54
0
 def _ensure_dirs(self):
     """Create the parent directory of self._filename if needed.

     The directory is created setgid for portage_gid (mode 0o2750)
     with the 0o2 bits masked off.
     """
     parent_dir = os.path.dirname(self._filename)
     ensure_dirs(parent_dir, gid=portage_gid, mode=0o2750, mask=0o2)
Пример #55
0
    def testSyncLocal(self):
        debug = False

        skip_reason = self._must_skip()
        if skip_reason:
            self.portage_skip = skip_reason
            self.assertFalse(True, skip_reason)
            return

        repos_conf = textwrap.dedent("""
			[DEFAULT]
			%(default_keys)s
			[test_repo]
			location = %(EPREFIX)s/var/repositories/test_repo
			sync-type = %(sync-type)s
			sync-depth = %(sync-depth)s
			sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
			sync-rcu = %(sync-rcu)s
			sync-rcu-store-dir = %(EPREFIX)s/var/repositories/test_repo_rcu_storedir
			auto-sync = %(auto-sync)s
			%(repo_extra_keys)s
		""")

        profile = {
            "eapi": ("5", ),
            "package.use.stable.mask": ("dev-libs/A flag", )
        }

        ebuilds = {
            "dev-libs/A-0": {},
            "sys-apps/portage-3.0": {
                "IUSE": "+python_targets_python3_8"
            },
        }

        installed = {
            "sys-apps/portage-2.3.99": {
                "EAPI": "7",
                "IUSE": "+python_targets_python3_8",
                "USE": "python_targets_python3_8",
            },
        }

        user_config = {"make.conf": ('FEATURES="metadata-transfer"', )}

        playground = ResolverPlayground(
            ebuilds=ebuilds,
            installed=installed,
            profile=profile,
            user_config=user_config,
            debug=debug,
        )
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        homedir = os.path.join(eroot, "home")
        distdir = os.path.join(eprefix, "distdir")
        repo = settings.repositories["test_repo"]
        metadata_dir = os.path.join(repo.location, "metadata")
        rcu_store_dir = os.path.join(
            eprefix, "var/repositories/test_repo_rcu_storedir")

        cmds = {}
        for cmd in ("emerge", "emaint"):
            for bindir in (self.bindir, self.sbindir):
                path = os.path.join(bindir, cmd)
                if os.path.exists(path):
                    cmds[cmd] = (portage._python_interpreter, "-b", "-Wd",
                                 path)
                    break
            else:
                raise AssertionError("%s binary not found in %s or %s" %
                                     (cmd, self.bindir, self.sbindir))

        git_binary = find_binary("git")
        git_cmd = (git_binary, )

        hg_binary = find_binary("hg")
        hg_cmd = (hg_binary, )

        committer_name = "Gentoo Dev"
        committer_email = "*****@*****.**"

        def repos_set_conf(
            sync_type,
            dflt_keys=None,
            xtra_keys=None,
            auto_sync="yes",
            sync_rcu=False,
            sync_depth=None,
        ):
            env["PORTAGE_REPOSITORIES"] = repos_conf % {
                "EPREFIX": eprefix,
                "sync-type": sync_type,
                "sync-depth": 0 if sync_depth is None else sync_depth,
                "sync-rcu": "yes" if sync_rcu else "no",
                "auto-sync": auto_sync,
                "default_keys": "" if dflt_keys is None else dflt_keys,
                "repo_extra_keys": "" if xtra_keys is None else xtra_keys,
            }

        def alter_ebuild():
            with open(
                    os.path.join(repo.location + "_sync", "dev-libs", "A",
                                 "A-0.ebuild"),
                    "a",
            ) as f:
                f.write("\n")
            bump_timestamp()

        def bump_timestamp():
            bump_timestamp.timestamp += datetime.timedelta(seconds=1)
            with open(
                    os.path.join(repo.location + "_sync", "metadata",
                                 "timestamp.chk"), "w") as f:
                f.write(
                    bump_timestamp.timestamp.strftime(
                        "%s\n" % TIMESTAMP_FORMAT, ))

        bump_timestamp.timestamp = datetime.datetime.utcnow()

        bump_timestamp_cmds = ((homedir, bump_timestamp), )

        sync_cmds = (
            (homedir, cmds["emerge"] + ("--sync", )),
            (
                homedir,
                lambda: self.assertTrue(
                    os.path.exists(os.path.join(repo.location, "dev-libs", "A")
                                   ),
                    "dev-libs/A expected, but missing",
                ),
            ),
            (homedir, cmds["emaint"] + ("sync", "-A")),
        )

        sync_cmds_auto_sync = (
            (homedir, lambda: repos_set_conf("rsync", auto_sync="no")),
            (homedir, cmds["emerge"] + ("--sync", )),
            (
                homedir,
                lambda: self.assertFalse(
                    os.path.exists(os.path.join(repo.location, "dev-libs", "A")
                                   ),
                    "dev-libs/A found, expected missing",
                ),
            ),
            (homedir, lambda: repos_set_conf("rsync", auto_sync="yes")),
        )

        rename_repo = (
            (homedir,
             lambda: os.rename(repo.location, repo.location + "_sync")), )

        rsync_opts_repos = (
            (homedir, alter_ebuild),
            (
                homedir,
                lambda: repos_set_conf(
                    "rsync",
                    None,
                    "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                    _shell_quote(repo.location + "_back"),
                ),
            ),
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        rsync_opts_repos_default = (
            (homedir, alter_ebuild),
            (
                homedir,
                lambda: repos_set_conf(
                    "rsync",
                    "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                    _shell_quote(repo.location + "_back"),
                ),
            ),
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        rsync_opts_repos_default_ovr = (
            (homedir, alter_ebuild),
            (
                homedir,
                lambda: repos_set_conf(
                    "rsync",
                    "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                    _shell_quote(repo.location + "_back_nowhere"),
                    "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                    _shell_quote(repo.location + "_back"),
                ),
            ),
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        rsync_opts_repos_default_cancel = (
            (homedir, alter_ebuild),
            (
                homedir,
                lambda: repos_set_conf(
                    "rsync",
                    "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                    _shell_quote(repo.location + "_back_nowhere"),
                    "sync-rsync-extra-opts = ",
                ),
            ),
            (homedir, cmds["emerge"] + ("--sync", )),
            (
                homedir,
                lambda: self.assertFalse(
                    os.path.exists(repo.location + "_back")),
            ),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        delete_repo_location = (
            (homedir, lambda: shutil.rmtree(repo.user_location)),
            (homedir, lambda: os.mkdir(repo.user_location)),
        )

        delete_rcu_store_dir = ((homedir,
                                 lambda: shutil.rmtree(rcu_store_dir)), )

        revert_rcu_layout = (
            (
                homedir,
                lambda: os.rename(repo.user_location, repo.user_location +
                                  ".bak"),
            ),
            (
                homedir,
                lambda: os.rename(
                    os.path.realpath(repo.user_location + ".bak"), repo.
                    user_location),
            ),
            (homedir, lambda: os.unlink(repo.user_location + ".bak")),
            (homedir,
             lambda: shutil.rmtree(repo.user_location + "_rcu_storedir")),
        )

        upstream_git_commit = (
            (
                repo.location + "_sync",
                git_cmd +
                ("commit", "--allow-empty", "-m", "test empty commit"),
            ),
            (
                repo.location + "_sync",
                git_cmd +
                ("commit", "--allow-empty", "-m", "test empty commit 2"),
            ),
        )

        delete_sync_repo = ((homedir,
                             lambda: shutil.rmtree(repo.location + "_sync")), )

        git_repo_create = (
            (
                repo.location,
                git_cmd + (
                    "config",
                    "--global",
                    "user.name",
                    committer_name,
                ),
            ),
            (
                repo.location,
                git_cmd + (
                    "config",
                    "--global",
                    "user.email",
                    committer_email,
                ),
            ),
            (repo.location, git_cmd + ("init-db", )),
            (repo.location, git_cmd + ("add", ".")),
            (repo.location,
             git_cmd + ("commit", "-a", "-m", "add whole repo")),
        )

        sync_type_git = ((homedir, lambda: repos_set_conf("git")), )

        sync_type_git_shallow = ((
            homedir, lambda: repos_set_conf("git", sync_depth=1)), )

        sync_rsync_rcu = ((homedir,
                           lambda: repos_set_conf("rsync", sync_rcu=True)), )

        delete_git_dir = (
            (homedir,
             lambda: shutil.rmtree(os.path.join(repo.location, ".git"))), )

        def hg_init_global_config():
            with open(os.path.join(homedir, ".hgrc"), "wt") as f:
                f.write("[ui]\nusername = {} <{}>\n".format(
                    committer_name, committer_email))

        hg_repo_create = (
            (repo.location, hg_init_global_config),
            (repo.location, hg_cmd + ("init", )),
            (repo.location, hg_cmd + ("add", ".")),
            (repo.location, hg_cmd + ("commit", "-A", "-m", "add whole repo")),
        )

        sync_type_mercurial = ((homedir,
                                lambda: repos_set_conf("mercurial")), )

        def append_newline(path):
            with open(path, "at") as f:
                f.write("\n")

        upstream_hg_commit = (
            (
                repo.location + "_sync",
                lambda: append_newline(
                    os.path.join(repo.location + "_sync",
                                 "metadata/layout.conf")),
            ),
            (
                repo.location + "_sync",
                hg_cmd +
                ("commit", "metadata/layout.conf", "-m", "test empty commit"),
            ),
            (
                repo.location + "_sync",
                lambda: append_newline(
                    os.path.join(repo.location + "_sync",
                                 "metadata/layout.conf")),
            ),
            (
                repo.location + "_sync",
                hg_cmd + ("commit", "metadata/layout.conf", "-m",
                          "test empty commit 2"),
            ),
        )

        # NOTE(review): this span is the tail of a larger sync-test method whose
        # ``def`` line is above the visible region. Names such as hg_binary,
        # delete_sync_repo, git_repo_create, sync_cmds, rename_repo, repo,
        # playground, metadata_dir, homedir, eprefix, distdir, bump_timestamp,
        # TIMESTAMP_FORMAT, debug and repos_set_conf are all bound earlier in
        # the enclosing method/module — confirm against the full file.

        # Only include the Mercurial sync scenarios when an hg binary was
        # found; an empty tuple makes the main command loop skip them.
        if hg_binary is None:
            mercurial_tests = ()
        else:
            mercurial_tests = (delete_sync_repo + delete_git_dir +
                               hg_repo_create + sync_type_mercurial +
                               rename_repo + sync_cmds + upstream_hg_commit +
                               sync_cmds)

        # Force PORTAGE_PYM_PATH to be the first PYTHONPATH entry so spawned
        # subprocesses import the portage sources under test rather than any
        # installed copy. A blank/whitespace-only PYTHONPATH counts as unset.
        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and pythonpath.split(
                ":")[0] == PORTAGE_PYM_PATH:
            # Already first on the path; leave the value untouched.
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        # Minimal, controlled environment for every spawned command; all
        # values come from the test playground prepared earlier.
        env = {
            "PORTAGE_OVERRIDE_EPREFIX":
            eprefix,
            "DISTDIR":
            distdir,
            "GENTOO_COMMITTER_NAME":
            committer_name,
            "GENTOO_COMMITTER_EMAIL":
            committer_email,
            "HOME":
            homedir,
            "PATH":
            os.environ["PATH"],
            "PORTAGE_GRPNAME":
            os.environ["PORTAGE_GRPNAME"],
            "PORTAGE_USERNAME":
            os.environ["PORTAGE_USERNAME"],
            "PYTHONDONTWRITEBYTECODE":
            os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH":
            pythonpath,
        }
        # Start the scenario sequence with sync-type = rsync in repos.conf.
        repos_set_conf("rsync")

        if os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            env["FEATURES"] = "-sandbox -usersandbox"

        dirs = [homedir, metadata_dir]
        try:
            for d in dirs:
                ensure_dirs(d)

            # Write metadata/timestamp.chk so the rsync sync machinery sees a
            # valid tree timestamp (format and value supplied by the
            # bump_timestamp helper defined earlier).
            timestamp_path = os.path.join(metadata_dir, "timestamp.chk")
            with open(timestamp_path, "w") as f:
                f.write(
                    bump_timestamp.timestamp.strftime(
                        "%s\n" % TIMESTAMP_FORMAT, ))

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            # Run every scenario in order: rsync variants, rcu layout
            # round-trips, timestamp bumps, then git, shallow-git and
            # (optionally) mercurial. Each entry is a (cwd, cmd) pair where
            # cmd is either an argv list run as a subprocess or a plain
            # callable invoked in-process.
            for cwd, cmd in (rename_repo + sync_cmds_auto_sync + sync_cmds +
                             rsync_opts_repos + rsync_opts_repos_default +
                             rsync_opts_repos_default_ovr +
                             rsync_opts_repos_default_cancel +
                             bump_timestamp_cmds + sync_rsync_rcu + sync_cmds +
                             delete_rcu_store_dir + sync_cmds +
                             revert_rcu_layout + delete_repo_location +
                             sync_cmds + sync_cmds + bump_timestamp_cmds +
                             sync_cmds + revert_rcu_layout + delete_sync_repo +
                             git_repo_create + sync_type_git + rename_repo +
                             sync_cmds + upstream_git_commit + sync_cmds +
                             sync_type_git_shallow + upstream_git_commit +
                             sync_cmds + mercurial_tests):

                # Callables mutate test state directly; nothing to spawn.
                if hasattr(cmd, "__call__"):
                    cmd()
                    continue

                # cwd entries are relative to the repository under test.
                abs_cwd = os.path.join(repo.location, cwd)
                proc = subprocess.Popen(cmd,
                                        cwd=abs_cwd,
                                        env=env,
                                        stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    # Capture stdout and replay it to stderr only on failure,
                    # so passing runs stay quiet.
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(
                    os.EX_OK,
                    proc.returncode,
                    "%s failed in %s" % (
                        cmd,
                        cwd,
                    ),
                )

        finally:
            # Always tear down the temporary playground, even when a scenario
            # fails mid-sequence.
            playground.cleanup()