def _archive_copy(src_st, src_path, dest_path):
    """
    Copy file from src_path to dest_path. Regular files and symlinks
    are supported. If an EnvironmentError occurs, then it is logged
    to stderr.

    @param src_st: source file lstat result
    @type src_st: posix.stat_result
    @param src_path: source file path
    @type src_path: str
    @param dest_path: destination file path
    @type dest_path: str
    """
    # Remove destination file in order to ensure that the following
    # symlink or copy2 call won't fail (see bug #535850).
    try:
        os.unlink(dest_path)
    except OSError:
        pass
    try:
        if stat.S_ISLNK(src_st.st_mode):
            os.symlink(os.readlink(src_path), dest_path)
        else:
            shutil.copy2(src_path, dest_path)
    except EnvironmentError as e:
        portage.util.writemsg(
            _('dispatch-conf: Error copying %(src_path)s to '
            '%(dest_path)s: %(reason)s\n') % {
                "src_path": src_path,
                "dest_path": dest_path,
                "reason": e
            }, noiselevel=-1)

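# A minimal usage sketch for _archive_copy above. The paths here are
# hypothetical, and the os/stat/shutil/portage imports are assumed to be
# present at module level. The caller passes the lstat() result so the
# function can distinguish symlinks from regular files without re-statting.
def _archive_copy_example():
    src = "/etc/portage/make.conf"      # hypothetical source path
    dest = "/tmp/make.conf.archived"    # hypothetical archive path
    _archive_copy(os.lstat(src), src, dest)
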
def _prepare_fake_distdir(settings, alist):
    orig_distdir = settings["DISTDIR"]
    edpath = os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
    portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)

    # Remove any unexpected files or directories.
    for x in os.listdir(edpath):
        symlink_path = os.path.join(edpath, x)
        st = os.lstat(symlink_path)
        if x in alist and stat.S_ISLNK(st.st_mode):
            continue
        if stat.S_ISDIR(st.st_mode):
            shutil.rmtree(symlink_path)
        else:
            os.unlink(symlink_path)

    # Check for existing symlinks and recreate if necessary.
    for x in alist:
        symlink_path = os.path.join(edpath, x)
        target = os.path.join(orig_distdir, x)
        try:
            link_target = os.readlink(symlink_path)
        except OSError:
            os.symlink(target, symlink_path)
        else:
            if link_target != target:
                os.unlink(symlink_path)
                os.symlink(target, symlink_path)

def test_gpkg_symlink_path(self):
    if sys.version_info.major < 3:
        self.skipTest("Python 2 is not supported")

    playground = ResolverPlayground(
        user_config={
            "make.conf": ('BINPKG_COMPRESS="none"',),
        }
    )
    tmpdir = tempfile.mkdtemp()

    try:
        settings = playground.settings

        orig_full_path = os.path.join(tmpdir, "orig/")
        os.makedirs(orig_full_path)
        os.symlink(
            "aaaabbbb/ccccdddd/eeeeffff/gggghhhh/iiiijjjj/kkkkllll/"
            "mmmmnnnn/oooopppp/qqqqrrrr/sssstttt/uuuuvvvv/wwwwxxxx/"
            "yyyyzzzz/00001111/22223333/44445555/66667777/88889999/test",
            os.path.join(orig_full_path, "a_long_symlink"),
        )

        gpkg_file_loc = os.path.join(tmpdir, "test.gpkg.tar")
        test_gpkg = gpkg(settings, "test", gpkg_file_loc)

        check_result = test_gpkg._check_pre_image_files(
            os.path.join(tmpdir, "orig")
        )
        self.assertEqual(check_result, (0, 14, 166, 0, 0))

        test_gpkg.compress(os.path.join(tmpdir, "orig"), {"meta": "test"})

        with open(gpkg_file_loc, "rb") as container:
            # container
            self.assertEqual(
                test_gpkg._get_tar_format(container), tarfile.USTAR_FORMAT
            )

        with tarfile.open(gpkg_file_loc, "r") as container:
            metadata = io.BytesIO(container.extractfile("test/metadata.tar").read())
            self.assertEqual(
                test_gpkg._get_tar_format(metadata), tarfile.USTAR_FORMAT
            )
            metadata.close()

            image = io.BytesIO(container.extractfile("test/image.tar").read())
            self.assertEqual(test_gpkg._get_tar_format(image), tarfile.GNU_FORMAT)
            image.close()

        test_gpkg.decompress(os.path.join(tmpdir, "test"))
        r = compare_files(
            os.path.join(tmpdir, "orig/", "a_long_symlink"),
            os.path.join(tmpdir, "test/", "a_long_symlink"),
            skipped_types=("atime", "mtime", "ctime"),
        )
        self.assertEqual(r, ())
    finally:
        shutil.rmtree(tmpdir)
        playground.cleanup()

def _prepare_fake_filesdir(settings):
    real_filesdir = settings["O"] + "/files"
    symlink_path = settings["FILESDIR"]

    try:
        link_target = os.readlink(symlink_path)
    except OSError:
        os.symlink(real_filesdir, symlink_path)
    else:
        if link_target != real_filesdir:
            os.unlink(symlink_path)
            os.symlink(real_filesdir, symlink_path)

def commit_update(self):
    update_location = self.current_update
    self._update_location = None
    try:
        snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
    except OSError:
        snapshots = []
        portage.util.ensure_dirs(self._snapshots_dir)
        portage.util.apply_stat_permissions(self._snapshots_dir,
            os.stat(self._storage_location))
    if snapshots:
        new_id = max(snapshots) + 1
    else:
        new_id = 1
    os.rename(update_location, os.path.join(self._snapshots_dir, str(new_id)))
    new_symlink = self._latest_symlink + '.new'
    try:
        os.unlink(new_symlink)
    except OSError:
        pass
    os.symlink('snapshots/{}'.format(new_id), new_symlink)

    # If SyncManager.pre_sync creates an empty directory where
    # self._latest_symlink is supposed to be (which is normal if
    # sync-rcu-store-dir has been removed), then we need to remove
    # the directory or else rename will raise IsADirectoryError
    # when we try to replace the directory with a symlink.
    try:
        os.rmdir(self._latest_symlink)
    except OSError:
        pass

    os.rename(new_symlink, self._latest_symlink)

    try:
        user_location_correct = os.path.samefile(self._user_location,
            self._latest_symlink)
    except OSError:
        user_location_correct = False

    if not user_location_correct:
        new_symlink = self._user_location + '.new'
        try:
            os.unlink(new_symlink)
        except OSError:
            pass
        os.symlink(self._latest_symlink, new_symlink)
        os.rename(new_symlink, self._user_location)

    coroutine_return()
    yield None

def modify_files(dir_path):
    for name in os.listdir(dir_path):
        path = os.path.join(dir_path, name)
        st = os.lstat(path)
        if stat.S_ISREG(st.st_mode):
            with io.open(path, mode='a',
                encoding=_encodings["stdio"]) as f:
                f.write("modified at %d\n" % time.time())
        elif stat.S_ISLNK(st.st_mode):
            old_dest = os.readlink(path)
            os.unlink(path)
            os.symlink(old_dest + " modified at %d" % time.time(), path)

def _create_symlink(self, cpv):
    """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
    ${PKGDIR}/${CATEGORY} directory, if necessary).  Any file that may
    exist in the location of the symlink will first be removed."""
    mycat, mypkg = catsplit(cpv)
    full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
    self._ensure_dir(os.path.dirname(full_path))
    try:
        os.unlink(full_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        del e
    os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)

def _hardlink_atomic(self, src, dest, dir_info, symlink=False):
    head, tail = os.path.split(dest)
    hardlink_tmp = os.path.join(
        head, ".%s._mirrordist_hardlink_.%s" % (tail, portage.getpid()))

    try:
        try:
            if symlink:
                os.symlink(src, hardlink_tmp)
            else:
                os.link(src, hardlink_tmp)
        except OSError as e:
            if e.errno != errno.EXDEV:
                msg = "hardlink %s from %s failed: %s" % (
                    self.distfile,
                    dir_info,
                    e,
                )
                self.scheduler.output(msg + "\n", background=True,
                    log_path=self._log_path)
                logging.error(msg)
            return False

        try:
            os.rename(hardlink_tmp, dest)
        except OSError as e:
            msg = "hardlink rename '%s' from %s failed: %s" % (
                self.distfile,
                dir_info,
                e,
            )
            self.scheduler.output(msg + "\n", background=True,
                log_path=self._log_path)
            logging.error(msg)
            return False
    finally:
        try:
            os.unlink(hardlink_tmp)
        except OSError:
            pass

    return True

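# _hardlink_atomic above relies on a common POSIX idiom: create the link
# under a temporary dotfile name in the destination directory, then
# os.rename() it into place. rename() within one filesystem is atomic, so
# readers never observe a partially created entry. A minimal standalone
# sketch of the same pattern (the function name and paths are illustrative,
# not part of the portage API):
def _atomic_link_example(src, dest):
    tmp = os.path.join(os.path.dirname(dest),
        ".%s.tmp.%d" % (os.path.basename(dest), os.getpid()))
    try:
        os.link(src, tmp)       # fails with EXDEV if src is on another filesystem
        os.rename(tmp, dest)    # atomic replacement of dest
    finally:
        try:
            os.unlink(tmp)      # clean up if the rename did not happen
        except OSError:
            pass
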
def commit_update(self):
    update_location = self.current_update
    self._update_location = None
    try:
        snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
    except OSError:
        snapshots = []
        portage.util.ensure_dirs(self._snapshots_dir)
        portage.util.apply_stat_permissions(self._snapshots_dir,
            os.stat(self._storage_location))
    if snapshots:
        new_id = max(snapshots) + 1
    else:
        new_id = 1
    os.rename(update_location, os.path.join(self._snapshots_dir, str(new_id)))
    new_symlink = self._latest_symlink + '.new'
    try:
        os.unlink(new_symlink)
    except OSError:
        pass
    os.symlink('snapshots/{}'.format(new_id), new_symlink)
    os.rename(new_symlink, self._latest_symlink)

    try:
        user_location_correct = os.path.samefile(self._user_location,
            self._latest_symlink)
    except OSError:
        user_location_correct = False

    if not user_location_correct:
        new_symlink = self._user_location + '.new'
        try:
            os.unlink(new_symlink)
        except OSError:
            pass
        os.symlink(self._latest_symlink, new_symlink)
        os.rename(new_symlink, self._user_location)

    coroutine_return()
    yield None

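# commit_update() above publishes a new snapshot by writing a fresh symlink
# under a temporary '.new' name and renaming it over the old one, since a
# symlink's target cannot be changed in place. A minimal standalone sketch
# of that symlink-swap step (names are illustrative, not the portage API):
def _swap_symlink_example(target, link_path):
    new_link = link_path + '.new'
    try:
        os.unlink(new_link)         # discard a stale temporary link, if any
    except OSError:
        pass
    os.symlink(target, new_link)    # create the replacement pointer
    os.rename(new_link, link_path)  # atomically redirect link_path
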
def testSymlinkDir(self):
    """
    Test that masked symlinks to directories are removed.
    """
    tmp_dir = tempfile.mkdtemp()
    try:
        base_dir = os.path.join(tmp_dir, "foo")
        target_dir = os.path.join(tmp_dir, "foo", "bar")
        link_name = os.path.join(tmp_dir, "foo", "baz")
        os.mkdir(base_dir)
        os.mkdir(target_dir)
        os.symlink(target_dir, link_name)

        install_mask = InstallMask("/foo/")
        install_mask_dir(tmp_dir, install_mask)
        self.assertFalse(os.path.lexists(link_name),
            "failed to remove {}".format(link_name))
        self.assertFalse(os.path.lexists(base_dir),
            "failed to remove {}".format(base_dir))
    finally:
        shutil.rmtree(tmp_dir)

def testSimple(self):
    debug = False

    skip_reason = self._must_skip()
    if skip_reason:
        self.portage_skip = skip_reason
        self.assertFalse(True, skip_reason)
        return

    copyright_header = """# Copyright 1999-%s Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Header: $
""" % time.gmtime().tm_year

    repo_configs = {
        "test_repo": {
            "layout.conf": ("update-changelog = true",),
        }
    }

    profiles = (
        ("x86", "default/linux/x86/test_profile", "stable"),
        ("x86", "default/linux/x86/test_dev", "dev"),
        ("x86", "default/linux/x86/test_exp", "exp"),
    )

    profile = {
        "eapi": ("5",),
        "package.use.stable.mask": ("dev-libs/A flag",)
    }

    ebuilds = {
        "dev-libs/A-0": {
            "COPYRIGHT_HEADER": copyright_header,
            "DESCRIPTION": "Desc goes here",
            "EAPI": "5",
            "HOMEPAGE": "https://example.com",
            "IUSE": "flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "RDEPEND": "flag? ( dev-libs/B[flag] )",
        },
        "dev-libs/A-1": {
            "COPYRIGHT_HEADER": copyright_header,
            "DESCRIPTION": "Desc goes here",
            "EAPI": "4",
            "HOMEPAGE": "https://example.com",
            "IUSE": "flag",
            "KEYWORDS": "~x86",
            "LICENSE": "GPL-2",
            "RDEPEND": "flag? ( dev-libs/B[flag] )",
        },
        "dev-libs/B-1": {
            "COPYRIGHT_HEADER": copyright_header,
            "DESCRIPTION": "Desc goes here",
            "EAPI": "4",
            "HOMEPAGE": "https://example.com",
            "IUSE": "flag",
            "KEYWORDS": "~x86",
            "LICENSE": "GPL-2",
        },
        "dev-libs/C-0": {
            "COPYRIGHT_HEADER": copyright_header,
            "DESCRIPTION": "Desc goes here",
            "EAPI": "4",
            "HOMEPAGE": "https://example.com",
            "IUSE": "flag",
            # must be unstable, since dev-libs/A[flag] is stable masked
            "KEYWORDS": "~x86",
            "LICENSE": "GPL-2",
            "RDEPEND": "flag? ( dev-libs/A[flag] )",
        },
    }

    licenses = ["GPL-2"]
    arch_list = ["x86"]
    metadata_xsd = os.path.join(REPOMAN_BASE_PATH, "cnf/metadata.xsd")
    metadata_xml_files = (
        (
            "dev-libs/A",
            {
                "flags": "<flag name='flag' restrict='>=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>",
            },
        ),
        (
            "dev-libs/B",
            {
                "flags": "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
            },
        ),
        (
            "dev-libs/C",
            {
                "flags": "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
            },
        ),
    )

    use_desc = (("flag", "Description of how USE='flag' affects packages"),)

    playground = ResolverPlayground(ebuilds=ebuilds,
        profile=profile, repo_configs=repo_configs, debug=debug)
    settings = playground.settings
    eprefix = settings["EPREFIX"]
    eroot = settings["EROOT"]
    portdb = playground.trees[playground.eroot]["porttree"].dbapi
    homedir = os.path.join(eroot, "home")
    distdir = os.path.join(eprefix, "distdir")
    test_repo_location = settings.repositories["test_repo"].location
    profiles_dir = os.path.join(test_repo_location, "profiles")
    license_dir = os.path.join(test_repo_location, "licenses")

    repoman_cmd = (portage._python_interpreter, "-b", "-Wd",
        os.path.join(self.bindir, "repoman"))

    git_binary = find_binary("git")
    git_cmd = (git_binary,)

    cp_binary = find_binary("cp")
    self.assertEqual(cp_binary is None, False, "cp command not found")
    cp_cmd = (cp_binary,)

    test_ebuild = portdb.findname("dev-libs/A-1")
    self.assertFalse(test_ebuild is None)

    committer_name = "Gentoo Dev"
    committer_email = "*****@*****.**"

    git_test = (
        ("", repoman_cmd + ("manifest",)),
        ("", git_cmd + ("config", "--global", "user.name", committer_name,)),
        ("", git_cmd + ("config", "--global", "user.email", committer_email,)),
        ("", git_cmd + ("init-db",)),
        ("", git_cmd + ("add", ".")),
        ("", git_cmd + ("commit", "-a", "-m", "add whole repo")),
        ("", repoman_cmd + ("full", "-d")),
        ("", repoman_cmd + ("full", "--include-profiles",
            "default/linux/x86/test_profile")),
        ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")),
        ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")),
        ("", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 2")),
        ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")),
        ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")),
        ("dev-libs", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 3")),
        ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")),
        ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")),
        ("dev-libs/A", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 4")),
    )

    env = {
        "PORTAGE_OVERRIDE_EPREFIX": eprefix,
        "DISTDIR": distdir,
        "GENTOO_COMMITTER_NAME": committer_name,
        "GENTOO_COMMITTER_EMAIL": committer_email,
        "HOME": homedir,
        "PATH": os.environ["PATH"],
        "PORTAGE_GRPNAME": os.environ["PORTAGE_GRPNAME"],
        "PORTAGE_USERNAME": os.environ["PORTAGE_USERNAME"],
        "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
        "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
    }

    if os.environ.get("SANDBOX_ON") == "1":
        # avoid problems from nested sandbox instances
        env["FEATURES"] = "-sandbox -usersandbox"

    dirs = [homedir, license_dir, profiles_dir, distdir]
    try:
        for d in dirs:
            ensure_dirs(d)
        with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f:
            f.write(copyright_header)
        with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f:
            for x in profiles:
                f.write("%s %s %s\n" % x)

        # ResolverPlayground only created the first profile,
        # so create the remaining ones.
        for x in profiles[1:]:
            sub_profile_dir = os.path.join(profiles_dir, x[1])
            ensure_dirs(sub_profile_dir)
            for config_file, lines in profile.items():
                file_name = os.path.join(sub_profile_dir, config_file)
                with open(file_name, "w") as f:
                    for line in lines:
                        f.write("%s\n" % line)

        for x in licenses:
            open(os.path.join(license_dir, x), 'wb').close()
        with open(os.path.join(profiles_dir, "arch.list"), 'w') as f:
            for x in arch_list:
                f.write("%s\n" % x)
        with open(os.path.join(profiles_dir, "use.desc"), 'w') as f:
            for k, v in use_desc:
                f.write("%s - %s\n" % (k, v))
        for cp, xml_data in metadata_xml_files:
            with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
                f.write(playground.metadata_xml_template % xml_data)

        # Use a symlink to test_repo, in order to trigger bugs
        # involving canonical vs. non-canonical paths.
        test_repo_symlink = os.path.join(eroot, "test_repo_symlink")
        os.symlink(test_repo_location, test_repo_symlink)
        metadata_xsd_dest = os.path.join(
            test_repo_location, 'metadata/xml-schema/metadata.xsd')
        os.makedirs(os.path.dirname(metadata_xsd_dest))
        os.symlink(metadata_xsd, metadata_xsd_dest)

        if debug:
            # The subprocess inherits both stdout and stderr, for
            # debugging purposes.
            stdout = None
        else:
            # The subprocess inherits stderr so that any warnings
            # triggered by python -Wd will be visible.
            stdout = subprocess.PIPE

        for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"):
            abs_cwd = os.path.join(test_repo_symlink, cwd)
            proc = subprocess.Popen(repoman_cmd + ("full",),
                cwd=abs_cwd, env=env, stdout=stdout)

            if debug:
                proc.wait()
            else:
                output = proc.stdout.readlines()
                proc.wait()
                proc.stdout.close()
                if proc.returncode != os.EX_OK:
                    for line in output:
                        sys.stderr.write(_unicode_decode(line))

            self.assertEqual(os.EX_OK, proc.returncode,
                "repoman failed in %s" % (cwd,))

        if git_binary is not None:
            for cwd, cmd in git_test:
                abs_cwd = os.path.join(test_repo_symlink, cwd)
                proc = subprocess.Popen(cmd,
                    cwd=abs_cwd, env=env, stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                    "%s failed in %s" % (cmd, cwd,))
    finally:
        playground.cleanup()

def testDoebuild(self):
    """
    Invoke portage.doebuild() with the fd_pipes parameter, and check that
    the expected output appears in the pipe. This functionality is not
    used by portage internally, but it is supported for API consumers
    (see bug #475812).
    """

    output_fd = 200
    ebuild_body = ["S=${WORKDIR}"]
    for phase_func in (
        "pkg_info",
        "pkg_nofetch",
        "pkg_pretend",
        "pkg_setup",
        "src_unpack",
        "src_prepare",
        "src_configure",
        "src_compile",
        "src_test",
        "src_install",
    ):
        ebuild_body.append("%s() { echo ${EBUILD_PHASE} 1>&%s; }" %
            (phase_func, output_fd))

    ebuild_body.append("")
    ebuild_body = "\n".join(ebuild_body)

    ebuilds = {
        "app-misct/foo-1": {
            "EAPI": "5",
            "MISC_CONTENT": ebuild_body,
        }
    }

    # Override things that may be unavailable, or may have portability
    # issues when running tests in exotic environments.
    #   prepstrip - bug #447810 (bash read builtin EINTR problem)
    true_symlinks = ("find", "prepstrip", "sed", "scanelf")
    true_binary = portage.process.find_binary("true")
    self.assertEqual(true_binary is None, False, "true command not found")

    dev_null = open(os.devnull, "wb")
    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        QueryCommand._db = playground.trees
        root_config = playground.trees[playground.eroot]["root_config"]
        portdb = root_config.trees["porttree"].dbapi
        settings = portage.config(clone=playground.settings)
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                "__PORTAGE_TEST_HARDLINK_LOCKS"]
            settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

        settings.features.add("noauto")
        settings.features.add("test")
        settings["PORTAGE_PYTHON"] = portage._python_interpreter
        settings["PORTAGE_QUIET"] = "1"
        settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get(
            "PYTHONDONTWRITEBYTECODE", "")

        fake_bin = os.path.join(settings["EPREFIX"], "bin")
        portage.util.ensure_dirs(fake_bin)
        for x in true_symlinks:
            os.symlink(true_binary, os.path.join(fake_bin, x))

        settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
        settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

        cpv = "app-misct/foo-1"
        metadata = dict(
            zip(Package.metadata_keys,
                portdb.aux_get(cpv, Package.metadata_keys)))

        pkg = Package(
            built=False,
            cpv=cpv,
            installed=False,
            metadata=metadata,
            root_config=root_config,
            type_name="ebuild",
        )
        settings.setcpv(pkg)
        ebuildpath = portdb.findname(cpv)
        self.assertNotEqual(ebuildpath, None)

        for phase in (
            "info",
            "nofetch",
            "pretend",
            "setup",
            "unpack",
            "prepare",
            "configure",
            "compile",
            "test",
            "install",
            "qmerge",
            "clean",
            "merge",
        ):
            pr, pw = os.pipe()

            producer = DoebuildProcess(
                doebuild_pargs=(ebuildpath, phase),
                doebuild_kwargs={
                    "settings": settings,
                    "mydbapi": portdb,
                    "tree": "porttree",
                    "vartree": root_config.trees["vartree"],
                    "fd_pipes": {
                        1: dev_null.fileno(),
                        2: dev_null.fileno(),
                        output_fd: pw,
                    },
                    "prev_mtimes": {},
                },
            )

            consumer = PipeReader(input_files={"producer": pr})

            task_scheduler = TaskScheduler(iter([producer, consumer]),
                max_jobs=2)

            try:
                task_scheduler.start()
            finally:
                # PipeReader closes pr
                os.close(pw)

            task_scheduler.wait()
            output = portage._unicode_decode(
                consumer.getvalue()).rstrip("\n")

            if task_scheduler.returncode != os.EX_OK:
                portage.writemsg(output, noiselevel=-1)

            self.assertEqual(task_scheduler.returncode, os.EX_OK)

            if phase not in ("clean", "merge", "qmerge"):
                self.assertEqual(phase, output)
    finally:
        dev_null.close()
        playground.cleanup()
        QueryCommand._db = None

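# The docstring above describes the fd_pipes API: callers may route ebuild
# phase output to an arbitrary file descriptor. A minimal hedged sketch of a
# direct portage.doebuild() call using fd_pipes; the settings and portdb
# arguments are assumed to come from an already-configured portage
# environment (see the full test above for a complete setup), and reading
# the pipe after the fact assumes the output fits in the pipe buffer:
def _doebuild_fd_pipes_example(ebuildpath, settings, portdb):
    pr, pw = os.pipe()
    try:
        rval = portage.doebuild(ebuildpath, "info", settings=settings,
            mydbapi=portdb, tree="porttree",
            fd_pipes={1: pw, 2: pw, 200: pw})
    finally:
        os.close(pw)
    output = os.read(pr, 4096)
    os.close(pr)
    return rval, output
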
def _create_profile(self, ebuilds, installed, profile, repo_configs,
    user_config, sets):

    user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

    try:
        os.makedirs(user_config_dir)
    except os.error:
        pass

    for repo in self._repositories:
        if repo == "DEFAULT":
            continue

        repo_dir = self._get_repo_dir(repo)
        profile_dir = os.path.join(repo_dir, "profiles")
        metadata_dir = os.path.join(repo_dir, "metadata")
        os.makedirs(metadata_dir)

        # Create $REPO/profiles/categories
        categories = set()
        for cpv in ebuilds:
            ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
            if ebuilds_repo is None:
                ebuilds_repo = "test_repo"
            if ebuilds_repo == repo:
                categories.add(catsplit(cpv)[0])

        categories_file = os.path.join(profile_dir, "categories")
        with open(categories_file, "w") as f:
            for cat in categories:
                f.write(cat + "\n")

        # Create $REPO/profiles/license_groups
        license_file = os.path.join(profile_dir, "license_groups")
        with open(license_file, "w") as f:
            f.write("EULA TEST\n")

        repo_config = repo_configs.get(repo)
        if repo_config:
            for config_file, lines in repo_config.items():
                if config_file not in self.config_files and not any(
                    fnmatch.fnmatch(config_file, os.path.join(x, "*"))
                    for x in self.config_files):
                    raise ValueError("Unknown config file: '%s'" % config_file)

                if config_file in ("layout.conf",):
                    file_name = os.path.join(repo_dir, "metadata", config_file)
                else:
                    file_name = os.path.join(profile_dir, config_file)
                    if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
                        os.makedirs(os.path.dirname(file_name))

                with open(file_name, "w") as f:
                    for line in lines:
                        f.write("%s\n" % line)
                    # Temporarily write empty value of masters until it becomes default.
                    # TODO: Delete all references to "# use implicit masters"
                    # when empty value becomes default.
                    if config_file == "layout.conf" and not any(
                        line.startswith(("masters =", "# use implicit masters"))
                        for line in lines):
                        f.write("masters =\n")

        # Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
        os.makedirs(os.path.join(repo_dir, "eclass"))

        # Temporarily write empty value of masters until it becomes default.
        if not repo_config or "layout.conf" not in repo_config:
            layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
            with open(layout_conf_path, "w") as f:
                f.write("masters =\n")

        if repo == "test_repo":
            # Create a minimal profile in /usr/portage
            sub_profile_dir = os.path.join(profile_dir,
                "default", "linux", "x86", "test_profile")
            os.makedirs(sub_profile_dir)

            if not (profile and "eapi" in profile):
                eapi_file = os.path.join(sub_profile_dir, "eapi")
                with open(eapi_file, "w") as f:
                    f.write("0\n")

            make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
            with open(make_defaults_file, "w") as f:
                f.write("ARCH=\"x86\"\n")
                f.write("ACCEPT_KEYWORDS=\"x86\"\n")

            use_force_file = os.path.join(sub_profile_dir, "use.force")
            with open(use_force_file, "w") as f:
                f.write("x86\n")

            parent_file = os.path.join(sub_profile_dir, "parent")
            with open(parent_file, "w") as f:
                f.write("..\n")

            if profile:
                for config_file, lines in profile.items():
                    if config_file not in self.config_files:
                        raise ValueError("Unknown config file: '%s'" % config_file)

                    file_name = os.path.join(sub_profile_dir, config_file)
                    with open(file_name, "w") as f:
                        for line in lines:
                            f.write("%s\n" % line)

            # Create profile symlink
            os.symlink(sub_profile_dir,
                os.path.join(user_config_dir, "make.profile"))

    make_conf = {
        "ACCEPT_KEYWORDS": "x86",
        "CLEAN_DELAY": "0",
        "DISTDIR": self.distdir,
        "EMERGE_WARNING_DELAY": "0",
        "PKGDIR": self.pkgdir,
        "PORTAGE_INST_GID": str(portage.data.portage_gid),
        "PORTAGE_INST_UID": str(portage.data.portage_uid),
        "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
    }

    if os.environ.get("NOCOLOR"):
        make_conf["NOCOLOR"] = os.environ["NOCOLOR"]

    # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
    # need to be inherited by ebuild subprocesses.
    if 'PORTAGE_USERNAME' in os.environ:
        make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
    if 'PORTAGE_GRPNAME' in os.environ:
        make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']

    make_conf_lines = []
    for k_v in make_conf.items():
        make_conf_lines.append('%s="%s"' % k_v)

    if "make.conf" in user_config:
        make_conf_lines.extend(user_config["make.conf"])

    if not portage.process.sandbox_capable or \
        os.environ.get("SANDBOX_ON") == "1":
        # avoid problems from nested sandbox instances
        make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')

    configs = user_config.copy()
    configs["make.conf"] = make_conf_lines

    for config_file, lines in configs.items():
        if config_file not in self.config_files:
            raise ValueError("Unknown config file: '%s'" % config_file)

        file_name = os.path.join(user_config_dir, config_file)
        with open(file_name, "w") as f:
            for line in lines:
                f.write("%s\n" % line)

    # Create /usr/share/portage/config/make.globals
    make_globals_path = os.path.join(self.eroot,
        GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
    ensure_dirs(os.path.dirname(make_globals_path))
    os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path)

    # Create /usr/share/portage/config/sets/portage.conf
    default_sets_conf_dir = os.path.join(self.eroot,
        "usr/share/portage/config/sets")

    try:
        os.makedirs(default_sets_conf_dir)
    except os.error:
        pass

    provided_sets_portage_conf = (
        os.path.join(cnf_path, "sets", "portage.conf"))
    os.symlink(provided_sets_portage_conf,
        os.path.join(default_sets_conf_dir, "portage.conf"))

    set_config_dir = os.path.join(user_config_dir, "sets")

    try:
        os.makedirs(set_config_dir)
    except os.error:
        pass

    for sets_file, lines in sets.items():
        file_name = os.path.join(set_config_dir, sets_file)
        with open(file_name, "w") as f:
            for line in lines:
                f.write("%s\n" % line)

def testConfigProtect(self):
    """
    Demonstrates many different scenarios. For example:

     * regular file replaces regular file
     * regular file replaces symlink
     * regular file replaces directory
     * symlink replaces symlink
     * symlink replaces regular file
     * symlink replaces directory
     * directory replaces regular file
     * directory replaces symlink
    """

    debug = False

    content_A_1 = """
S="${WORKDIR}"

src_install() {
    insinto /etc/A
    keepdir /etc/A/dir_a
    keepdir /etc/A/symlink_replaces_dir
    keepdir /etc/A/regular_replaces_dir
    echo regular_a_1 > "${T}"/regular_a
    doins "${T}"/regular_a
    echo regular_b_1 > "${T}"/regular_b
    doins "${T}"/regular_b
    dosym regular_a /etc/A/regular_replaces_symlink
    dosym regular_b /etc/A/symlink_replaces_symlink
    echo regular_replaces_regular_1 > \
        "${T}"/regular_replaces_regular
    doins "${T}"/regular_replaces_regular
    echo symlink_replaces_regular > \
        "${T}"/symlink_replaces_regular
    doins "${T}"/symlink_replaces_regular
}
"""

    content_A_2 = """
S="${WORKDIR}"

src_install() {
    insinto /etc/A
    keepdir /etc/A/dir_a
    dosym dir_a /etc/A/symlink_replaces_dir
    echo regular_replaces_dir > "${T}"/regular_replaces_dir
    doins "${T}"/regular_replaces_dir
    echo regular_a_2 > "${T}"/regular_a
    doins "${T}"/regular_a
    echo regular_b_2 > "${T}"/regular_b
    doins "${T}"/regular_b
    echo regular_replaces_symlink > \
        "${T}"/regular_replaces_symlink
    doins "${T}"/regular_replaces_symlink
    dosym regular_b /etc/A/symlink_replaces_symlink
    echo regular_replaces_regular_2 > \
        "${T}"/regular_replaces_regular
    doins "${T}"/regular_replaces_regular
    dosym regular_a /etc/A/symlink_replaces_regular
}
"""

    ebuilds = {
        "dev-libs/A-1": {
            "EAPI": "5",
            "IUSE": "+flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "MISC_CONTENT": content_A_1,
        },
        "dev-libs/A-2": {
            "EAPI": "5",
            "IUSE": "+flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "MISC_CONTENT": content_A_2,
        },
    }

    playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
    settings = playground.settings
    eprefix = settings["EPREFIX"]
    eroot = settings["EROOT"]
    var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")

    portage_python = portage._python_interpreter
    dispatch_conf_cmd = (portage_python, "-b", "-Wd",
        os.path.join(self.sbindir, "dispatch-conf"))
    emerge_cmd = (portage_python, "-b", "-Wd",
        os.path.join(self.bindir, "emerge"))
    etc_update_cmd = (BASH_BINARY,
        os.path.join(self.sbindir, "etc-update"))
    etc_update_auto = etc_update_cmd + ("--automode", "-5",)

    config_protect = "/etc"

    def modify_files(dir_path):
        for name in os.listdir(dir_path):
            path = os.path.join(dir_path, name)
            st = os.lstat(path)
            if stat.S_ISREG(st.st_mode):
                with io.open(path, mode='a',
                    encoding=_encodings["stdio"]) as f:
                    f.write("modified at %d\n" % time.time())
            elif stat.S_ISLNK(st.st_mode):
                old_dest = os.readlink(path)
                os.unlink(path)
                os.symlink(old_dest +
                    " modified at %d" % time.time(), path)

    def updated_config_files(count):
        self.assertEqual(count,
            sum(len(x[1]) for x in find_updated_config_files(
                eroot, shlex_split(config_protect))))

    test_commands = (
        etc_update_cmd,
        dispatch_conf_cmd,
        emerge_cmd + ("-1", "=dev-libs/A-1"),
        partial(updated_config_files, 0),
        emerge_cmd + ("-1", "=dev-libs/A-2"),
        partial(updated_config_files, 2),
        etc_update_auto,
        partial(updated_config_files, 0),
        emerge_cmd + ("-1", "=dev-libs/A-2"),
        partial(updated_config_files, 0),
        # Test bug #523684, where a file renamed or removed by the
        # admin forces replacement files to be merged with config
        # protection.
        partial(shutil.rmtree, os.path.join(eprefix, "etc", "A")),
        emerge_cmd + ("-1", "=dev-libs/A-2"),
        partial(updated_config_files, 8),
        etc_update_auto,
        partial(updated_config_files, 0),
        # Modify some config files, and verify that it triggers
        # config protection.
        partial(modify_files, os.path.join(eroot, "etc", "A")),
        emerge_cmd + ("-1", "=dev-libs/A-2"),
        partial(updated_config_files, 6),
        etc_update_auto,
        partial(updated_config_files, 0),
        # Modify some config files, downgrade to A-1, and verify
        # that config protection works properly when the file
        # types are changing.
        partial(modify_files, os.path.join(eroot, "etc", "A")),
        emerge_cmd + ("-1", "--noconfmem", "=dev-libs/A-1"),
        partial(updated_config_files, 6),
        etc_update_auto,
        partial(updated_config_files, 0),
    )

    distdir = playground.distdir
    fake_bin = os.path.join(eprefix, "bin")
    portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")

    path = os.environ.get("PATH")
    if path is not None and not path.strip():
        path = None
    if path is None:
        path = ""
    else:
        path = ":" + path
    path = fake_bin + path

    pythonpath = os.environ.get("PYTHONPATH")
    if pythonpath is not None and not pythonpath.strip():
        pythonpath = None
    if pythonpath is not None and \
        pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
        pass
    else:
        if pythonpath is None:
            pythonpath = ""
        else:
            pythonpath = ":" + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath

    env = {
        "PORTAGE_OVERRIDE_EPREFIX": eprefix,
        "CLEAN_DELAY": "0",
        "CONFIG_PROTECT": config_protect,
        "DISTDIR": distdir,
        "EMERGE_DEFAULT_OPTS": "-v",
        "EMERGE_WARNING_DELAY": "0",
        "INFODIR": "",
        "INFOPATH": "",
        "PATH": path,
        "PORTAGE_INST_GID": str(portage.data.portage_gid),
        "PORTAGE_INST_UID": str(portage.data.portage_uid),
        "PORTAGE_PYTHON": portage_python,
        "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
        "PORTAGE_TMPDIR": portage_tmpdir,
        "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
        "PYTHONPATH": pythonpath,
        "__PORTAGE_TEST_PATH_OVERRIDE": fake_bin,
    }

    if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
        env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
            os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

    dirs = [distdir, fake_bin, portage_tmpdir, var_cache_edb]
    etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
    # Override things that may be unavailable, or may have portability
    # issues when running tests in exotic environments.
    #   prepstrip - bug #447810 (bash read builtin EINTR problem)
    true_symlinks = ["prepstrip", "scanelf"]
    true_binary = find_binary("true")
    self.assertEqual(true_binary is None, False, "true command not found")
    try:
        for d in dirs:
            ensure_dirs(d)
        for x in true_symlinks:
            os.symlink(true_binary, os.path.join(fake_bin, x))
        for x in etc_symlinks:
            os.symlink(os.path.join(self.cnf_etc_path, x),
                os.path.join(eprefix, "etc", x))
        with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
            f.write(b"100")

        if debug:
            # The subprocess inherits both stdout and stderr, for
            # debugging purposes.
            stdout = None
        else:
            # The subprocess inherits stderr so that any warnings
            # triggered by python -Wd will be visible.
            stdout = subprocess.PIPE

        for args in test_commands:

            if hasattr(args, '__call__'):
                args()
                continue

            if isinstance(args[0], dict):
                local_env = env.copy()
                local_env.update(args[0])
                args = args[1:]
            else:
                local_env = env

            proc = subprocess.Popen(args, env=local_env, stdout=stdout)

            if debug:
                proc.wait()
            else:
                output = proc.stdout.readlines()
                proc.wait()
                proc.stdout.close()
                if proc.returncode != os.EX_OK:
                    for line in output:
                        sys.stderr.write(_unicode_decode(line))

            self.assertEqual(os.EX_OK, proc.returncode,
                "emerge failed with args %s" % (args,))
    finally:
        playground.cleanup()

def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None, allow_missing_digests=True, force=False): """ Fetch files to DISTDIR and also verify digests if they are available. @param myuris: Maps each file name to a tuple of available fetch URIs. @type myuris: dict @param mysettings: Portage config instance. @type mysettings: portage.config @param listonly: Only print URIs and do not actually fetch them. @type listonly: bool @param fetchonly: Do not block for files that are locked by a concurrent fetcher process. This means that the function can return successfully *before* all files have been successfully fetched! @type fetchonly: bool @param use_locks: Enable locks. This parameter is ineffective if FEATURES=distlocks is disabled in the portage config! @type use_locks: bool @param digests: Maps each file name to a dict of digest types and values. @type digests: dict @param allow_missing_digests: Enable fetch even if there are no digests available for verification. @type allow_missing_digests: bool @param force: Force download, even when a file already exists in DISTDIR. This is most useful when there are no digests available, since otherwise download will be automatically forced if the existing file does not match the available digests. Also, this avoids the need to remove the existing file in advance, which makes it possible to atomically replace the file and avoid interference with concurrent processes. @type force: bool @rtype: int @return: 1 if successful, 0 otherwise. """ if force and digests: # Since the force parameter can trigger unnecessary fetch when the # digests match, do not allow force=True when digests are provided. raise PortageException(_('fetch: force=True is not allowed when digests are provided')) if not myuris: return 1 features = mysettings.features restrict = mysettings.get("PORTAGE_RESTRICT","").split() userfetch = portage.data.secpass >= 2 and "userfetch" in features # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring. restrict_mirror = "mirror" in restrict or "nomirror" in restrict if restrict_mirror: if ("mirror" in features) and ("lmirror" not in features): # lmirror should allow you to bypass mirror restrictions. # XXX: This is not a good thing, and is temporary at best. print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch.")) return 1 # Generally, downloading the same file repeatedly from # every single available mirror is a waste of bandwidth # and time, so there needs to be a cap. checksum_failure_max_tries = 5 v = checksum_failure_max_tries try: v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", checksum_failure_max_tries)) except (ValueError, OverflowError): writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains non-integer value: '%s'\n") % \ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries if v < 1: writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains value less than 1: '%s'\n") % v, noiselevel=-1) writemsg(_("!!! 
Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries checksum_failure_max_tries = v del v fetch_resume_size_default = "350K" fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE") if fetch_resume_size is not None: fetch_resume_size = "".join(fetch_resume_size.split()) if not fetch_resume_size: # If it's undefined or empty, silently use the default. fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) if match is None or \ (match.group(2).upper() not in _size_suffix_map): writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" " contains an unrecognized format: '%s'\n") % \ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " "default value: %s\n") % fetch_resume_size_default, noiselevel=-1) fetch_resume_size = None if fetch_resume_size is None: fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) fetch_resume_size = int(match.group(1)) * \ 2 ** _size_suffix_map[match.group(2).upper()] # Behave like the package has RESTRICT="primaryuri" after a # couple of checksum failures, to increase the probablility # of success before checksum_failure_max_tries is reached. checksum_failure_primaryuri = 2 thirdpartymirrors = mysettings.thirdpartymirrors() # In the background parallel-fetch process, it's safe to skip checksum # verification of pre-existing files in $DISTDIR that have the correct # file size. The parent process will verify their checksums prior to # the unpack phase. parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings if parallel_fetchonly: fetchonly = 1 check_config_instance(mysettings) custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"], CUSTOM_MIRRORS_FILE), recursive=1) mymirrors=[] if listonly or ("distlocks" not in features): use_locks = 0 distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK) fetch_to_ro = 0 if "skiprocheck" in features: fetch_to_ro = 1 if not distdir_writable and fetch_to_ro: if use_locks: writemsg(colorize("BAD", _("!!! For fetching to a read-only filesystem, " "locking should be turned off.\n")), noiselevel=-1) writemsg(_("!!! This can be done by adding -distlocks to " "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1) # use_locks = 0 # local mirrors are always added if try_mirrors and "local" in custommirrors: mymirrors += custommirrors["local"] if restrict_mirror: # We don't add any mirrors. 
pass else: if try_mirrors: mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x] hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", "")) if hash_filter.transparent: hash_filter = None skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1" if skip_manifest: allow_missing_digests = True pkgdir = mysettings.get("O") if digests is None and not (pkgdir is None or skip_manifest): mydigests = mysettings.repositories.get_repo_for_location( os.path.dirname(os.path.dirname(pkgdir))).load_manifest( pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST") elif digests is None or skip_manifest: # no digests because fetch was not called for a specific package mydigests = {} else: mydigests = digests ro_distdirs = [x for x in \ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \ if os.path.isdir(x)] fsmirrors = [] for x in range(len(mymirrors)-1,-1,-1): if mymirrors[x] and mymirrors[x][0]=='/': fsmirrors += [mymirrors[x]] del mymirrors[x] restrict_fetch = "fetch" in restrict force_mirror = "force-mirror" in features and not restrict_mirror custom_local_mirrors = custommirrors.get("local", []) if restrict_fetch: # With fetch restriction, a normal uri may only be fetched from # custom local mirrors (if available). A mirror:// uri may also # be fetched from specific mirrors (effectively overriding fetch # restriction, but only for specific mirrors). locations = custom_local_mirrors else: locations = mymirrors file_uri_tuples = [] # Check for 'items' attribute since OrderedDict is not a dict. if hasattr(myuris, 'items'): for myfile, uri_set in myuris.items(): for myuri in uri_set: file_uri_tuples.append((myfile, myuri)) if not uri_set: file_uri_tuples.append((myfile, None)) else: for myuri in myuris: if urlparse(myuri).scheme: file_uri_tuples.append((os.path.basename(myuri), myuri)) else: file_uri_tuples.append((os.path.basename(myuri), None)) filedict = OrderedDict() primaryuri_dict = {} thirdpartymirror_uris = {} for myfile, myuri in file_uri_tuples: if myfile not in filedict: filedict[myfile]=[] if distdir_writable: mirror_cache = os.path.join(mysettings["DISTDIR"], ".mirror-cache.json") else: mirror_cache = None for l in locations: filedict[myfile].append(functools.partial( get_mirror_url, l, myfile, mysettings, mirror_cache)) if myuri is None: continue if myuri[:9]=="mirror://": eidx = myuri.find("/", 9) if eidx != -1: mirrorname = myuri[9:eidx] path = myuri[eidx+1:] # Try user-defined mirrors first if mirrorname in custommirrors: for cmirr in custommirrors[mirrorname]: filedict[myfile].append( cmirr.rstrip("/") + "/" + path) # now try the official mirrors if mirrorname in thirdpartymirrors: uris = [locmirr.rstrip("/") + "/" + path \ for locmirr in thirdpartymirrors[mirrorname]] random.shuffle(uris) filedict[myfile].extend(uris) thirdpartymirror_uris.setdefault(myfile, []).extend(uris) if mirrorname not in custommirrors and \ mirrorname not in thirdpartymirrors: writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname)) else: writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1) writemsg(" %s\n" % (myuri), noiselevel=-1) else: if restrict_fetch or force_mirror: # Only fetch from specific mirrors is allowed. continue primaryuris = primaryuri_dict.get(myfile) if primaryuris is None: primaryuris = [] primaryuri_dict[myfile] = primaryuris primaryuris.append(myuri) # Order primaryuri_dict values to match that in SRC_URI. 
for uris in primaryuri_dict.values(): uris.reverse() # Prefer thirdpartymirrors over normal mirrors in cases when # the file does not yet exist on the normal mirrors. for myfile, uris in thirdpartymirror_uris.items(): primaryuri_dict.setdefault(myfile, []).extend(uris) # Now merge primaryuri values into filedict (includes mirrors # explicitly referenced in SRC_URI). if "primaryuri" in restrict: for myfile, uris in filedict.items(): filedict[myfile] = primaryuri_dict.get(myfile, []) + uris else: for myfile in filedict: filedict[myfile] += primaryuri_dict.get(myfile, []) can_fetch=True if listonly: can_fetch = False if can_fetch and not fetch_to_ro: try: _ensure_distdir(mysettings, mysettings["DISTDIR"]) except PortageException as e: if not os.path.isdir(mysettings["DISTDIR"]): writemsg("!!! %s\n" % str(e), noiselevel=-1) writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1) writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1) if can_fetch and \ not fetch_to_ro and \ not os.access(mysettings["DISTDIR"], os.W_OK): writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"], noiselevel=-1) can_fetch = False distdir_writable = can_fetch and not fetch_to_ro failed_files = set() restrict_fetch_msg = False valid_hashes = set(get_valid_checksum_keys()) valid_hashes.discard("size") for myfile in filedict: """ fetched status 0 nonexistent 1 partially downloaded 2 completely downloaded """ fetched = 0 orig_digests = mydigests.get(myfile, {}) if not (allow_missing_digests or listonly): verifiable_hash_types = set(orig_digests).intersection(valid_hashes) if not verifiable_hash_types: expected = " ".join(sorted(valid_hashes)) got = set(orig_digests) got.discard("size") got = " ".join(sorted(got)) reason = (_("Insufficient data for checksum verification"), got, expected) writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if fetchonly: failed_files.add(myfile) continue else: return 0 size = orig_digests.get("size") if size == 0: # Zero-byte distfiles are always invalid, so discard their digests. del mydigests[myfile] orig_digests.clear() size = None pruned_digests = orig_digests if parallel_fetchonly: pruned_digests = {} if size is not None: pruned_digests["size"] = size myfile_path = os.path.join(mysettings["DISTDIR"], myfile) download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix has_space = True has_space_superuser = True file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) else: # check if there is enough space in DISTDIR to completely store myfile # overestimate the filesize so we aren't bitten by FS overhead vfs_stat = None if size is not None and hasattr(os, "statvfs"): try: vfs_stat = os.statvfs(mysettings["DISTDIR"]) except OSError as e: writemsg_level("!!! 
statvfs('%s'): %s\n" % (mysettings["DISTDIR"], e), noiselevel=-1, level=logging.ERROR) del e if vfs_stat is not None: try: mysize = os.stat(myfile_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bavail): if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bfree): has_space_superuser = False if not has_space_superuser: has_space = False elif portage.data.secpass < 2: has_space = False elif userfetch: has_space = False if distdir_writable and use_locks: lock_kwargs = {} if fetchonly: lock_kwargs["flags"] = os.O_NONBLOCK try: file_lock = lockfile(myfile_path, wantnewlockfile=1, **lock_kwargs) except TryAgain: writemsg(_(">>> File '%s' is already locked by " "another fetcher. Continuing...\n") % myfile, noiselevel=-1) continue try: if not listonly: eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET") == "1" match, mystat = _check_distfile( myfile_path, pruned_digests, eout, hash_filter=hash_filter) if match and not force: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if distdir_writable and not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e continue # Remove broken symlinks or symlinks to files which # _check_distfile did not match above. if distdir_writable and mystat is None or os.path.islink(myfile_path): try: os.unlink(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise mystat = None if mystat is not None: if stat.S_ISDIR(mystat.st_mode): writemsg_level( _("!!! Unable to fetch file since " "a directory is in the way: \n" "!!! %s\n") % myfile_path, level=logging.ERROR, noiselevel=-1) return 0 if distdir_writable and not force: # Since _check_distfile did not match above, the file # is either corrupt or its identity has changed since # the last time it was fetched, so rename it. temp_filename = _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) # Stat the temporary download file for comparison with # fetch_resume_size. try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise mystat = None if mystat is not None: if mystat.st_size == 0: if distdir_writable: try: os.unlink(download_path) except OSError: pass elif distdir_writable and size is not None: if mystat.st_size < fetch_resume_size and \ mystat.st_size < size: # If the file already exists and the size does not # match the existing digests, it may be that the # user is attempting to update the digest. In this # case, the digestgen() function will advise the # user to use `ebuild --force foo.ebuild manifest` # in order to force the old digests to be replaced. # Since the user may want to keep this file, rename # it instead of deleting it. writemsg(_(">>> Renaming distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... 
" "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) elif mystat.st_size >= size: temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) if distdir_writable and ro_distdirs: readonly_file = None for x in ro_distdirs: filename = os.path.join(x, myfile) match, mystat = _check_distfile( filename, pruned_digests, eout, hash_filter=hash_filter) if match: readonly_file = filename break if readonly_file is not None: try: os.unlink(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e os.symlink(readonly_file, myfile_path) continue # this message is shown only after we know that # the file is not already fetched if not has_space: writemsg(_("!!! Insufficient space to store %s in %s\n") % \ (myfile, mysettings["DISTDIR"]), noiselevel=-1) if has_space_superuser: writemsg(_("!!! Insufficient privileges to use " "remaining space.\n"), noiselevel=-1) if userfetch: writemsg(_("!!! You may set FEATURES=\"-userfetch\"" " in /etc/portage/make.conf in order to fetch with\n" "!!! superuser privileges.\n"), noiselevel=-1) if fsmirrors and not os.path.exists(myfile_path) and has_space: for mydir in fsmirrors: mirror_file = os.path.join(mydir, myfile) try: shutil.copyfile(mirror_file, download_path) writemsg(_("Local mirror has file: %s\n") % myfile) break except (IOError, OSError) as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e else: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if not os.path.islink(download_path): try: apply_secpass_permissions(download_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(download_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % (e,), noiselevel=-1) # If the file is empty then it's obviously invalid. Remove # the empty file and try to download if possible. if mystat.st_size == 0: if distdir_writable: try: os.unlink(download_path) except EnvironmentError: pass elif not orig_digests: # We don't have a digest, but the file exists. We must # assume that it is fully downloaded. if not force: continue else: if (mydigests[myfile].get("size") is not None and mystat.st_size < mydigests[myfile]["size"] and not restrict_fetch): fetched = 1 # Try to resume this download. elif parallel_fetchonly and \ mystat.st_size == mydigests[myfile]["size"]: eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET") == "1" eout.ebegin( "%s size ;-)" % (myfile, )) eout.eend(0) continue else: digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(download_path, digests) if not verified_ok: writemsg(_("!!! Previously fetched" " file: '%s'\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n" "!!! 
Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 if distdir_writable: temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) else: if not fetch_to_ro: _movefile(download_path, myfile_path, mysettings=mysettings) eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET", None) == "1" if digests: digests = list(digests) digests.sort() eout.ebegin( "%s %s ;-)" % (myfile, " ".join(digests))) eout.eend(0) continue # fetch any remaining files # Create a reversed list since that is optimal for list.pop(). uri_list = filedict[myfile][:] uri_list.reverse() checksum_failure_count = 0 tried_locations = set() while uri_list: loc = uri_list.pop() if isinstance(loc, functools.partial): loc = loc() # Eliminate duplicates here in case we've switched to # "primaryuri" mode on the fly due to a checksum failure. if loc in tried_locations: continue tried_locations.add(loc) if listonly: writemsg_stdout(loc+" ", noiselevel=-1) continue # allow different fetchcommands per protocol protocol = loc[0:loc.find("://")] global_config_path = GLOBAL_CONFIG_PATH if portage.const.EPREFIX: global_config_path = os.path.join(portage.const.EPREFIX, GLOBAL_CONFIG_PATH.lstrip(os.sep)) missing_file_param = False fetchcommand_var = "FETCHCOMMAND_" + protocol.upper() fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: fetchcommand_var = "FETCHCOMMAND" fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: writemsg_level( _("!!! %s is unset. It should " "have been defined in\n!!! %s/make.globals.\n") \ % (fetchcommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in fetchcommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % fetchcommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True resumecommand_var = "RESUMECOMMAND_" + protocol.upper() resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: resumecommand_var = "RESUMECOMMAND" resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: writemsg_level( _("!!! %s is unset. It should " "have been defined in\n!!! %s/make.globals.\n") \ % (resumecommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in resumecommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % resumecommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True if missing_file_param: writemsg_level( _("!!! Refer to the make.conf(5) man page for " "information about how to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) if myfile != os.path.basename(loc): return 0 if not can_fetch: if fetched != 2: try: mysize = os.stat(download_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if mysize == 0: writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile, noiselevel=-1) elif size is None or size > mysize: writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile, noiselevel=-1) else: writemsg(_("!!! 
File %s is incorrect size, " "but unable to retry.\n") % myfile, noiselevel=-1) return 0 else: continue if fetched != 2 and has_space: # we either need to resume or start the download if fetched == 1: try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if distdir_writable and mystat.st_size < fetch_resume_size: writemsg(_(">>> Deleting distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESUME_MIN_SIZE)\n") % mystat.st_size) try: os.unlink(download_path) except OSError as e: if e.errno not in \ (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 if fetched == 1: # resume mode: writemsg(_(">>> Resuming download...\n")) locfetch = resumecommand command_var = resumecommand_var else: # normal mode: locfetch = fetchcommand command_var = fetchcommand_var writemsg_stdout(_(">>> Downloading '%s'\n") % \ _hide_url_passwd(loc)) variables = { "URI": loc, "FILE": os.path.basename(download_path) } for k in ("DISTDIR", "PORTAGE_SSH_OPTS"): v = mysettings.get(k) if v is not None: variables[k] = v myfetch = shlex_split(locfetch) myfetch = [varexpand(x, mydict=variables) for x in myfetch] myret = -1 try: myret = _spawn_fetch(mysettings, myfetch) finally: try: apply_secpass_permissions(download_path, gid=portage_gid, mode=0o664, mask=0o2) except FileNotFound: pass except PortageException as e: if not os.access(download_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e # If the file is empty then it's obviously invalid. Don't # trust the return value from the fetcher. Remove the # empty file and try to download again. try: mystat = os.lstat(download_path) if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)): os.unlink(download_path) fetched = 0 continue except EnvironmentError: pass if mydigests is not None and myfile in mydigests: try: mystat = os.stat(download_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if stat.S_ISDIR(mystat.st_mode): # This can happen if FETCHCOMMAND erroneously # contains wget's -P option where it should # instead have -O. writemsg_level( _("!!! The command specified in the " "%s variable appears to have\n!!! " "created a directory instead of a " "normal file.\n") % command_var, level=logging.ERROR, noiselevel=-1) writemsg_level( _("!!! Refer to the make.conf(5) " "man page for information about how " "to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) return 0 # no exception? file exists. let digestcheck() report # size or checksum errors appropriately # If the fetcher reported success and the file is # too small, it's probably because the digest is # bad (upstream changed the distfile). In this # case we don't want to attempt to resume. Show a # digest verification failure so that the user gets # a clue about what just happened. if myret != os.EX_OK and \ mystat.st_size < mydigests[myfile]["size"]: # Fetch failed... Try the next one... Kill 404 files though. if (mystat.st_size < 100000) and (len(myfile) > 4) and not ((myfile[-5:] == ".html") or (myfile[-4:] == ".htm")): html404 = re.compile("<title>.*(not found|404).*</title>", re.I | re.M) with io.open( _unicode_encode(download_path, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace' ) as f: if html404.search(f.read()): try: os.unlink(download_path) writemsg(_(">>> Deleting invalid distfile. 
(Improper 404 redirect from server.)\n")) fetched = 0 continue except (IOError, OSError): pass fetched = 1 continue if True: # File is the correct size--check the checksums for the fetched # file NOW, for those users who don't have a stable/continuous # net connection. This way we have a chance to try to download # from another mirror... digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(download_path, digests) if not verified_ok: writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 if distdir_writable: temp_filename = \ _checksum_failure_temp_file( mysettings, mysettings["DISTDIR"], os.path.basename(download_path)) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) fetched = 0 checksum_failure_count += 1 if checksum_failure_count == \ checksum_failure_primaryuri: # Switch to "primaryuri" mode in order # to increase the probability of success. primaryuris = \ primaryuri_dict.get(myfile) if primaryuris: uri_list.extend( reversed(primaryuris)) if checksum_failure_count >= \ checksum_failure_max_tries: break else: if not fetch_to_ro: _movefile(download_path, myfile_path, mysettings=mysettings) eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" if digests: eout.ebegin("%s %s ;-)" % \ (myfile, " ".join(sorted(digests)))) eout.eend(0) fetched = 2 break else: # no digests available if not myret: if not fetch_to_ro: _movefile(download_path, myfile_path, mysettings=mysettings) fetched = 2 break elif mydigests is not None: writemsg(_("No digest file available and download failed.\n\n"), noiselevel=-1) finally: if use_locks and file_lock: unlockfile(file_lock) file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) if fetched != 2: if restrict_fetch and not restrict_fetch_msg: restrict_fetch_msg = True msg = _("\n!!! %s/%s" " has fetch restriction turned on.\n" "!!! This probably means that this " "ebuild's files must be downloaded\n" "!!! manually. See the comments in" " the ebuild for more information.\n\n") % \ (mysettings["CATEGORY"], mysettings["PF"]) writemsg_level(msg, level=logging.ERROR, noiselevel=-1) elif restrict_fetch: pass elif listonly: pass elif not filedict[myfile]: writemsg(_("Warning: No mirrors available for file" " '%s'\n") % (myfile), noiselevel=-1) else: writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile, noiselevel=-1) if listonly: failed_files.add(myfile) continue elif fetchonly: failed_files.add(myfile) continue return 0 if failed_files: return 0 return 1
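# The retry flow in fetch() above is easier to see in isolation. Below is a
# minimal stand-alone sketch (not portage code; fetch_one and verify are
# hypothetical callables) of the same pattern: pop candidate URIs from a
# reversed list so they are tried in order, skip locations already tried,
# count checksum failures, and splice the primary SRC_URI locations back into
# the queue once the failure threshold is hit.

def fetch_with_mirror_rotation(uris, primary_uris, fetch_one, verify,
        max_tries=5, primaryuri_threshold=2):
    # Reversed copy so that list.pop() yields URIs in their original order.
    uri_list = list(reversed(uris))
    tried = set()
    failures = 0
    while uri_list:
        loc = uri_list.pop()
        if loc in tried:
            # Duplicates can appear after primary URIs are spliced in.
            continue
        tried.add(loc)
        if fetch_one(loc) and verify(loc):
            return True
        failures += 1
        if failures == primaryuri_threshold:
            # Behave as if RESTRICT="primaryuri" were set: prefer the
            # upstream locations over the generic mirrors from here on.
            uri_list.extend(reversed(primary_uris))
        if failures >= max_tries:
            break
    return False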
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets): user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH) try: os.makedirs(user_config_dir) except os.error: pass for repo in self.repo_dirs: repo_dir = self._get_repo_dir(repo) profile_dir = os.path.join(self._get_repo_dir(repo), "profiles") metadata_dir = os.path.join(repo_dir, "metadata") os.makedirs(metadata_dir) #Create $REPO/profiles/categories categories = set() for cpv in ebuilds: ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo if ebuilds_repo is None: ebuilds_repo = "test_repo" if ebuilds_repo == repo: categories.add(catsplit(cpv)[0]) categories_file = os.path.join(profile_dir, "categories") f = open(categories_file, "w") for cat in categories: f.write(cat + "\n") f.close() #Create $REPO/profiles/license_groups license_file = os.path.join(profile_dir, "license_groups") f = open(license_file, "w") f.write("EULA TEST\n") f.close() repo_config = repo_configs.get(repo) if repo_config: for config_file, lines in repo_config.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) if config_file in ("layout.conf", ): file_name = os.path.join(repo_dir, "metadata", config_file) else: file_name = os.path.join(profile_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there) os.makedirs(os.path.join(repo_dir, "eclass")) if repo == "test_repo": #Create a minimal profile in /usr/portage sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile") os.makedirs(sub_profile_dir) eapi_file = os.path.join(sub_profile_dir, "eapi") f = open(eapi_file, "w") f.write("0\n") f.close() make_defaults_file = os.path.join(sub_profile_dir, "make.defaults") f = open(make_defaults_file, "w") f.write("ARCH=\"x86\"\n") f.write("ACCEPT_KEYWORDS=\"x86\"\n") f.close() use_force_file = os.path.join(sub_profile_dir, "use.force") f = open(use_force_file, "w") f.write("x86\n") f.close() parent_file = os.path.join(sub_profile_dir, "parent") f = open(parent_file, "w") f.write("..\n") f.close() if profile: for config_file, lines in profile.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(sub_profile_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create profile symlink os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile")) #Create minimal herds.xml herds_xml = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd"> <?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?> <?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?> <herds> <herd> <name>base-system</name> <email>[email protected]</email> <description>Core system utilities and libraries.</description> <maintainer> <email>[email protected]</email> <name>Base System</name> <role>Base System Maintainer</role> </maintainer> </herd> </herds> """ with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f: f.write(herds_xml) # Write empty entries for each repository, in order to exercise # RepoConfigLoader's repos.conf processing. 
repos_conf_file = os.path.join(user_config_dir, "repos.conf") f = open(repos_conf_file, "w") for repo in sorted(self.repo_dirs.keys()): f.write("[%s]\n" % repo) f.write("\n") f.close() for config_file, lines in user_config.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(user_config_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create /usr/share/portage/config/make.globals make_globals_path = os.path.join(self.eroot, GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals") ensure_dirs(os.path.dirname(make_globals_path)) os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"), make_globals_path) #Create /usr/share/portage/config/sets/portage.conf default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets") try: os.makedirs(default_sets_conf_dir) except os.error: pass provided_sets_portage_conf = \ os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf") os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf")) set_config_dir = os.path.join(user_config_dir, "sets") try: os.makedirs(set_config_dir) except os.error: pass for sets_file, lines in sets.items(): file_name = os.path.join(set_config_dir, sets_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() user_config_dir = os.path.join(self.eroot, "etc", "portage") try: os.makedirs(user_config_dir) except os.error: pass for config_file, lines in user_config.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(user_config_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close()
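# The repos.conf stub written above is plain INI text (one empty section per
# repository), so an equivalent helper could lean on configparser instead of
# hand-formatted writes. A minimal sketch, under that assumption
# (write_repos_conf_stub is hypothetical, not a portage API):

import configparser

def write_repos_conf_stub(path, repo_names):
    parser = configparser.ConfigParser()
    for name in sorted(repo_names):
        # An empty section exercises RepoConfigLoader's repos.conf
        # processing without overriding any repository settings.
        parser.add_section(name)
    with open(path, "w") as f:
        parser.write(f)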
def testBlockerFileCollision(self): debug = False install_something = """ S="${WORKDIR}" src_install() { einfo "installing something..." insinto /usr/lib echo "${PN}" > "${T}/file-collision" doins "${T}/file-collision" } """ ebuilds = { "dev-libs/A-1" : { "EAPI": "6", "MISC_CONTENT": install_something, "RDEPEND": "!dev-libs/B", }, "dev-libs/B-1" : { "EAPI": "6", "MISC_CONTENT": install_something, "RDEPEND": "!dev-libs/A", }, } playground = ResolverPlayground(ebuilds=ebuilds, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH) portage_python = portage._python_interpreter emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "emerge")) file_collision = os.path.join(eroot, 'usr/lib/file-collision') test_commands = ( emerge_cmd + ("--oneshot", "dev-libs/A",), (lambda: portage.util.grablines(file_collision) == ["A\n"],), emerge_cmd + ("--oneshot", "dev-libs/B",), (lambda: portage.util.grablines(file_collision) == ["B\n"],), emerge_cmd + ("--oneshot", "dev-libs/A",), (lambda: portage.util.grablines(file_collision) == ["A\n"],), ({"FEATURES":"parallel-install"},) + emerge_cmd + ("--oneshot", "dev-libs/B",), (lambda: portage.util.grablines(file_collision) == ["B\n"],), ({"FEATURES":"parallel-install"},) + emerge_cmd + ("-Cq", "dev-libs/B",), (lambda: not os.path.exists(file_collision),), ) fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") profile_path = settings.profile_path path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "PATH" : path, "PORTAGE_PYTHON" : portage_python, "PORTAGE_REPOSITORIES" : settings.repositories.config_string(), "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""), "PYTHONPATH" : pythonpath, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] dirs = [playground.distdir, fake_bin, portage_tmpdir, user_config_dir, var_cache_edb] true_symlinks = ["chown", "chgrp"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) with open(os.path.join(var_cache_edb, "counter"), 'wb') as f: f.write(b"100") # non-empty system set keeps --unmerge quiet with open(os.path.join(profile_path, "packages"), 'w') as f: f.write("*dev-libs/token-system-pkg") if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. 
stdout = subprocess.PIPE for i, args in enumerate(test_commands): if hasattr(args[0], '__call__'): self.assertTrue(args[0](), "callable at index %s failed" % (i,)) continue if isinstance(args[0], dict): local_env = env.copy() local_env.update(args[0]) args = args[1:] else: local_env = env proc = subprocess.Popen(args, env=local_env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "emerge failed with args %s" % (args,)) finally: playground.debug = False playground.cleanup()
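# test_commands above mixes three entry shapes: a plain argv tuple, a tuple
# whose first element is a dict of environment overrides, and a tuple whose
# first element is a callable assertion. The dispatch logic, factored into a
# stand-alone sketch (run_table is hypothetical; it raises on the first
# failing command instead of collecting unittest assertions):

import subprocess

def run_table(test_commands, env):
    for i, args in enumerate(test_commands):
        if callable(args[0]):
            assert args[0](), "callable at index %s failed" % (i,)
            continue
        if isinstance(args[0], dict):
            # Per-command environment overrides, e.g. FEATURES tweaks.
            local_env = dict(env, **args[0])
            args = args[1:]
        else:
            local_env = env
        subprocess.check_call(args, env=local_env)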
def _create_profile(self, ebuilds, installed, profile, user_config, sets): #Create $PORTDIR/profiles/categories categories = set() for cpv in chain(ebuilds.keys(), installed.keys()): categories.add(catsplit(cpv)[0]) profile_dir = os.path.join(self.portdir, "profiles") try: os.makedirs(profile_dir) except os.error: pass categories_file = os.path.join(profile_dir, "categories") f = open(categories_file, "w") for cat in categories: f.write(cat + "\n") f.close() #Create $REPO/profiles/license_groups license_file = os.path.join(profile_dir, "license_groups") f = open(license_file, "w") f.write("EULA TEST\n") f.close() #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there) os.makedirs(os.path.join(self.portdir, "eclass")) sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile") os.makedirs(sub_profile_dir) eapi_file = os.path.join(sub_profile_dir, "eapi") f = open(eapi_file, "w") f.write("0\n") f.close() make_defaults_file = os.path.join(sub_profile_dir, "make.defaults") f = open(make_defaults_file, "w") f.write("ARCH=\"x86\"\n") f.write("ACCEPT_KEYWORDS=\"x86\"\n") f.close() use_force_file = os.path.join(sub_profile_dir, "use.force") f = open(use_force_file, "w") f.write("x86\n") f.close() if profile: #This is meant to allow the consumer to set up his own profile, #with package.mask and what not. raise NotImplementedError() #Create profile symlink os.makedirs(os.path.join(self.eroot, "etc")) os.symlink(sub_profile_dir, os.path.join(self.eroot, "etc", "make.profile")) user_config_dir = os.path.join(self.eroot, "etc", "portage") try: os.makedirs(user_config_dir) except os.error: pass for config_file, lines in user_config.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(user_config_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create /usr/share/portage/config/sets/portage.conf default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets") try: os.makedirs(default_sets_conf_dir) except os.error: pass provided_sets_portage_conf = \ os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf") os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf")) set_config_dir = os.path.join(user_config_dir, "sets") try: os.makedirs(set_config_dir) except os.error: pass for sets_file, lines in sets.items(): file_name = os.path.join(set_config_dir, sets_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close()
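# The open/write/close triples in this older _create_profile variant predate
# context managers; every one of them is the same "write these lines to this
# file" operation. A sketch of the obvious helper (write_lines is
# hypothetical, not a portage API):

def write_lines(file_name, lines):
    with open(file_name, "w") as f:
        for line in lines:
            f.write("%s\n" % line)

# e.g. write_lines(categories_file, sorted(categories)) in place of the
# explicit loop above.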
def _testEbuildFetch( self, loop, scheme, host, orig_distfiles, ebuilds, content, server, playground, ro_distdir, ): mirror_layouts = ( ( "[structure]", "0=filename-hash BLAKE2B 8", "1=flat", ), ( "[structure]", "1=filename-hash BLAKE2B 8", "0=flat", ), ( "[structure]", "0=content-hash SHA512 8:8:8", "1=flat", ), ) fetchcommand = portage.util.shlex_split( playground.settings["FETCHCOMMAND"]) fetch_bin = portage.process.find_binary(fetchcommand[0]) if fetch_bin is None: self.skipTest("FETCHCOMMAND not found: {}".format( playground.settings["FETCHCOMMAND"])) eubin = os.path.join(playground.eprefix, "usr", "bin") os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin))) resumecommand = portage.util.shlex_split( playground.settings["RESUMECOMMAND"]) resume_bin = portage.process.find_binary(resumecommand[0]) if resume_bin is None: self.skipTest("RESUMECOMMAND not found: {}".format( playground.settings["RESUMECOMMAND"])) if resume_bin != fetch_bin: os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin))) root_config = playground.trees[playground.eroot]["root_config"] portdb = root_config.trees["porttree"].dbapi def run_async(func, *args, **kwargs): with ForkExecutor(loop=loop) as executor: return loop.run_until_complete( loop.run_in_executor( executor, functools.partial(func, *args, **kwargs))) for layout_lines in mirror_layouts: settings = config(clone=playground.settings) layout_data = "".join("{}\n".format(line) for line in layout_lines) mirror_conf = MirrorLayoutConfig() mirror_conf.read_from_file(io.StringIO(layout_data)) layouts = mirror_conf.get_all_layouts() content["/distfiles/layout.conf"] = layout_data.encode("utf8") distfiles = {} for k, v in orig_distfiles.items(): filename = DistfileName( k, digests=dict((algo, checksum_str(v, hashname=algo)) for algo in MANIFEST2_HASH_DEFAULTS), ) distfiles[filename] = v # mirror path for layout in layouts: content["/distfiles/" + layout.get_path(filename)] = v # upstream path content["/distfiles/{}.txt".format(k)] = v shutil.rmtree(settings["DISTDIR"]) os.makedirs(settings["DISTDIR"]) with open(os.path.join(settings["DISTDIR"], "layout.conf"), "wt") as f: f.write(layout_data) if any( isinstance(layout, ContentHashLayout) for layout in layouts): content_db = os.path.join(playground.eprefix, "var/db/emirrordist/content.db") os.makedirs(os.path.dirname(content_db), exist_ok=True) try: os.unlink(content_db) except OSError: pass else: content_db = None # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given. foo_uri = { "foo": ("{scheme}://{host}:{port}/distfiles/foo".format( scheme=scheme, host=host, port=server.server_port), ) } foo_path = os.path.join(settings["DISTDIR"], "foo") foo_stale_content = b"stale content\n" with open(foo_path, "wb") as f: f.write(b"stale content\n") self.assertTrue( bool(run_async(fetch, foo_uri, settings, try_mirrors=False))) with open(foo_path, "rb") as f: self.assertEqual(f.read(), foo_stale_content) with open(foo_path, "rb") as f: self.assertNotEqual(f.read(), distfiles["foo"]) # Use force=True to update the stale file. self.assertTrue( bool( run_async(fetch, foo_uri, settings, try_mirrors=False, force=True))) with open(foo_path, "rb") as f: self.assertEqual(f.read(), distfiles["foo"]) # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR. # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that # FETCHCOMMAND must perform atomic rename itself due to read-only # DISTDIR. 
with open(foo_path, "wb") as f: f.write(b"stale content\n") orig_fetchcommand = settings["FETCHCOMMAND"] orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode temp_fetchcommand = os.path.join(eubin, "fetchcommand") with open(temp_fetchcommand, "w") as f: f.write(""" set -e URI=$1 DISTDIR=$2 FILE=$3 trap 'chmod a-w "${DISTDIR}"' EXIT chmod ug+w "${DISTDIR}" %s mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}" """ % orig_fetchcommand.replace("${FILE}", "${FILE}.__download__")) settings[ "FETCHCOMMAND"] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % ( BASH_BINARY, temp_fetchcommand, ) settings.features.add("skiprocheck") settings.features.remove("distlocks") os.chmod(settings["DISTDIR"], 0o555) try: self.assertTrue( bool( run_async(fetch, foo_uri, settings, try_mirrors=False, force=True))) finally: settings["FETCHCOMMAND"] = orig_fetchcommand os.chmod(settings["DISTDIR"], orig_distdir_mode) settings.features.remove("skiprocheck") settings.features.add("distlocks") os.unlink(temp_fetchcommand) with open(foo_path, "rb") as f: self.assertEqual(f.read(), distfiles["foo"]) # Test emirrordist invocation. emirrordist_cmd = ( portage._python_interpreter, "-b", "-Wd", os.path.join(self.bindir, "emirrordist"), "--distfiles", settings["DISTDIR"], "--config-root", settings["EPREFIX"], "--delete", "--repositories-configuration", settings.repositories.config_string(), "--repo", "test_repo", "--mirror", ) if content_db is not None: emirrordist_cmd = emirrordist_cmd + ( "--content-db", content_db, ) env = settings.environ() env["PYTHONPATH"] = ":".join( filter( None, [PORTAGE_PYM_PATH] + os.environ.get("PYTHONPATH", "").split(":"), )) for k in distfiles: try: os.unlink(os.path.join(settings["DISTDIR"], k)) except OSError: pass proc = loop.run_until_complete( asyncio.create_subprocess_exec(*emirrordist_cmd, env=env)) self.assertEqual(loop.run_until_complete(proc.wait()), 0) for k in distfiles: with open( os.path.join(settings["DISTDIR"], layouts[0].get_path(k)), "rb") as f: self.assertEqual(f.read(), distfiles[k]) if content_db is not None: loop.run_until_complete( self._test_content_db( emirrordist_cmd, env, layouts, content_db, distfiles, settings, portdb, )) # Tests only work with one ebuild at a time, so the config # pool only needs a single config instance. 
class config_pool: @staticmethod def allocate(): return settings @staticmethod def deallocate(settings): pass def async_fetch(pkg, ebuild_path): fetcher = EbuildFetcher( config_pool=config_pool, ebuild_path=ebuild_path, fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop, ) fetcher.start() return fetcher.async_wait() for cpv in ebuilds: metadata = dict( zip( Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys), )) pkg = Package( built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name="ebuild", ) settings.setcpv(pkg) ebuild_path = portdb.findname(pkg.cpv) portage.doebuild_environment(ebuild_path, "fetch", settings=settings, db=portdb) # Test good files in DISTDIR for k in settings["AA"].split(): os.stat(os.path.join(settings["DISTDIR"], k)) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) # Test digestgen with fetch os.unlink( os.path.join(os.path.dirname(ebuild_path), "Manifest")) for k in settings["AA"].split(): os.unlink(os.path.join(settings["DISTDIR"], k)) with ForkExecutor(loop=loop) as executor: self.assertTrue( bool( loop.run_until_complete( loop.run_in_executor( executor, functools.partial(digestgen, mysettings=settings, myportdb=portdb), )))) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) # Test missing files in DISTDIR for k in settings["AA"].split(): os.unlink(os.path.join(settings["DISTDIR"], k)) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) # Test empty files in DISTDIR for k in settings["AA"].split(): file_path = os.path.join(settings["DISTDIR"], k) with open(file_path, "wb") as f: pass self.assertEqual(os.stat(file_path).st_size, 0) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) # Test non-empty files containing null bytes in DISTDIR for k in settings["AA"].split(): file_path = os.path.join(settings["DISTDIR"], k) with open(file_path, "wb") as f: f.write(len(distfiles[k]) * b"\0") self.assertEqual( os.stat(file_path).st_size, len(distfiles[k])) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) # Test PORTAGE_RO_DISTDIRS settings["PORTAGE_RO_DISTDIRS"] = '"{}"'.format(ro_distdir) orig_fetchcommand = settings["FETCHCOMMAND"] orig_resumecommand = settings["RESUMECOMMAND"] try: settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = "" for k in settings["AA"].split(): file_path = os.path.join(settings["DISTDIR"], k) os.rename(file_path, os.path.join(ro_distdir, k)) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): file_path = os.path.join(settings["DISTDIR"], k) self.assertTrue(os.path.islink(file_path)) with open(file_path, "rb") as f: self.assertEqual(f.read(), distfiles[k]) os.unlink(file_path) finally: settings.pop("PORTAGE_RO_DISTDIRS") settings["FETCHCOMMAND"] = orig_fetchcommand settings["RESUMECOMMAND"] = orig_resumecommand # Test 
local filesystem in GENTOO_MIRRORS orig_mirrors = settings["GENTOO_MIRRORS"] orig_fetchcommand = settings["FETCHCOMMAND"] try: settings["GENTOO_MIRRORS"] = ro_distdir settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = "" self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) finally: settings["GENTOO_MIRRORS"] = orig_mirrors settings["FETCHCOMMAND"] = orig_fetchcommand settings["RESUMECOMMAND"] = orig_resumecommand # Test readonly DISTDIR orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode try: os.chmod(settings["DISTDIR"], 0o555) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) finally: os.chmod(settings["DISTDIR"], orig_distdir_mode) # Test parallel-fetch mode settings["PORTAGE_PARALLEL_FETCHONLY"] = "1" try: self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) for k in settings["AA"].split(): os.unlink(os.path.join(settings["DISTDIR"], k)) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) finally: settings.pop("PORTAGE_PARALLEL_FETCHONLY") # Test RESUMECOMMAND orig_resume_min_size = settings[ "PORTAGE_FETCH_RESUME_MIN_SIZE"] try: settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = "2" for k in settings["AA"].split(): file_path = os.path.join(settings["DISTDIR"], k) os.unlink(file_path) with open(file_path + _download_suffix, "wb") as f: f.write(distfiles[k][:2]) self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings["AA"].split(): with open(os.path.join(settings["DISTDIR"], k), "rb") as f: self.assertEqual(f.read(), distfiles[k]) finally: settings[ "PORTAGE_FETCH_RESUME_MIN_SIZE"] = orig_resume_min_size # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR orig_fetchcommand = settings["FETCHCOMMAND"] orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode for k in settings["AA"].split(): os.unlink(os.path.join(settings["DISTDIR"], k)) try: os.chmod(settings["DISTDIR"], 0o555) settings["FETCHCOMMAND"] = ( '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"'))) settings.features.add("skiprocheck") settings.features.remove("distlocks") self.assertEqual( loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) finally: settings["FETCHCOMMAND"] = orig_fetchcommand os.chmod(settings["DISTDIR"], orig_distdir_mode) settings.features.remove("skiprocheck") settings.features.add("distlocks")
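# Both read-only DISTDIR cases above wrap FETCHCOMMAND in a bash -c snippet
# that temporarily re-grants write permission. A sketch of that wrapper
# construction (make_rw_wrapper is hypothetical; bash_binary is assumed to be
# a path to bash, as BASH_BINARY is in the tests):

def make_rw_wrapper(bash_binary, fetchcommand):
    # Embedded quotes are escaped so the command survives nesting inside
    # the outer double-quoted bash -c argument, and \$ keeps the status
    # variable from being expanded before bash itself runs the snippet.
    inner = ('chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; '
             'chmod a-w \\"${DISTDIR}\\"; exit \\$status'
             % fetchcommand.replace('"', '\\"'))
    return '"%s" -c "%s"' % (bash_binary, inner)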
def testSlotAbiEmerge(self): debug = False ebuilds = { "dev-libs/glib-1.2.10" : { "SLOT": "1" }, "dev-libs/glib-2.30.2" : { "EAPI": "4-slot-abi", "SLOT": "2/2.30" }, "dev-libs/glib-2.32.3" : { "EAPI": "4-slot-abi", "SLOT": "2/2.32" }, "dev-libs/dbus-glib-0.98" : { "EAPI": "4-slot-abi", "DEPEND": "dev-libs/glib:2=", "RDEPEND": "dev-libs/glib:2=" }, } installed = { "dev-libs/glib-1.2.10" : { "EAPI": "4-slot-abi", "SLOT": "1" }, "dev-libs/glib-2.30.2" : { "EAPI": "4-slot-abi", "SLOT": "2/2.30" }, "dev-libs/dbus-glib-0.98" : { "EAPI": "4-slot-abi", "DEPEND": "dev-libs/glib:2/2.30=", "RDEPEND": "dev-libs/glib:2/2.30=" }, } world = ["dev-libs/glib:1", "dev-libs/dbus-glib"] playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] trees = playground.trees portdb = trees[eroot]["porttree"].dbapi vardb = trees[eroot]["vartree"].dbapi var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH) package_mask_path = os.path.join(user_config_dir, "package.mask") portage_python = portage._python_interpreter ebuild_cmd = (portage_python, "-b", "-Wd", os.path.join(PORTAGE_BIN_PATH, "ebuild")) emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(PORTAGE_BIN_PATH, "emerge")) test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98") self.assertFalse(test_ebuild is None) test_commands = ( emerge_cmd + ("--oneshot", "dev-libs/glib",), (lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],), (BASH_BINARY, "-c", "echo %s >> %s" % tuple(map(portage._shell_quote, (">=dev-libs/glib-2.32", package_mask_path,)))), emerge_cmd + ("--oneshot", "dev-libs/glib",), (lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],), ) distdir = playground.distdir pkgdir = playground.pkgdir fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") profile_path = settings.profile_path path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "PATH" : path, "PORTAGE_PYTHON" : portage_python, "PORTAGE_REPOSITORIES" : settings.repositories.config_string(), "PYTHONPATH" : pythonpath, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] dirs = [distdir, fake_bin, portage_tmpdir, user_config_dir, var_cache_edb] true_symlinks = ["chown", "chgrp"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) with open(os.path.join(var_cache_edb, "counter"), 'wb') as f: f.write(b"100") # non-empty system set keeps --depclean quiet with open(os.path.join(profile_path, "packages"), 'w') as f: f.write("*dev-libs/token-system-pkg") if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. 
stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. stdout = subprocess.PIPE for i, args in enumerate(test_commands): if hasattr(args[0], '__call__'): self.assertTrue(args[0](), "callable at index %s failed" % (i,)) continue proc = subprocess.Popen(args, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "emerge failed with args %s" % (args,)) finally: playground.cleanup()
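# The masking step in test_commands above appends to package.mask through a
# shell echo so that it runs between the two emerge invocations. Done
# directly in Python, the same step is just an append (append_package_mask
# is a hypothetical helper):

def append_package_mask(package_mask_path, atom):
    with open(package_mask_path, "a") as f:
        f.write("%s\n" % atom)

# e.g. append_package_mask(package_mask_path, ">=dev-libs/glib-2.32"), after
# which re-emerging glib should rewrite the slot-operator RDEPEND recorded
# for dev-libs/dbus-glib-0.98 back to dev-libs/glib:2/2.30=.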
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets): user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH) try: os.makedirs(user_config_dir) except os.error: pass for repo in self._repositories: if repo == "DEFAULT": continue repo_dir = self._get_repo_dir(repo) profile_dir = os.path.join(repo_dir, "profiles") metadata_dir = os.path.join(repo_dir, "metadata") os.makedirs(metadata_dir) #Create $REPO/profiles/categories categories = set() for cpv in ebuilds: ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo if ebuilds_repo is None: ebuilds_repo = "test_repo" if ebuilds_repo == repo: categories.add(catsplit(cpv)[0]) categories_file = os.path.join(profile_dir, "categories") with open(categories_file, "w") as f: for cat in categories: f.write(cat + "\n") #Create $REPO/profiles/license_groups license_file = os.path.join(profile_dir, "license_groups") with open(license_file, "w") as f: f.write("EULA TEST\n") repo_config = repo_configs.get(repo) if repo_config: for config_file, lines in repo_config.items(): if config_file not in self.config_files and not any( fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files): raise ValueError("Unknown config file: '%s'" % config_file) if config_file in ("layout.conf", ): file_name = os.path.join(repo_dir, "metadata", config_file) else: file_name = os.path.join(profile_dir, config_file) if "/" in config_file and not os.path.isdir( os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) # Temporarily write empty value of masters until it becomes default. # TODO: Delete all references to "# use implicit masters" when empty value becomes default. if config_file == "layout.conf" and not any( line.startswith(("masters =", "# use implicit masters")) for line in lines): f.write("masters =\n") #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there) os.makedirs(os.path.join(repo_dir, "eclass")) # Temporarily write empty value of masters until it becomes default. 
if not repo_config or "layout.conf" not in repo_config: layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf") with open(layout_conf_path, "w") as f: f.write("masters =\n") if repo == "test_repo": #Create a minimal profile in /usr/portage sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile") os.makedirs(sub_profile_dir) if not (profile and "eapi" in profile): eapi_file = os.path.join(sub_profile_dir, "eapi") with open(eapi_file, "w") as f: f.write("0\n") make_defaults_file = os.path.join(sub_profile_dir, "make.defaults") with open(make_defaults_file, "w") as f: f.write("ARCH=\"x86\"\n") f.write("ACCEPT_KEYWORDS=\"x86\"\n") use_force_file = os.path.join(sub_profile_dir, "use.force") with open(use_force_file, "w") as f: f.write("x86\n") parent_file = os.path.join(sub_profile_dir, "parent") with open(parent_file, "w") as f: f.write("..\n") if profile: for config_file, lines in profile.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(sub_profile_dir, config_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) #Create profile symlink os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile")) #Create minimal herds.xml herds_xml = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd"> <?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?> <?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?> <herds> <herd> <name>base-system</name> <email>[email protected]</email> <description>Core system utilities and libraries.</description> <maintainer> <email>[email protected]</email> <name>Base System</name> <role>Base System Maintainer</role> </maintainer> </herd> </herds> """ with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f: f.write(herds_xml) make_conf = { "ACCEPT_KEYWORDS": "x86", "CLEAN_DELAY": "0", "DISTDIR": self.distdir, "EMERGE_WARNING_DELAY": "0", "PKGDIR": self.pkgdir, "PORTAGE_INST_GID": str(portage.data.portage_gid), "PORTAGE_INST_UID": str(portage.data.portage_uid), "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'), } if os.environ.get("NOCOLOR"): make_conf["NOCOLOR"] = os.environ["NOCOLOR"] # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they # need to be inherited by ebuild subprocesses. 
if 'PORTAGE_USERNAME' in os.environ: make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME'] if 'PORTAGE_GRPNAME' in os.environ: make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME'] make_conf_lines = [] for k_v in make_conf.items(): make_conf_lines.append('%s="%s"' % k_v) if "make.conf" in user_config: make_conf_lines.extend(user_config["make.conf"]) if not portage.process.sandbox_capable or \ os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances make_conf_lines.append( 'FEATURES="${FEATURES} -sandbox -usersandbox"') configs = user_config.copy() configs["make.conf"] = make_conf_lines for config_file, lines in configs.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(user_config_dir, config_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) #Create /usr/share/portage/config/make.globals make_globals_path = os.path.join(self.eroot, GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals") ensure_dirs(os.path.dirname(make_globals_path)) os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path) #Create /usr/share/portage/config/sets/portage.conf default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets") try: os.makedirs(default_sets_conf_dir) except os.error: pass provided_sets_portage_conf = (os.path.join(cnf_path, "sets", "portage.conf")) os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf")) set_config_dir = os.path.join(user_config_dir, "sets") try: os.makedirs(set_config_dir) except os.error: pass for sets_file, lines in sets.items(): file_name = os.path.join(set_config_dir, sets_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line)
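# make.conf above is rendered from a plain dict, one KEY="value" line per
# entry, with user-supplied lines appended afterwards. That rendering step as
# a small sketch (render_make_conf is hypothetical):

def render_make_conf(make_conf, extra_lines=()):
    lines = ['%s="%s"' % kv for kv in make_conf.items()]
    lines.extend(extra_lines)
    return "\n".join(lines) + "\n"

# e.g. render_make_conf({"CLEAN_DELAY": "0"}, ['FEATURES="-sandbox"'])
# yields 'CLEAN_DELAY="0"\nFEATURES="-sandbox"\n'.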
def testSimple(self): debug = False skip_reason = self._must_skip() if skip_reason: self.portage_skip = skip_reason self.assertFalse(True, skip_reason) return copyright_header = """# Copyright 1999-%s Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ """ % time.gmtime().tm_year repo_configs = { "test_repo": { "layout.conf": ( "update-changelog = true", ), } } profiles = ( ("x86", "default/linux/x86/test_profile", "stable"), ) ebuilds = { "dev-libs/A-1": { "COPYRIGHT_HEADER" : copyright_header, "DESCRIPTION" : "Desc goes here", "EAPI" : "4", "HOMEPAGE" : "http://example.com", "IUSE" : "flag", "KEYWORDS": "~x86", "LICENSE": "GPL-2", "RDEPEND": "flag? ( dev-libs/B[flag] )", }, "dev-libs/B-1": { "COPYRIGHT_HEADER" : copyright_header, "DESCRIPTION" : "Desc goes here", "EAPI" : "4", "HOMEPAGE" : "http://example.com", "IUSE" : "flag", "KEYWORDS": "~x86", "LICENSE": "GPL-2", }, } licenses = ["GPL-2"] arch_list = ["x86"] metadata_dtd = os.path.join(PORTAGE_BASE_PATH, "cnf/metadata.dtd") metadata_xml_files = ( ( "dev-libs/A", { "herd" : "base-system", "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ( "dev-libs/B", { "herd" : "no-herd", "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ) use_desc = ( ("flag", "Description of how USE='flag' affects packages"), ) playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] portdb = playground.trees[playground.eroot]["porttree"].dbapi homedir = os.path.join(eroot, "home") distdir = os.path.join(eprefix, "distdir") portdir = settings["PORTDIR"] profiles_dir = os.path.join(portdir, "profiles") license_dir = os.path.join(portdir, "licenses") repoman_cmd = (portage._python_interpreter, "-Wd", os.path.join(PORTAGE_BIN_PATH, "repoman")) git_binary = find_binary("git") git_cmd = (git_binary,) cp_binary = find_binary("cp") self.assertEqual(cp_binary is None, False, "cp command not found") cp_cmd = (cp_binary,) test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) committer_name = "Gentoo Dev" committer_email = "*****@*****.**" git_test = ( ("", repoman_cmd + ("manifest",)), ("", git_cmd + ("config", "--global", "user.name", committer_name,)), ("", git_cmd + ("config", "--global", "user.email", committer_email,)), ("", git_cmd + ("init-db",)), ("", git_cmd + ("add", ".")), ("", git_cmd + ("commit", "-a", "-m", "add whole repo")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")), ("", repoman_cmd + ("commit", "-m", "bump to version 2")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")), ("dev-libs", repoman_cmd + ("commit", "-m", "bump to version 3")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")), ("dev-libs/A", repoman_cmd + ("commit", "-m", "bump to version 4")), ) pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "DISTDIR" : distdir, "GENTOO_COMMITTER_NAME" : 
committer_name, "GENTOO_COMMITTER_EMAIL" : committer_email, "HOME" : homedir, "PATH" : os.environ["PATH"], "PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"], "PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"], "PORTDIR" : portdir, "PYTHONPATH" : pythonpath, } if os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances env["FEATURES"] = "-sandbox" dirs = [homedir, license_dir, profiles_dir, distdir] try: for d in dirs: ensure_dirs(d) with open(os.path.join(portdir, "skel.ChangeLog"), 'w') as f: f.write(copyright_header) with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f: for x in profiles: f.write("%s %s %s\n" % x) for x in licenses: open(os.path.join(license_dir, x), 'wb').close() with open(os.path.join(profiles_dir, "arch.list"), 'w') as f: for x in arch_list: f.write("%s\n" % x) with open(os.path.join(profiles_dir, "use.desc"), 'w') as f: for k, v in use_desc: f.write("%s - %s\n" % (k, v)) for cp, xml_data in metadata_xml_files: with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f: f.write(playground.metadata_xml_template % xml_data) # Use a symlink to portdir, in order to trigger bugs # involving canonical vs. non-canonical paths. portdir_symlink = os.path.join(eroot, "portdir_symlink") os.symlink(portdir, portdir_symlink) # repoman checks metadata.dtd for recent CTIME, so copy the file in # order to ensure that the CTIME is current shutil.copyfile(metadata_dtd, os.path.join(distdir, "metadata.dtd")) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. stdout = subprocess.PIPE for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"): abs_cwd = os.path.join(portdir_symlink, cwd) proc = subprocess.Popen([portage._python_interpreter, "-Wd", os.path.join(PORTAGE_BIN_PATH, "repoman"), "full"], cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "repoman failed in %s" % (cwd,)) if git_binary is not None: for cwd, cmd in git_test: abs_cwd = os.path.join(portdir_symlink, cwd) proc = subprocess.Popen(cmd, cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "%s failed in %s" % (cmd, cwd,)) finally: playground.cleanup()
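# Both loops above repeat the same idiom: capture stdout, and replay it to
# stderr only when the command fails. Factored out as a sketch (run_quietly
# is hypothetical; the tests keep the inline form so that debug mode can
# hand the subprocess the real stdout instead):

import subprocess
import sys

def run_quietly(cmd, **popen_kwargs):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, **popen_kwargs)
    output = proc.stdout.readlines()
    proc.wait()
    proc.stdout.close()
    if proc.returncode != 0:
        # Replay the captured output only on failure.
        for line in output:
            sys.stderr.write(line.decode(errors="replace"))
    return proc.returncode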
async def _async_test_simple(self, playground, metadata_xml_files, loop): debug = playground.debug settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] trees = playground.trees portdb = trees[eroot]["porttree"].dbapi test_repo_location = settings.repositories["test_repo"].location var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") cachedir = os.path.join(var_cache_edb, "dep") cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache") portage_python = portage._python_interpreter dispatch_conf_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "dispatch-conf"), ) ebuild_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "ebuild")) egencache_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.bindir, "egencache"), "--repo", "test_repo", "--repositories-configuration", settings.repositories.config_string(), ) emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "emerge")) emaint_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "emaint")) env_update_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "env-update"), ) etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir, "etc-update")) fixpackages_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "fixpackages"), ) portageq_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.bindir, "portageq"), ) quickpkg_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.bindir, "quickpkg"), ) regenworld_cmd = ( portage_python, "-b", "-Wd", os.path.join(self.sbindir, "regenworld"), ) rm_binary = find_binary("rm") self.assertEqual(rm_binary is None, False, "rm command not found") rm_cmd = (rm_binary, ) egencache_extra_args = [] if self._have_python_xml(): egencache_extra_args.append("--update-use-local-desc") test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) cross_prefix = os.path.join(eprefix, "cross_prefix") cross_root = os.path.join(eprefix, "cross_root") cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep)) binhost_dir = os.path.join(eprefix, "binhost") binhost_address = "127.0.0.1" binhost_remote_path = "/binhost" binhost_server = AsyncHTTPServer( binhost_address, BinhostContentMap(binhost_remote_path, binhost_dir), loop).__enter__() binhost_uri = "http://{address}:{port}{path}".format( address=binhost_address, port=binhost_server.server_port, path=binhost_remote_path, ) binpkg_format = settings.get("BINPKG_FORMAT", SUPPORTED_GENTOO_BINPKG_FORMATS[0]) self.assertIn(binpkg_format, ("xpak", "gpkg")) if binpkg_format == "xpak": foo_filename = "foo-0-1.xpak" elif binpkg_format == "gpkg": foo_filename = "foo-0-1.gpkg.tar" test_commands = () if hasattr(argparse.ArgumentParser, "parse_intermixed_args"): test_commands += ( emerge_cmd + ("--oneshot", "dev-libs/A", "-v", "dev-libs/A"), ) test_commands += ( emerge_cmd + ( "--usepkgonly", "--root", cross_root, "--quickpkg-direct=y", "--quickpkg-direct-root", "/", "dev-libs/A", ), emerge_cmd + ( "--usepkgonly", "--quickpkg-direct=y", "--quickpkg-direct-root", cross_root, "dev-libs/A", ), env_update_cmd, portageq_cmd + ( "envvar", "-v", "CONFIG_PROTECT", "EROOT", "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND", ), etc_update_cmd, dispatch_conf_cmd, emerge_cmd + ("--version", ), emerge_cmd + ("--info", ), emerge_cmd + ("--info", "--verbose"), emerge_cmd + ("--list-sets", ), emerge_cmd + ("--check-news", ), rm_cmd + ("-rf", cachedir), rm_cmd + ("-rf", cachedir_pregen), emerge_cmd + ("--regen", ), rm_cmd + ("-rf", cachedir), 
({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--regen", ), rm_cmd + ("-rf", cachedir), ({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--regen", ), rm_cmd + ("-rf", cachedir), egencache_cmd + ("--update", ) + tuple(egencache_extra_args), ({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--metadata", ), rm_cmd + ("-rf", cachedir), ({ "FEATURES": "metadata-transfer" }, ) + emerge_cmd + ("--metadata", ), emerge_cmd + ("--metadata", ), rm_cmd + ("-rf", cachedir), emerge_cmd + ("--oneshot", "virtual/foo"), lambda: self.assertFalse( os.path.exists( os.path.join(pkgdir, "virtual", "foo", foo_filename))), ({ "FEATURES": "unmerge-backup" }, ) + emerge_cmd + ("--unmerge", "virtual/foo"), lambda: self.assertTrue( os.path.exists( os.path.join(pkgdir, "virtual", "foo", foo_filename))), emerge_cmd + ("--pretend", "dev-libs/A"), ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"), emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"), emerge_cmd + ("-p", "dev-libs/B"), emerge_cmd + ("-p", "--newrepo", "dev-libs/B"), emerge_cmd + ( "-B", "dev-libs/B", ), emerge_cmd + ( "--oneshot", "--usepkg", "dev-libs/B", ), # trigger clean prior to pkg_pretend as in bug #390711 ebuild_cmd + (test_ebuild, "unpack"), emerge_cmd + ( "--oneshot", "dev-libs/A", ), emerge_cmd + ( "--noreplace", "dev-libs/A", ), emerge_cmd + ( "--config", "dev-libs/A", ), emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"), emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"), emerge_cmd + ( "--pretend", "--depclean", ), emerge_cmd + ("--depclean", ), quickpkg_cmd + ( "--include-config", "y", "dev-libs/A", ), # Test bug #523684, where a file renamed or removed by the # admin forces replacement files to be merged with config # protection. lambda: self.assertEqual( 0, len( list( find_updated_config_files( eroot, shlex_split(settings["CONFIG_PROTECT"])))), ), lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")), emerge_cmd + ("--usepkgonly", "dev-libs/A"), lambda: self.assertEqual( 1, len( list( find_updated_config_files( eroot, shlex_split(settings["CONFIG_PROTECT"])))), ), emaint_cmd + ("--check", "all"), emaint_cmd + ("--fix", "all"), fixpackages_cmd, regenworld_cmd, portageq_cmd + ("match", eroot, "dev-libs/A"), portageq_cmd + ("best_visible", eroot, "dev-libs/A"), portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"), portageq_cmd + ("contents", eroot, "dev-libs/A-1"), portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"), portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"), portageq_cmd + ( "metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND", ), portageq_cmd + ("owners", eroot, eroot + "usr"), emerge_cmd + ("-p", eroot + "usr"), emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"), emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"), emerge_cmd + ("-C", "--quiet", "dev-libs/B"), # If EMERGE_DEFAULT_OPTS contains --autounmask=n, then --autounmask # must be specified with --autounmask-continue. ( { "EMERGE_DEFAULT_OPTS": "--autounmask=n" }, ) + emerge_cmd + ( "--autounmask", "--autounmask-continue", "dev-libs/C", ), # Verify that the above --autounmask-continue command caused # USE=flag to be applied correctly to dev-libs/D. portageq_cmd + ("match", eroot, "dev-libs/D[flag]"), # Test cross-prefix usage, including chpathtool for binpkgs. 
# EAPI 7 ( { "EPREFIX": cross_prefix }, ) + emerge_cmd + ("dev-libs/C", ), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/C"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/D"), ({ "ROOT": cross_root }, ) + emerge_cmd + ("dev-libs/D", ), portageq_cmd + ("has_version", cross_eroot, "dev-libs/D"), # EAPI 5 ( { "EPREFIX": cross_prefix }, ) + emerge_cmd + ("--usepkgonly", "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"), ({ "EPREFIX": cross_prefix }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/B"), ({ "EPREFIX": cross_prefix }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + emerge_cmd + ("dev-libs/A", ), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"), ({ "EPREFIX": cross_prefix }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"), # Test ROOT support ( { "ROOT": cross_root }, ) + emerge_cmd + ("dev-libs/B", ), portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"), ) # Test binhost support if FETCHCOMMAND is available. binrepos_conf_file = os.path.join(os.sep, eprefix, BINREPOS_CONF_FILE) with open(binrepos_conf_file, "wt") as f: f.write("[test-binhost]\n") f.write("sync-uri = {}\n".format(binhost_uri)) fetchcommand = portage.util.shlex_split( playground.settings["FETCHCOMMAND"]) fetch_bin = portage.process.find_binary(fetchcommand[0]) if fetch_bin is not None: test_commands = test_commands + ( lambda: os.rename(pkgdir, binhost_dir), emerge_cmd + ("-e", "--getbinpkgonly", "dev-libs/A"), lambda: shutil.rmtree(pkgdir), lambda: os.rename(binhost_dir, pkgdir), # Remove binrepos.conf and test PORTAGE_BINHOST. 
lambda: os.unlink(binrepos_conf_file), lambda: os.rename(pkgdir, binhost_dir), ({ "PORTAGE_BINHOST": binhost_uri }, ) + emerge_cmd + ("-fe", "--getbinpkgonly", "dev-libs/A"), lambda: shutil.rmtree(pkgdir), lambda: os.rename(binhost_dir, pkgdir), ) distdir = playground.distdir pkgdir = playground.pkgdir fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") profile_path = settings.profile_path user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH) path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and pythonpath.split( ":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX": eprefix, "CLEAN_DELAY": "0", "DISTDIR": distdir, "EMERGE_WARNING_DELAY": "0", "INFODIR": "", "INFOPATH": "", "PATH": path, "PKGDIR": pkgdir, "PORTAGE_INST_GID": str(portage.data.portage_gid), "PORTAGE_INST_UID": str(portage.data.portage_uid), "PORTAGE_PYTHON": portage_python, "PORTAGE_REPOSITORIES": settings.repositories.config_string(), "PORTAGE_TMPDIR": portage_tmpdir, "PORTAGE_LOGDIR": portage_tmpdir, "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""), "PYTHONPATH": pythonpath, "__PORTAGE_TEST_PATH_OVERRIDE": fake_bin, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[ "__PORTAGE_TEST_HARDLINK_LOCKS"] updates_dir = os.path.join(test_repo_location, "profiles", "updates") dirs = [ cachedir, cachedir_pregen, cross_eroot, cross_prefix, distdir, fake_bin, portage_tmpdir, updates_dir, user_config_dir, var_cache_edb, ] etc_symlinks = ("dispatch-conf.conf", "etc-update.conf") # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. # prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ["find", "prepstrip", "sed", "scanelf"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) for x in etc_symlinks: os.symlink(os.path.join(self.cnf_etc_path, x), os.path.join(eprefix, "etc", x)) with open(os.path.join(var_cache_edb, "counter"), "wb") as f: f.write(b"100") # non-empty system set keeps --depclean quiet with open(os.path.join(profile_path, "packages"), "w") as f: f.write("*dev-libs/token-system-pkg") for cp, xml_data in metadata_xml_files: with open(os.path.join(test_repo_location, cp, "metadata.xml"), "w") as f: f.write(playground.metadata_xml_template % xml_data) with open(os.path.join(updates_dir, "1Q-2010"), "w") as f: f.write(""" slotmove =app-doc/pms-3 2 3 move dev-util/git dev-vcs/git """) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. 
stdout = subprocess.PIPE for args in test_commands: if hasattr(args, "__call__"): args() continue if isinstance(args[0], dict): local_env = env.copy() local_env.update(args[0]) args = args[1:] else: local_env = env proc = await asyncio.create_subprocess_exec(*args, env=local_env, stderr=None, stdout=stdout) if debug: await proc.wait() else: output, _err = await proc.communicate() await proc.wait() if proc.returncode != os.EX_OK: portage.writemsg(output) self.assertEqual(os.EX_OK, proc.returncode, "emerge failed with args %s" % (args, )) finally: binhost_server.__exit__(None, None, None) playground.cleanup()
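# The asyncio command loop above is the same capture-and-replay idiom in
# coroutine form. A stand-alone sketch (run_quietly_async is hypothetical):

import asyncio

async def run_quietly_async(args, env=None):
    proc = await asyncio.create_subprocess_exec(
        *args, env=env, stdout=asyncio.subprocess.PIPE)
    output, _err = await proc.communicate()
    if proc.returncode != 0:
        # Replay the captured output only on failure.
        print(output.decode(errors="replace"))
    return proc.returncode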
def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None, allow_missing_digests=True): "fetch files. Will use digest file if available." if not myuris: return 1 features = mysettings.features restrict = mysettings.get("PORTAGE_RESTRICT", "").split() userfetch = secpass >= 2 and "userfetch" in features userpriv = secpass >= 2 and "userpriv" in features # 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring. restrict_mirror = "mirror" in restrict or "nomirror" in restrict if restrict_mirror: if ("mirror" in features) and ("lmirror" not in features): # lmirror should allow you to bypass mirror restrictions. # XXX: This is not a good thing, and is temporary at best. print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch.")) return 1 # Generally, downloading the same file repeatedly from # every single available mirror is a waste of bandwidth # and time, so there needs to be a cap. checksum_failure_max_tries = 5 v = checksum_failure_max_tries try: v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", checksum_failure_max_tries)) except (ValueError, OverflowError): writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains non-integer value: '%s'\n") % \ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries if v < 1: writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS" " contains value less than 1: '%s'\n") % v, noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS " "default value: %s\n") % checksum_failure_max_tries, noiselevel=-1) v = checksum_failure_max_tries checksum_failure_max_tries = v del v fetch_resume_size_default = "350K" fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE") if fetch_resume_size is not None: fetch_resume_size = "".join(fetch_resume_size.split()) if not fetch_resume_size: # If it's undefined or empty, silently use the default. fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) if match is None or \ (match.group(2).upper() not in _size_suffix_map): writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE" " contains an unrecognized format: '%s'\n") % \ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1) writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE " "default value: %s\n") % fetch_resume_size_default, noiselevel=-1) fetch_resume_size = None if fetch_resume_size is None: fetch_resume_size = fetch_resume_size_default match = _fetch_resume_size_re.match(fetch_resume_size) fetch_resume_size = int(match.group(1)) * \ 2 ** _size_suffix_map[match.group(2).upper()] # Behave like the package has RESTRICT="primaryuri" after a # couple of checksum failures, to increase the probability # of success before checksum_failure_max_tries is reached. checksum_failure_primaryuri = 2 thirdpartymirrors = mysettings.thirdpartymirrors() # In the background parallel-fetch process, it's safe to skip checksum # verification of pre-existing files in $DISTDIR that have the correct # file size. The parent process will verify their checksums prior to # the unpack phase. 
parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings if parallel_fetchonly: fetchonly = 1 check_config_instance(mysettings) custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"], CUSTOM_MIRRORS_FILE), recursive=1) mymirrors=[] if listonly or ("distlocks" not in features): use_locks = 0 fetch_to_ro = 0 if "skiprocheck" in features: fetch_to_ro = 1 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro: if use_locks: writemsg(colorize("BAD", _("!!! For fetching to a read-only filesystem, " "locking should be turned off.\n")), noiselevel=-1) writemsg(_("!!! This can be done by adding -distlocks to " "FEATURES in /etc/make.conf\n"), noiselevel=-1) # use_locks = 0 # local mirrors are always added if "local" in custommirrors: mymirrors += custommirrors["local"] if restrict_mirror: # We don't add any mirrors. pass else: if try_mirrors: mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x] hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", "")) if hash_filter.transparent: hash_filter = None skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1" if skip_manifest: allow_missing_digests = True pkgdir = mysettings.get("O") if digests is None and not (pkgdir is None or skip_manifest): mydigests = mysettings.repositories.get_repo_for_location( os.path.dirname(os.path.dirname(pkgdir))).load_manifest( pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST") elif digests is None or skip_manifest: # no digests because fetch was not called for a specific package mydigests = {} else: mydigests = digests ro_distdirs = [x for x in \ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \ if os.path.isdir(x)] fsmirrors = [] for x in range(len(mymirrors)-1,-1,-1): if mymirrors[x] and mymirrors[x][0]=='/': fsmirrors += [mymirrors[x]] del mymirrors[x] restrict_fetch = "fetch" in restrict force_mirror = "force-mirror" in features and not restrict_mirror custom_local_mirrors = custommirrors.get("local", []) if restrict_fetch: # With fetch restriction, a normal uri may only be fetched from # custom local mirrors (if available). A mirror:// uri may also # be fetched from specific mirrors (effectively overriding fetch # restriction, but only for specific mirrors). locations = custom_local_mirrors else: locations = mymirrors file_uri_tuples = [] # Check for 'items' attribute since OrderedDict is not a dict. 
if hasattr(myuris, 'items'): for myfile, uri_set in myuris.items(): for myuri in uri_set: file_uri_tuples.append((myfile, myuri)) else: for myuri in myuris: file_uri_tuples.append((os.path.basename(myuri), myuri)) filedict = OrderedDict() primaryuri_dict = {} thirdpartymirror_uris = {} for myfile, myuri in file_uri_tuples: if myfile not in filedict: filedict[myfile]=[] for y in range(0,len(locations)): filedict[myfile].append(locations[y]+"/distfiles/"+myfile) if myuri[:9]=="mirror://": eidx = myuri.find("/", 9) if eidx != -1: mirrorname = myuri[9:eidx] path = myuri[eidx+1:] # Try user-defined mirrors first if mirrorname in custommirrors: for cmirr in custommirrors[mirrorname]: filedict[myfile].append( cmirr.rstrip("/") + "/" + path) # now try the official mirrors if mirrorname in thirdpartymirrors: uris = [locmirr.rstrip("/") + "/" + path \ for locmirr in thirdpartymirrors[mirrorname]] random.shuffle(uris) filedict[myfile].extend(uris) thirdpartymirror_uris.setdefault(myfile, []).extend(uris) if not filedict[myfile]: writemsg(_("No known mirror by the name: %s\n") % (mirrorname)) else: writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1) writemsg(" %s\n" % (myuri), noiselevel=-1) else: if restrict_fetch or force_mirror: # Only fetching from specific mirrors is allowed. continue primaryuris = primaryuri_dict.get(myfile) if primaryuris is None: primaryuris = [] primaryuri_dict[myfile] = primaryuris primaryuris.append(myuri) # Order primaryuri_dict values to match that in SRC_URI. for uris in primaryuri_dict.values(): uris.reverse() # Prefer thirdpartymirrors over normal mirrors in cases when # the file does not yet exist on the normal mirrors. for myfile, uris in thirdpartymirror_uris.items(): primaryuri_dict.setdefault(myfile, []).extend(uris) # Now merge primaryuri values into filedict (includes mirrors # explicitly referenced in SRC_URI). if "primaryuri" in restrict: for myfile, uris in filedict.items(): filedict[myfile] = primaryuri_dict.get(myfile, []) + uris else: for myfile in filedict: filedict[myfile] += primaryuri_dict.get(myfile, []) can_fetch=True if listonly: can_fetch = False if can_fetch and not fetch_to_ro: global _userpriv_test_write_file_cache dirmode = 0o070 filemode = 0o60 modemask = 0o2 dir_gid = portage_gid if "FAKED_MODE" in mysettings: # When inside fakeroot, directories with portage's gid appear # to have root's gid. Therefore, use root's gid instead of # portage's gid to avoid spurious permissions adjustments # when inside fakeroot. dir_gid = 0 distdir_dirs = [""] try: for x in distdir_dirs: mydir = os.path.join(mysettings["DISTDIR"], x) write_test_file = os.path.join( mydir, ".__portage_test_write__") try: st = os.stat(mydir) except OSError: st = None if st is not None and stat.S_ISDIR(st.st_mode): if not (userfetch or userpriv): continue if _userpriv_test_write_file(mysettings, write_test_file): continue _userpriv_test_write_file_cache.pop(write_test_file, None) if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask): if st is None: # The directory has just been created # and therefore it must be empty. 
continue writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir, noiselevel=-1) def onerror(e): raise # bail out on the first error that occurs during recursion if not apply_recursive_permissions(mydir, gid=dir_gid, dirmode=dirmode, dirmask=modemask, filemode=filemode, filemask=modemask, onerror=onerror): raise OperationNotPermitted( _("Failed to apply recursive permissions for the portage group.")) except PortageException as e: if not os.path.isdir(mysettings["DISTDIR"]): writemsg("!!! %s\n" % str(e), noiselevel=-1) writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1) writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1) if can_fetch and \ not fetch_to_ro and \ not os.access(mysettings["DISTDIR"], os.W_OK): writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"], noiselevel=-1) can_fetch = False distdir_writable = can_fetch and not fetch_to_ro failed_files = set() restrict_fetch_msg = False for myfile in filedict: """ fetched status 0 nonexistent 1 partially downloaded 2 completely downloaded """ fetched = 0 orig_digests = mydigests.get(myfile, {}) if not (allow_missing_digests or listonly): verifiable_hash_types = set(orig_digests).intersection(hashfunc_map) verifiable_hash_types.discard("size") if not verifiable_hash_types: expected = set(hashfunc_map) expected.discard("size") expected = " ".join(sorted(expected)) got = set(orig_digests) got.discard("size") got = " ".join(sorted(got)) reason = (_("Insufficient data for checksum verification"), got, expected) writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if fetchonly: failed_files.add(myfile) continue else: return 0 size = orig_digests.get("size") if size == 0: # Zero-byte distfiles are always invalid, so discard their digests. del mydigests[myfile] orig_digests.clear() size = None pruned_digests = orig_digests if parallel_fetchonly: pruned_digests = {} if size is not None: pruned_digests["size"] = size myfile_path = os.path.join(mysettings["DISTDIR"], myfile) has_space = True has_space_superuser = True file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) else: # check if there is enough space in DISTDIR to completely store myfile # overestimate the filesize so we aren't bitten by FS overhead vfs_stat = None if size is not None and hasattr(os, "statvfs"): try: vfs_stat = os.statvfs(mysettings["DISTDIR"]) except OSError as e: writemsg_level("!!! statvfs('%s'): %s\n" % (mysettings["DISTDIR"], e), noiselevel=-1, level=logging.ERROR) del e if vfs_stat is not None: try: mysize = os.stat(myfile_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bavail): if (size - mysize + vfs_stat.f_bsize) >= \ (vfs_stat.f_bsize * vfs_stat.f_bfree): has_space_superuser = False if not has_space_superuser: has_space = False elif secpass < 2: has_space = False elif userfetch: has_space = False if distdir_writable and use_locks: lock_kwargs = {} if fetchonly: lock_kwargs["flags"] = os.O_NONBLOCK try: file_lock = lockfile(myfile_path, wantnewlockfile=1, **lock_kwargs) except TryAgain: writemsg(_(">>> File '%s' is already locked by " "another fetcher. 
Continuing...\n") % myfile, noiselevel=-1) continue try: if not listonly: eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET") == "1" match, mystat = _check_distfile( myfile_path, pruned_digests, eout, hash_filter=hash_filter) if match: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if distdir_writable and not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e continue if distdir_writable and mystat is None: # Remove broken symlinks if necessary. try: os.unlink(myfile_path) except OSError: pass if mystat is not None: if stat.S_ISDIR(mystat.st_mode): writemsg_level( _("!!! Unable to fetch file since " "a directory is in the way: \n" "!!! %s\n") % myfile_path, level=logging.ERROR, noiselevel=-1) return 0 if mystat.st_size == 0: if distdir_writable: try: os.unlink(myfile_path) except OSError: pass elif distdir_writable: if mystat.st_size < fetch_resume_size and \ mystat.st_size < size: # If the file already exists and the size does not # match the existing digests, it may be that the # user is attempting to update the digest. In this # case, the digestgen() function will advise the # user to use `ebuild --force foo.ebuild manifest` # in order to force the old digests to be replaced. # Since the user may want to keep this file, rename # it instead of deleting it. writemsg(_(">>> Renaming distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) elif mystat.st_size >= size: temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) if distdir_writable and ro_distdirs: readonly_file = None for x in ro_distdirs: filename = os.path.join(x, myfile) match, mystat = _check_distfile( filename, pruned_digests, eout, hash_filter=hash_filter) if match: readonly_file = filename break if readonly_file is not None: try: os.unlink(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e os.symlink(readonly_file, myfile_path) continue # this message is shown only after we know that # the file is not already fetched if not has_space: writemsg(_("!!! Insufficient space to store %s in %s\n") % \ (myfile, mysettings["DISTDIR"]), noiselevel=-1) if has_space_superuser: writemsg(_("!!! Insufficient privileges to use " "remaining space.\n"), noiselevel=-1) if userfetch: writemsg(_("!!! You may set FEATURES=\"-userfetch\"" " in /etc/make.conf in order to fetch with\n" "!!! 
superuser privileges.\n"), noiselevel=-1) if fsmirrors and not os.path.exists(myfile_path) and has_space: for mydir in fsmirrors: mirror_file = os.path.join(mydir, myfile) try: shutil.copyfile(mirror_file, myfile_path) writemsg(_("Local mirror has file: %s\n") % myfile) break except (IOError, OSError) as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e try: mystat = os.stat(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e else: # Skip permission adjustment for symlinks, since we don't # want to modify anything outside of the primary DISTDIR, # and symlinks typically point to PORTAGE_RO_DISTDIRS. if not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, stat_cached=mystat) except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % (e,), noiselevel=-1) # If the file is empty then it's obviously invalid. Remove # the empty file and try to download if possible. if mystat.st_size == 0: if distdir_writable: try: os.unlink(myfile_path) except EnvironmentError: pass elif myfile not in mydigests: # We don't have a digest, but the file exists. We must # assume that it is fully downloaded. continue else: if mystat.st_size < mydigests[myfile]["size"] and \ not restrict_fetch: fetched = 1 # Try to resume this download. elif parallel_fetchonly and \ mystat.st_size == mydigests[myfile]["size"]: eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET") == "1" eout.ebegin( "%s size ;-)" % (myfile, )) eout.eend(0) continue else: digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(myfile_path, digests) if not verified_ok: writemsg(_("!!! Previously fetched" " file: '%s'\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n" "!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 if distdir_writable: temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) else: eout = EOutput() eout.quiet = \ mysettings.get("PORTAGE_QUIET", None) == "1" if digests: digests = list(digests) digests.sort() eout.ebegin( "%s %s ;-)" % (myfile, " ".join(digests))) eout.eend(0) continue # fetch any remaining files # Create a reversed list since that is optimal for list.pop(). uri_list = filedict[myfile][:] uri_list.reverse() checksum_failure_count = 0 tried_locations = set() while uri_list: loc = uri_list.pop() # Eliminate duplicates here in case we've switched to # "primaryuri" mode on the fly due to a checksum failure. if loc in tried_locations: continue tried_locations.add(loc) if listonly: writemsg_stdout(loc+" ", noiselevel=-1) continue # allow different fetchcommands per protocol protocol = loc[0:loc.find("://")] global_config_path = GLOBAL_CONFIG_PATH if mysettings['EPREFIX']: global_config_path = os.path.join(mysettings['EPREFIX'], GLOBAL_CONFIG_PATH.lstrip(os.sep)) missing_file_param = False fetchcommand_var = "FETCHCOMMAND_" + protocol.upper() fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: fetchcommand_var = "FETCHCOMMAND" fetchcommand = mysettings.get(fetchcommand_var) if fetchcommand is None: writemsg_level( _("!!! %s is unset. 
It should " "have been defined in\n!!! %s/make.globals.\n") \ % (fetchcommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in fetchcommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % fetchcommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True resumecommand_var = "RESUMECOMMAND_" + protocol.upper() resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: resumecommand_var = "RESUMECOMMAND" resumecommand = mysettings.get(resumecommand_var) if resumecommand is None: writemsg_level( _("!!! %s is unset. It should " "have been defined in\n!!! %s/make.globals.\n") \ % (resumecommand_var, global_config_path), level=logging.ERROR, noiselevel=-1) return 0 if "${FILE}" not in resumecommand: writemsg_level( _("!!! %s does not contain the required ${FILE}" " parameter.\n") % resumecommand_var, level=logging.ERROR, noiselevel=-1) missing_file_param = True if missing_file_param: writemsg_level( _("!!! Refer to the make.conf(5) man page for " "information about how to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) if myfile != os.path.basename(loc): return 0 if not can_fetch: if fetched != 2: try: mysize = os.stat(myfile_path).st_size except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e mysize = 0 if mysize == 0: writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile, noiselevel=-1) elif size is None or size > mysize: writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile, noiselevel=-1) else: writemsg(_("!!! File %s is incorrect size, " "but unable to retry.\n") % myfile, noiselevel=-1) return 0 else: continue if fetched != 2 and has_space: #we either need to resume or start the download if fetched == 1: try: mystat = os.stat(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if mystat.st_size < fetch_resume_size: writemsg(_(">>> Deleting distfile with size " "%d (smaller than " "PORTAGE_FETCH_RESU" "ME_MIN_SIZE)\n") % mystat.st_size) try: os.unlink(myfile_path) except OSError as e: if e.errno not in \ (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 if fetched == 1: #resume mode: writemsg(_(">>> Resuming download...\n")) locfetch=resumecommand command_var = resumecommand_var else: #normal mode: locfetch=fetchcommand command_var = fetchcommand_var writemsg_stdout(_(">>> Downloading '%s'\n") % \ _hide_url_passwd(loc)) variables = { "DISTDIR": mysettings["DISTDIR"], "URI": loc, "FILE": myfile } myfetch = shlex_split(locfetch) myfetch = [varexpand(x, mydict=variables) for x in myfetch] myret = -1 try: myret = _spawn_fetch(mysettings, myfetch) finally: try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2) except FileNotFound: pass except PortageException as e: if not os.access(myfile_path, os.R_OK): writemsg(_("!!! Failed to adjust permissions:" " %s\n") % str(e), noiselevel=-1) del e # If the file is empty then it's obviously invalid. Don't # trust the return value from the fetcher. Remove the # empty file and try to download again. 
try: if os.stat(myfile_path).st_size == 0: os.unlink(myfile_path) fetched = 0 continue except EnvironmentError: pass if mydigests is not None and myfile in mydigests: try: mystat = os.stat(myfile_path) except OSError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise del e fetched = 0 else: if stat.S_ISDIR(mystat.st_mode): # This can happen if FETCHCOMMAND erroneously # contains wget's -P option where it should # instead have -O. writemsg_level( _("!!! The command specified in the " "%s variable appears to have\n!!! " "created a directory instead of a " "normal file.\n") % command_var, level=logging.ERROR, noiselevel=-1) writemsg_level( _("!!! Refer to the make.conf(5) " "man page for information about how " "to\n!!! correctly specify " "FETCHCOMMAND and RESUMECOMMAND.\n"), level=logging.ERROR, noiselevel=-1) return 0 # No exception? The file exists. Let digestcheck() report # appropriately for size or checksum errors. # If the fetcher reported success and the file is # too small, it's probably because the digest is # bad (upstream changed the distfile). In this # case we don't want to attempt to resume. Show a # digest verification failure so that the user gets # a clue about what just happened. if myret != os.EX_OK and \ mystat.st_size < mydigests[myfile]["size"]: # Fetch failed... Try the next one... Kill 404 files though. if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")): html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M) with io.open( _unicode_encode(myfile_path, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace' ) as f: if html404.search(f.read()): try: os.unlink(mysettings["DISTDIR"]+"/"+myfile) writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")) fetched = 0 continue except (IOError, OSError): pass fetched = 1 continue if True: # File is the correct size--check the checksums for the fetched # file NOW, for those users who don't have a stable/continuous # net connection. This way we have a chance to try to download # from another mirror... digests = _filter_unaccelarated_hashes(mydigests[myfile]) if hash_filter is not None: digests = _apply_hash_filter(digests, hash_filter) verified_ok, reason = verify_all(myfile_path, digests) if not verified_ok: writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! Reason: %s\n") % reason[0], noiselevel=-1) writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \ (reason[1], reason[2]), noiselevel=-1) if reason[0] == _("Insufficient data for checksum verification"): return 0 temp_filename = \ _checksum_failure_temp_file( mysettings["DISTDIR"], myfile) writemsg_stdout(_("Refetching... " "File renamed to '%s'\n\n") % \ temp_filename, noiselevel=-1) fetched=0 checksum_failure_count += 1 if checksum_failure_count == \ checksum_failure_primaryuri: # Switch to "primaryuri" mode in order # to increase the probability # of success. 
primaryuris = \ primaryuri_dict.get(myfile) if primaryuris: uri_list.extend( reversed(primaryuris)) if checksum_failure_count >= \ checksum_failure_max_tries: break else: eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" if digests: eout.ebegin("%s %s ;-)" % \ (myfile, " ".join(sorted(digests)))) eout.eend(0) fetched=2 break else: if not myret: fetched=2 break elif mydigests!=None: writemsg(_("No digest file available and download failed.\n\n"), noiselevel=-1) finally: if use_locks and file_lock: unlockfile(file_lock) file_lock = None if listonly: writemsg_stdout("\n", noiselevel=-1) if fetched != 2: if restrict_fetch and not restrict_fetch_msg: restrict_fetch_msg = True msg = _("\n!!! %s/%s" " has fetch restriction turned on.\n" "!!! This probably means that this " "ebuild's files must be downloaded\n" "!!! manually. See the comments in" " the ebuild for more information.\n\n") % \ (mysettings["CATEGORY"], mysettings["PF"]) writemsg_level(msg, level=logging.ERROR, noiselevel=-1) elif restrict_fetch: pass elif listonly: pass elif not filedict[myfile]: writemsg(_("Warning: No mirrors available for file" " '%s'\n") % (myfile), noiselevel=-1) else: writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile, noiselevel=-1) if listonly: failed_files.add(myfile) continue elif fetchonly: failed_files.add(myfile) continue return 0 if failed_files: return 0 return 1
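# fetch() accepts PORTAGE_FETCH_RESUME_MIN_SIZE values such as "350K"
# and converts them to bytes with _fetch_resume_size_re and
# _size_suffix_map. Below is a self-contained sketch of that
# conversion; the regex and suffix map here are local stand-ins,
# assumed to mirror the module-level ones:
import re

_example_resume_size_re = re.compile(r"(^\d+)([KMGTPEZY]?$)")
_example_size_suffix_map = {
	"": 0, "K": 10, "M": 20, "G": 30, "T": 40,
	"P": 50, "E": 60, "Z": 70, "Y": 80,
}

def parse_resume_size(value, default="350K"):
	# Strip whitespace; fall back to the default for empty or
	# unparseable values, as fetch() does.
	value = "".join(value.split()) or default
	match = _example_resume_size_re.match(value)
	if match is None or match.group(2).upper() not in _example_size_suffix_map:
		match = _example_resume_size_re.match(default)
	return int(match.group(1)) * 2 ** _example_size_suffix_map[match.group(2).upper()]

# parse_resume_size("350K") == 350 * 2 ** 10 == 358400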
def testBlockerFileCollision(self): debug = False install_something = """ S="${WORKDIR}" src_install() { einfo "installing something..." insinto /usr/lib echo "${PN}" > "${T}/file-collision" doins "${T}/file-collision" } """ ebuilds = { "dev-libs/A-1": { "EAPI": "6", "MISC_CONTENT": install_something, "RDEPEND": "!dev-libs/B", }, "dev-libs/B-1": { "EAPI": "6", "MISC_CONTENT": install_something, "RDEPEND": "!dev-libs/A", }, } playground = ResolverPlayground(ebuilds=ebuilds, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH) portage_python = portage._python_interpreter emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "emerge")) file_collision = os.path.join(eroot, "usr/lib/file-collision") test_commands = ( emerge_cmd + ( "--oneshot", "dev-libs/A", ), (lambda: portage.util.grablines(file_collision) == ["A\n"],), emerge_cmd + ( "--oneshot", "dev-libs/B", ), (lambda: portage.util.grablines(file_collision) == ["B\n"],), emerge_cmd + ( "--oneshot", "dev-libs/A", ), (lambda: portage.util.grablines(file_collision) == ["A\n"],), ({"FEATURES": "parallel-install"},) + emerge_cmd + ( "--oneshot", "dev-libs/B", ), (lambda: portage.util.grablines(file_collision) == ["B\n"],), ({"FEATURES": "parallel-install"},) + emerge_cmd + ( "-Cq", "dev-libs/B", ), (lambda: not os.path.exists(file_collision),), ) fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") profile_path = settings.profile_path path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX": eprefix, "PATH": path, "PORTAGE_PYTHON": portage_python, "PORTAGE_REPOSITORIES": settings.repositories.config_string(), "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""), "PYTHONPATH": pythonpath, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[ "__PORTAGE_TEST_HARDLINK_LOCKS" ] dirs = [ playground.distdir, fake_bin, portage_tmpdir, user_config_dir, var_cache_edb, ] true_symlinks = ["chown", "chgrp"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) with open(os.path.join(var_cache_edb, "counter"), "wb") as f: f.write(b"100") # non-empty system set keeps --depclean quiet with open(os.path.join(profile_path, "packages"), "w") as f: f.write("*dev-libs/token-system-pkg") if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. 
stdout = subprocess.PIPE for i, args in enumerate(test_commands): if hasattr(args[0], "__call__"): self.assertTrue(args[0](), "callable at index %s failed" % (i,)) continue if isinstance(args[0], dict): local_env = env.copy() local_env.update(args[0]) args = args[1:] else: local_env = env proc = subprocess.Popen(args, env=local_env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual( os.EX_OK, proc.returncode, "emerge failed with args %s" % (args,) ) finally: playground.debug = False playground.cleanup()
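# The PATH/PYTHONPATH handling above is repeated nearly verbatim in
# several tests in this file: a blank inherited value is treated as
# unset, and the test's own directory is prepended unless it is
# already first. The idiom, extracted into a hypothetical helper for
# clarity:
def prepend_env_path(entry, current):
	# Treat None or whitespace-only values as unset.
	if current is not None and not current.strip():
		current = None
	if current is None:
		return entry
	if current.split(":")[0] == entry:
		# Already first (the PYTHONPATH case); leave unchanged.
		return current
	return entry + ":" + current

# e.g. env["PYTHONPATH"] = prepend_env_path(PORTAGE_PYM_PATH,
# 	os.environ.get("PYTHONPATH"))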
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets): user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH) try: os.makedirs(user_config_dir) except os.error: pass for repo in self._repositories: if repo == "DEFAULT": continue repo_dir = self._get_repo_dir(repo) profile_dir = os.path.join(repo_dir, "profiles") metadata_dir = os.path.join(repo_dir, "metadata") os.makedirs(metadata_dir) #Create $REPO/profiles/categories categories = set() for cpv in ebuilds: ebuilds_repo = Atom("="+cpv, allow_repo=True).repo if ebuilds_repo is None: ebuilds_repo = "test_repo" if ebuilds_repo == repo: categories.add(catsplit(cpv)[0]) categories_file = os.path.join(profile_dir, "categories") f = open(categories_file, "w") for cat in categories: f.write(cat + "\n") f.close() #Create $REPO/profiles/license_groups license_file = os.path.join(profile_dir, "license_groups") f = open(license_file, "w") f.write("EULA TEST\n") f.close() repo_config = repo_configs.get(repo) if repo_config: for config_file, lines in repo_config.items(): if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files): raise ValueError("Unknown config file: '%s'" % config_file) if config_file in ("layout.conf",): file_name = os.path.join(repo_dir, "metadata", config_file) else: file_name = os.path.join(profile_dir, config_file) if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there) os.makedirs(os.path.join(repo_dir, "eclass")) if repo == "test_repo": #Create a minimal profile in /usr/portage sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile") os.makedirs(sub_profile_dir) if not (profile and "eapi" in profile): eapi_file = os.path.join(sub_profile_dir, "eapi") f = open(eapi_file, "w") f.write("0\n") f.close() make_defaults_file = os.path.join(sub_profile_dir, "make.defaults") f = open(make_defaults_file, "w") f.write("ARCH=\"x86\"\n") f.write("ACCEPT_KEYWORDS=\"x86\"\n") f.close() use_force_file = os.path.join(sub_profile_dir, "use.force") f = open(use_force_file, "w") f.write("x86\n") f.close() parent_file = os.path.join(sub_profile_dir, "parent") f = open(parent_file, "w") f.write("..\n") f.close() if profile: for config_file, lines in profile.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(sub_profile_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create profile symlink os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile")) #Create minimal herds.xml herds_xml = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd"> <?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?> <?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?> <herds> <herd> <name>base-system</name> <email>[email protected]</email> <description>Core system utilities and libraries.</description> <maintainer> <email>[email protected]</email> <name>Base System</name> <role>Base System Maintainer</role> </maintainer> </herd> </herds> """ with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f: f.write(herds_xml) make_conf = { "ACCEPT_KEYWORDS": "x86", "CLEAN_DELAY": "0", "DISTDIR" : self.distdir, 
"EMERGE_WARNING_DELAY": "0", "PKGDIR": self.pkgdir, "PORTAGE_INST_GID": str(portage.data.portage_gid), "PORTAGE_INST_UID": str(portage.data.portage_uid), "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'), } if os.environ.get("NOCOLOR"): make_conf["NOCOLOR"] = os.environ["NOCOLOR"] # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they # need to be inherited by ebuild subprocesses. if 'PORTAGE_USERNAME' in os.environ: make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME'] if 'PORTAGE_GRPNAME' in os.environ: make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME'] make_conf_lines = [] for k_v in make_conf.items(): make_conf_lines.append('%s="%s"' % k_v) if "make.conf" in user_config: make_conf_lines.extend(user_config["make.conf"]) if not portage.process.sandbox_capable or \ os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"') configs = user_config.copy() configs["make.conf"] = make_conf_lines for config_file, lines in configs.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(user_config_dir, config_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close() #Create /usr/share/portage/config/make.globals make_globals_path = os.path.join(self.eroot, GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals") ensure_dirs(os.path.dirname(make_globals_path)) os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"), make_globals_path) #Create /usr/share/portage/config/sets/portage.conf default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets") try: os.makedirs(default_sets_conf_dir) except os.error: pass provided_sets_portage_conf = \ os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf") os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf")) set_config_dir = os.path.join(user_config_dir, "sets") try: os.makedirs(set_config_dir) except os.error: pass for sets_file, lines in sets.items(): file_name = os.path.join(set_config_dir, sets_file) f = open(file_name, "w") for line in lines: f.write("%s\n" % line) f.close()
def testConfigProtect(self): """ Demonstrates many different scenarios. For example: * regular file replaces regular file * regular file replaces symlink * regular file replaces directory * symlink replaces symlink * symlink replaces regular file * symlink replaces directory * directory replaces regular file * directory replaces symlink """ debug = False content_A_1 = """ S="${WORKDIR}" src_install() { insinto /etc/A keepdir /etc/A/dir_a keepdir /etc/A/symlink_replaces_dir keepdir /etc/A/regular_replaces_dir echo regular_a_1 > "${T}"/regular_a doins "${T}"/regular_a echo regular_b_1 > "${T}"/regular_b doins "${T}"/regular_b dosym regular_a /etc/A/regular_replaces_symlink dosym regular_b /etc/A/symlink_replaces_symlink echo regular_replaces_regular_1 > \ "${T}"/regular_replaces_regular doins "${T}"/regular_replaces_regular echo symlink_replaces_regular > \ "${T}"/symlink_replaces_regular doins "${T}"/symlink_replaces_regular } """ content_A_2 = """ S="${WORKDIR}" src_install() { insinto /etc/A keepdir /etc/A/dir_a dosym dir_a /etc/A/symlink_replaces_dir echo regular_replaces_dir > "${T}"/regular_replaces_dir doins "${T}"/regular_replaces_dir echo regular_a_2 > "${T}"/regular_a doins "${T}"/regular_a echo regular_b_2 > "${T}"/regular_b doins "${T}"/regular_b echo regular_replaces_symlink > \ "${T}"/regular_replaces_symlink doins "${T}"/regular_replaces_symlink dosym regular_b /etc/A/symlink_replaces_symlink echo regular_replaces_regular_2 > \ "${T}"/regular_replaces_regular doins "${T}"/regular_replaces_regular dosym regular_a /etc/A/symlink_replaces_regular } """ ebuilds = { "dev-libs/A-1": { "EAPI" : "5", "IUSE" : "+flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "MISC_CONTENT": content_A_1, }, "dev-libs/A-2": { "EAPI" : "5", "IUSE" : "+flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "MISC_CONTENT": content_A_2, }, } playground = ResolverPlayground( ebuilds=ebuilds, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") portage_python = portage._python_interpreter dispatch_conf_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "dispatch-conf")) emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "emerge")) etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir, "etc-update")) etc_update_auto = etc_update_cmd + ("--automode", "-5",) config_protect = "/etc" def modify_files(dir_path): for name in os.listdir(dir_path): path = os.path.join(dir_path, name) st = os.lstat(path) if stat.S_ISREG(st.st_mode): with io.open(path, mode='a', encoding=_encodings["stdio"]) as f: f.write("modified at %d\n" % time.time()) elif stat.S_ISLNK(st.st_mode): old_dest = os.readlink(path) os.unlink(path) os.symlink(old_dest + " modified at %d" % time.time(), path) def updated_config_files(count): self.assertEqual(count, sum(len(x[1]) for x in find_updated_config_files(eroot, shlex_split(config_protect)))) test_commands = ( etc_update_cmd, dispatch_conf_cmd, emerge_cmd + ("-1", "=dev-libs/A-1"), partial(updated_config_files, 0), emerge_cmd + ("-1", "=dev-libs/A-2"), partial(updated_config_files, 2), etc_update_auto, partial(updated_config_files, 0), emerge_cmd + ("-1", "=dev-libs/A-2"), partial(updated_config_files, 0), # Test bug #523684, where a file renamed or removed by the # admin forces replacement files to be merged with config # protection. 
partial(shutil.rmtree, os.path.join(eprefix, "etc", "A")), emerge_cmd + ("-1", "=dev-libs/A-2"), partial(updated_config_files, 8), etc_update_auto, partial(updated_config_files, 0), # Modify some config files, and verify that it triggers # config protection. partial(modify_files, os.path.join(eroot, "etc", "A")), emerge_cmd + ("-1", "=dev-libs/A-2"), partial(updated_config_files, 6), etc_update_auto, partial(updated_config_files, 0), # Modify some config files, downgrade to A-1, and verify # that config protection works properly when the file # types are changing. partial(modify_files, os.path.join(eroot, "etc", "A")), emerge_cmd + ("-1", "--noconfmem", "=dev-libs/A-1"), partial(updated_config_files, 6), etc_update_auto, partial(updated_config_files, 0), ) distdir = playground.distdir fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "CLEAN_DELAY" : "0", "CONFIG_PROTECT": config_protect, "DISTDIR" : distdir, "EMERGE_DEFAULT_OPTS": "-v", "EMERGE_WARNING_DELAY" : "0", "INFODIR" : "", "INFOPATH" : "", "PATH" : path, "PORTAGE_INST_GID" : str(portage.data.portage_gid), "PORTAGE_INST_UID" : str(portage.data.portage_uid), "PORTAGE_PYTHON" : portage_python, "PORTAGE_REPOSITORIES" : settings.repositories.config_string(), "PORTAGE_TMPDIR" : portage_tmpdir, "PYTHONPATH" : pythonpath, "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] dirs = [distdir, fake_bin, portage_tmpdir, var_cache_edb] etc_symlinks = ("dispatch-conf.conf", "etc-update.conf") # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. # prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ["prepstrip", "scanelf"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) for x in etc_symlinks: os.symlink(os.path.join(self.cnf_etc_path, x), os.path.join(eprefix, "etc", x)) with open(os.path.join(var_cache_edb, "counter"), 'wb') as f: f.write(b"100") if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. 
stdout = subprocess.PIPE for args in test_commands: if hasattr(args, '__call__'): args() continue if isinstance(args[0], dict): local_env = env.copy() local_env.update(args[0]) args = args[1:] else: local_env = env proc = subprocess.Popen(args, env=local_env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "emerge failed with args %s" % (args,)) finally: playground.cleanup()
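# updated_config_files() above counts pending configuration updates by
# summing the per-directory file lists yielded by
# find_updated_config_files(). The same pattern as a stand-alone
# sketch (assuming the CONFIG_PROTECT entries are directories, as in
# this test; individually protected files may yield None instead of a
# list):
from portage.util import find_updated_config_files, shlex_split

def count_pending_config_updates(eroot, config_protect):
	# find_updated_config_files() yields (path, file_list) pairs.
	return sum(len(files) for _path, files in
		find_updated_config_files(eroot, shlex_split(config_protect)))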
def testSimple(self): debug = False install_something = """ S="${WORKDIR}" pkg_pretend() { einfo "called pkg_pretend for $CATEGORY/$PF" } src_install() { einfo "installing something..." insinto /usr/lib/${P} echo "blah blah blah" > "${T}"/regular-file doins "${T}"/regular-file dosym regular-file /usr/lib/${P}/symlink || die # Test CONFIG_PROTECT insinto /etc newins "${T}"/regular-file ${PN}-${SLOT%/*} # Test code for bug #381629, using a copyright symbol encoded with latin-1. # We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently # works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under # some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when # running tests for Python 3.2 (even though it's bash that is ultimately # responsible for performing the transformation). local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory insinto "${latin_1_dir}" echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die doins "${T}"/latin-1-$(printf "\\xa9")-regular-file dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die } pkg_config() { einfo "called pkg_config for $CATEGORY/$PF" } pkg_info() { einfo "called pkg_info for $CATEGORY/$PF" } pkg_preinst() { einfo "called pkg_preinst for $CATEGORY/$PF" # Test that has_version and best_version work correctly with # prefix (involves internal ROOT -> EROOT calculation in order # to support ROOT override via the environment with EAPIs 3 # and later which support prefix). if has_version $CATEGORY/$PN:$SLOT ; then einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT" einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)" else einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT" fi if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then if has_version --host-root $CATEGORY/$PN:$SLOT ; then einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT" einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)" else einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT" fi fi } """ ebuilds = { "dev-libs/A-1": { "EAPI" : "5", "IUSE" : "+flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "MISC_CONTENT": install_something, "RDEPEND": "flag? ( dev-libs/B[flag] )", }, "dev-libs/B-1": { "EAPI" : "5", "IUSE" : "+flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "MISC_CONTENT": install_something, }, "dev-libs/C-1": { "EAPI" : "6", "KEYWORDS": "~x86", "RDEPEND": "dev-libs/D[flag]", }, "dev-libs/D-1": { "EAPI" : "6", "KEYWORDS": "~x86", "IUSE" : "flag", }, "virtual/foo-0": { "EAPI" : "5", "KEYWORDS": "x86", "LICENSE": "GPL-2", }, } installed = { "dev-libs/A-1": { "EAPI" : "5", "IUSE" : "+flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "RDEPEND": "flag? 
( dev-libs/B[flag] )", "USE": "flag", }, "dev-libs/B-1": { "EAPI" : "5", "IUSE" : "+flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "USE": "flag", }, "dev-libs/depclean-me-1": { "EAPI" : "5", "IUSE" : "", "KEYWORDS": "x86", "LICENSE": "GPL-2", "USE": "", }, "app-misc/depclean-me-1": { "EAPI" : "5", "IUSE" : "", "KEYWORDS": "x86", "LICENSE": "GPL-2", "RDEPEND": "dev-libs/depclean-me", "USE": "", }, } metadata_xml_files = ( ( "dev-libs/A", { "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ( "dev-libs/B", { "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ) playground = ResolverPlayground( ebuilds=ebuilds, installed=installed, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] trees = playground.trees portdb = trees[eroot]["porttree"].dbapi test_repo_location = settings.repositories["test_repo"].location var_cache_edb = os.path.join(eprefix, "var", "cache", "edb") cachedir = os.path.join(var_cache_edb, "dep") cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache") portage_python = portage._python_interpreter dispatch_conf_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "dispatch-conf")) ebuild_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "ebuild")) egencache_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "egencache"), "--repo", "test_repo", "--repositories-configuration", settings.repositories.config_string()) emerge_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "emerge")) emaint_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "emaint")) env_update_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "env-update")) etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir, "etc-update")) fixpackages_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "fixpackages")) portageq_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "portageq")) quickpkg_cmd = (portage_python, "-b", "-Wd", os.path.join(self.bindir, "quickpkg")) regenworld_cmd = (portage_python, "-b", "-Wd", os.path.join(self.sbindir, "regenworld")) rm_binary = find_binary("rm") self.assertEqual(rm_binary is None, False, "rm command not found") rm_cmd = (rm_binary,) egencache_extra_args = [] if self._have_python_xml(): egencache_extra_args.append("--update-use-local-desc") test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) cross_prefix = os.path.join(eprefix, "cross_prefix") cross_root = os.path.join(eprefix, "cross_root") cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep)) test_commands = ( env_update_cmd, portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT", "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"), etc_update_cmd, dispatch_conf_cmd, emerge_cmd + ("--version",), emerge_cmd + ("--info",), emerge_cmd + ("--info", "--verbose"), emerge_cmd + ("--list-sets",), emerge_cmd + ("--check-news",), rm_cmd + ("-rf", cachedir), rm_cmd + ("-rf", cachedir_pregen), emerge_cmd + ("--regen",), rm_cmd + ("-rf", cachedir), ({"FEATURES" : "metadata-transfer"},) + \ emerge_cmd + ("--regen",), rm_cmd + ("-rf", cachedir), ({"FEATURES" : "metadata-transfer"},) + \ emerge_cmd + ("--regen",), rm_cmd + ("-rf", cachedir), egencache_cmd + ("--update",) + tuple(egencache_extra_args), ({"FEATURES" : "metadata-transfer"},) + \ emerge_cmd + ("--metadata",), rm_cmd + ("-rf", cachedir), ({"FEATURES" : "metadata-transfer"},) + \ emerge_cmd + 
("--metadata",), emerge_cmd + ("--metadata",), rm_cmd + ("-rf", cachedir), emerge_cmd + ("--oneshot", "virtual/foo"), lambda: self.assertFalse(os.path.exists( os.path.join(pkgdir, "virtual", "foo-0.tbz2"))), ({"FEATURES" : "unmerge-backup"},) + \ emerge_cmd + ("--unmerge", "virtual/foo"), lambda: self.assertTrue(os.path.exists( os.path.join(pkgdir, "virtual", "foo-0.tbz2"))), emerge_cmd + ("--pretend", "dev-libs/A"), ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"), emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"), emerge_cmd + ("-p", "dev-libs/B"), emerge_cmd + ("-p", "--newrepo", "dev-libs/B"), emerge_cmd + ("-B", "dev-libs/B",), emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",), # trigger clean prior to pkg_pretend as in bug #390711 ebuild_cmd + (test_ebuild, "unpack"), emerge_cmd + ("--oneshot", "dev-libs/A",), emerge_cmd + ("--noreplace", "dev-libs/A",), emerge_cmd + ("--config", "dev-libs/A",), emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"), emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"), emerge_cmd + ("--pretend", "--depclean",), emerge_cmd + ("--depclean",), quickpkg_cmd + ("--include-config", "y", "dev-libs/A",), # Test bug #523684, where a file renamed or removed by the # admin forces replacement files to be merged with config # protection. lambda: self.assertEqual(0, len(list(find_updated_config_files(eroot, shlex_split(settings["CONFIG_PROTECT"]))))), lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")), emerge_cmd + ("--usepkgonly", "dev-libs/A"), lambda: self.assertEqual(1, len(list(find_updated_config_files(eroot, shlex_split(settings["CONFIG_PROTECT"]))))), emaint_cmd + ("--check", "all"), emaint_cmd + ("--fix", "all"), fixpackages_cmd, regenworld_cmd, portageq_cmd + ("match", eroot, "dev-libs/A"), portageq_cmd + ("best_visible", eroot, "dev-libs/A"), portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"), portageq_cmd + ("contents", eroot, "dev-libs/A-1"), portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"), portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"), portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"), portageq_cmd + ("owners", eroot, eroot + "usr"), emerge_cmd + ("-p", eroot + "usr"), emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"), emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"), emerge_cmd + ("-C", "--quiet", "dev-libs/B"), emerge_cmd + ("--autounmask-continue", "dev-libs/C",), # Verify that the above --autounmask-continue command caused # USE=flag to be applied correctly to dev-libs/D. portageq_cmd + ("match", eroot, "dev-libs/D[flag]"), # Test cross-prefix usage, including chpathtool for binpkgs. 
({"EPREFIX" : cross_prefix},) + \ emerge_cmd + ("--usepkgonly", "dev-libs/A"), ({"EPREFIX" : cross_prefix},) + \ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"), ({"EPREFIX" : cross_prefix},) + \ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"), ({"EPREFIX" : cross_prefix},) + \ emerge_cmd + ("-C", "--quiet", "dev-libs/B"), ({"EPREFIX" : cross_prefix},) + \ emerge_cmd + ("-C", "--quiet", "dev-libs/A"), ({"EPREFIX" : cross_prefix},) + \ emerge_cmd + ("dev-libs/A",), ({"EPREFIX" : cross_prefix},) + \ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"), ({"EPREFIX" : cross_prefix},) + \ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"), # Test ROOT support ({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/B",), portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"), ) distdir = playground.distdir pkgdir = playground.pkgdir fake_bin = os.path.join(eprefix, "bin") portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage") profile_path = settings.profile_path user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH) path = os.environ.get("PATH") if path is not None and not path.strip(): path = None if path is None: path = "" else: path = ":" + path path = fake_bin + path pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "CLEAN_DELAY" : "0", "DISTDIR" : distdir, "EMERGE_WARNING_DELAY" : "0", "INFODIR" : "", "INFOPATH" : "", "PATH" : path, "PKGDIR" : pkgdir, "PORTAGE_INST_GID" : str(portage.data.portage_gid), "PORTAGE_INST_UID" : str(portage.data.portage_uid), "PORTAGE_PYTHON" : portage_python, "PORTAGE_REPOSITORIES" : settings.repositories.config_string(), "PORTAGE_TMPDIR" : portage_tmpdir, "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""), "PYTHONPATH" : pythonpath, "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin, } if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] updates_dir = os.path.join(test_repo_location, "profiles", "updates") dirs = [cachedir, cachedir_pregen, cross_eroot, cross_prefix, distdir, fake_bin, portage_tmpdir, updates_dir, user_config_dir, var_cache_edb] etc_symlinks = ("dispatch-conf.conf", "etc-update.conf") # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. 
# prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ["find", "prepstrip", "sed", "scanelf"] true_binary = find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") try: for d in dirs: ensure_dirs(d) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) for x in etc_symlinks: os.symlink(os.path.join(self.cnf_etc_path, x), os.path.join(eprefix, "etc", x)) with open(os.path.join(var_cache_edb, "counter"), 'wb') as f: f.write(b"100") # non-empty system set keeps --depclean quiet with open(os.path.join(profile_path, "packages"), 'w') as f: f.write("*dev-libs/token-system-pkg") for cp, xml_data in metadata_xml_files: with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f: f.write(playground.metadata_xml_template % xml_data) with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f: f.write(""" slotmove =app-doc/pms-3 2 3 move dev-util/git dev-vcs/git """) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. stdout = subprocess.PIPE for args in test_commands: if hasattr(args, '__call__'): args() continue if isinstance(args[0], dict): local_env = env.copy() local_env.update(args[0]) args = args[1:] else: local_env = env proc = subprocess.Popen(args, env=local_env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "emerge failed with args %s" % (args,)) finally: playground.cleanup()
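# A subtlety in testSimple() above: the lambdas in test_commands
# reference pkgdir, which is only assigned after the tuple is built.
# This works because Python closures resolve free variables when the
# lambda is called, not when it is defined. A minimal illustration:
def _late_binding_demo():
	commands = (lambda: target,)  # 'target' is not bound yet
	target = "defined later"      # bound before the lambda runs
	assert commands[0]() == "defined later"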
def testSimple(self): debug = False skip_reason = self._must_skip() if skip_reason: self.portage_skip = skip_reason self.assertFalse(True, skip_reason) return copyright_header = """# Copyright 1999-%s Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ """ % time.gmtime().tm_year repo_configs = { "test_repo": { "layout.conf": ("update-changelog = true", ), } } profiles = (("x86", "default/linux/x86/test_profile", "stable"), ) ebuilds = { "dev-libs/A-1": { "COPYRIGHT_HEADER": copyright_header, "DESCRIPTION": "Desc goes here", "EAPI": "4", "HOMEPAGE": "http://example.com", "IUSE": "flag", "KEYWORDS": "~x86", "LICENSE": "GPL-2", "RDEPEND": "flag? ( dev-libs/B[flag] )", }, "dev-libs/B-1": { "COPYRIGHT_HEADER": copyright_header, "DESCRIPTION": "Desc goes here", "EAPI": "4", "HOMEPAGE": "http://example.com", "IUSE": "flag", "KEYWORDS": "~x86", "LICENSE": "GPL-2", }, } licenses = ["GPL-2"] arch_list = ["x86"] metadata_dtd = os.path.join(PORTAGE_BASE_PATH, "cnf/metadata.dtd") metadata_xml_files = ( ( "dev-libs/A", { "herd": "base-system", "flags": "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ( "dev-libs/B", { "herd": "no-herd", "flags": "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ) use_desc = (("flag", "Description of how USE='flag' affects packages"), ) playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] portdb = playground.trees[playground.eroot]["porttree"].dbapi homedir = os.path.join(eroot, "home") distdir = os.path.join(eprefix, "distdir") portdir = settings["PORTDIR"] profiles_dir = os.path.join(portdir, "profiles") license_dir = os.path.join(portdir, "licenses") repoman_cmd = (portage._python_interpreter, "-Wd", os.path.join(PORTAGE_BIN_PATH, "repoman")) git_binary = find_binary("git") git_cmd = (git_binary, ) cp_binary = find_binary("cp") self.assertEqual(cp_binary is None, False, "cp command not found") cp_cmd = (cp_binary, ) test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) committer_name = "Gentoo Dev" committer_email = "*****@*****.**" git_test = ( ("", repoman_cmd + ("manifest", )), ("", git_cmd + ( "config", "--global", "user.name", committer_name, )), ("", git_cmd + ( "config", "--global", "user.email", committer_email, )), ("", git_cmd + ("init-db", )), ("", git_cmd + ("add", ".")), ("", git_cmd + ("commit", "-a", "-m", "add whole repo")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")), ("", repoman_cmd + ("commit", "-m", "bump to version 2")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")), ("dev-libs", repoman_cmd + ("commit", "-m", "bump to version 3")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")), ("dev-libs/A", repoman_cmd + ("commit", "-m", "bump to version 4")), ) pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX": eprefix, "DISTDIR": distdir, "GENTOO_COMMITTER_NAME": committer_name, 
"GENTOO_COMMITTER_EMAIL": committer_email, "HOME": homedir, "PATH": os.environ["PATH"], "PORTAGE_GRPNAME": os.environ["PORTAGE_GRPNAME"], "PORTAGE_USERNAME": os.environ["PORTAGE_USERNAME"], "PORTDIR": portdir, "PYTHONPATH": pythonpath, } if os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances env["FEATURES"] = "-sandbox" dirs = [homedir, license_dir, profiles_dir, distdir] try: for d in dirs: ensure_dirs(d) with open(os.path.join(portdir, "skel.ChangeLog"), 'w') as f: f.write(copyright_header) with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f: for x in profiles: f.write("%s %s %s\n" % x) for x in licenses: open(os.path.join(license_dir, x), 'wb').close() with open(os.path.join(profiles_dir, "arch.list"), 'w') as f: for x in arch_list: f.write("%s\n" % x) with open(os.path.join(profiles_dir, "use.desc"), 'w') as f: for k, v in use_desc: f.write("%s - %s\n" % (k, v)) for cp, xml_data in metadata_xml_files: with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f: f.write(playground.metadata_xml_template % xml_data) # Use a symlink to portdir, in order to trigger bugs # involving canonical vs. non-canonical paths. portdir_symlink = os.path.join(eroot, "portdir_symlink") os.symlink(portdir, portdir_symlink) # repoman checks metadata.dtd for recent CTIME, so copy the file in # order to ensure that the CTIME is current shutil.copyfile(metadata_dtd, os.path.join(distdir, "metadata.dtd")) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. stdout = subprocess.PIPE for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"): abs_cwd = os.path.join(portdir_symlink, cwd) proc = subprocess.Popen([ portage._python_interpreter, "-Wd", os.path.join(PORTAGE_BIN_PATH, "repoman"), "full" ], cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "repoman failed in %s" % (cwd, )) if git_binary is not None: for cwd, cmd in git_test: abs_cwd = os.path.join(portdir_symlink, cwd) proc = subprocess.Popen(cmd, cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "%s failed in %s" % ( cmd, cwd, )) finally: playground.cleanup()
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \ user_config={}, sets={}, world=[], world_sets=[], distfiles={}, eprefix=None, targetroot=False, debug=False): """ ebuilds: cpv -> metadata mapping simulating available ebuilds. installed: cpv -> metadata mapping simulating installed packages. If a metadata key is missing, it gets a default value. profile: settings defined by the profile. """ self.debug = debug if eprefix is None: self.eprefix = normalize_path(tempfile.mkdtemp()) # EPREFIX/bin is used by fake true_binaries. Real binaries goes into EPREFIX/usr/bin eubin = os.path.join(self.eprefix, "usr", "bin") ensure_dirs(eubin) essential_binaries = ( "awk", "basename", "bzip2", "cat", "chgrp", "chmod", "chown", "cp", "egrep", "env", "find", "grep", "head", "install", "ln", "mkdir", "mktemp", "mv", "readlink", "rm", "sed", "sort", "tar", "tr", "uname", "uniq", "xargs", ) # Exclude internal wrappers from PATH lookup. orig_path = os.environ['PATH'] included_paths = [] for path in orig_path.split(':'): if path and not fnmatch.fnmatch(path, '*/portage/*/ebuild-helpers*'): included_paths.append(path) try: os.environ['PATH'] = ':'.join(included_paths) for x in essential_binaries: path = find_binary(x) if path is None: raise portage.exception.CommandNotFound(x) os.symlink(path, os.path.join(eubin, x)) finally: os.environ['PATH'] = orig_path else: self.eprefix = normalize_path(eprefix) # Tests may override portage.const.EPREFIX in order to # simulate a prefix installation. It's reasonable to do # this because tests should be self-contained such that # the "real" value of portage.const.EPREFIX is entirely # irrelevant (see bug #492932). portage.const.EPREFIX = self.eprefix.rstrip(os.sep) self.eroot = self.eprefix + os.sep if targetroot: self.target_root = os.path.join(self.eroot, 'target_root') else: self.target_root = os.sep self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles") self.pkgdir = os.path.join(self.eprefix, "pkgdir") self.vdbdir = os.path.join(self.eroot, "var/db/pkg") os.makedirs(self.vdbdir) if not debug: portage.util.noiselimit = -2 self._repositories = {} #Make sure the main repo is always created self._get_repo_dir("test_repo") self._create_distfiles(distfiles) self._create_ebuilds(ebuilds) self._create_binpkgs(binpkgs) self._create_installed(installed) self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets) self._create_world(world, world_sets) self.settings, self.trees = self._load_config() self._create_ebuild_manifests(ebuilds) portage.util.noiselimit = 0
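
# Standalone variant (using shutil.which rather than portage's find_binary,
# so this is an approximation, not the original API) of the PATH filtering
# above: resolve a binary while skipping portage's internal ebuild-helpers
# wrapper directories.
import fnmatch
import os
import shutil

def find_real_binary(name, exclude_glob="*/portage/*/ebuild-helpers*"):
    """Locate name on PATH, ignoring directories matching exclude_glob."""
    paths = [p for p in os.environ.get("PATH", "").split(":")
             if p and not fnmatch.fnmatch(p, exclude_glob)]
    return shutil.which(name, path=":".join(paths))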
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):

    user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)

    try:
        os.makedirs(user_config_dir)
    except os.error:
        pass

    for repo in self.repo_dirs:
        repo_dir = self._get_repo_dir(repo)
        profile_dir = os.path.join(repo_dir, "profiles")
        metadata_dir = os.path.join(repo_dir, "metadata")
        os.makedirs(metadata_dir)

        # Create $REPO/profiles/categories
        categories = set()
        for cpv in ebuilds:
            ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo
            if ebuilds_repo is None:
                ebuilds_repo = "test_repo"
            if ebuilds_repo == repo:
                categories.add(catsplit(cpv)[0])

        categories_file = os.path.join(profile_dir, "categories")
        with open(categories_file, "w") as f:
            for cat in categories:
                f.write(cat + "\n")

        # Create $REPO/profiles/license_groups
        license_file = os.path.join(profile_dir, "license_groups")
        with open(license_file, "w") as f:
            f.write("EULA TEST\n")

        repo_config = repo_configs.get(repo)
        if repo_config:
            for config_file, lines in repo_config.items():
                if config_file not in self.config_files:
                    raise ValueError("Unknown config file: '%s'" % config_file)
                if config_file in ("layout.conf",):
                    file_name = os.path.join(repo_dir, "metadata", config_file)
                else:
                    file_name = os.path.join(profile_dir, config_file)
                with open(file_name, "w") as f:
                    for line in lines:
                        f.write("%s\n" % line)

        # Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
        os.makedirs(os.path.join(repo_dir, "eclass"))

        if repo == "test_repo":
            # Create a minimal profile in /usr/portage
            sub_profile_dir = os.path.join(
                profile_dir, "default", "linux", "x86", "test_profile")
            os.makedirs(sub_profile_dir)

            eapi_file = os.path.join(sub_profile_dir, "eapi")
            with open(eapi_file, "w") as f:
                f.write("0\n")

            make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
            with open(make_defaults_file, "w") as f:
                f.write("ARCH=\"x86\"\n")
                f.write("ACCEPT_KEYWORDS=\"x86\"\n")

            use_force_file = os.path.join(sub_profile_dir, "use.force")
            with open(use_force_file, "w") as f:
                f.write("x86\n")

            parent_file = os.path.join(sub_profile_dir, "parent")
            with open(parent_file, "w") as f:
                f.write("..\n")

            if profile:
                for config_file, lines in profile.items():
                    if config_file not in self.config_files:
                        raise ValueError("Unknown config file: '%s'" % config_file)
                    file_name = os.path.join(sub_profile_dir, config_file)
                    with open(file_name, "w") as f:
                        for line in lines:
                            f.write("%s\n" % line)

            # Create profile symlink
            os.symlink(sub_profile_dir,
                os.path.join(user_config_dir, "make.profile"))

            # Create minimal herds.xml
            herds_xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE herds SYSTEM "http://www.gentoo.org/dtd/herds.dtd">
<?xml-stylesheet href="/xsl/herds.xsl" type="text/xsl" ?>
<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl" ?>
<herds>
<herd>
  <name>base-system</name>
  <email>[email protected]</email>
  <description>Core system utilities and libraries.</description>
  <maintainer>
    <email>[email protected]</email>
    <name>Base System</name>
    <role>Base System Maintainer</role>
  </maintainer>
</herd>
</herds>
"""
            with open(os.path.join(metadata_dir, "metadata.xml"), 'w') as f:
                f.write(herds_xml)

    # Write empty entries for each repository, in order to exercise
    # RepoConfigLoader's repos.conf processing.
    repos_conf_file = os.path.join(user_config_dir, "repos.conf")
    with open(repos_conf_file, "w") as f:
        for repo in sorted(self.repo_dirs.keys()):
            f.write("[%s]\n" % repo)
            f.write("\n")

    for config_file, lines in user_config.items():
        if config_file not in self.config_files:
            raise ValueError("Unknown config file: '%s'" % config_file)
        file_name = os.path.join(user_config_dir, config_file)
        with open(file_name, "w") as f:
            for line in lines:
                f.write("%s\n" % line)

    # Create /usr/share/portage/config/make.globals
    make_globals_path = os.path.join(self.eroot,
        GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
    ensure_dirs(os.path.dirname(make_globals_path))
    os.symlink(os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals"),
        make_globals_path)

    # Create /usr/share/portage/config/sets/portage.conf
    default_sets_conf_dir = os.path.join(self.eroot,
        "usr/share/portage/config/sets")
    try:
        os.makedirs(default_sets_conf_dir)
    except os.error:
        pass

    provided_sets_portage_conf = \
        os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
    os.symlink(provided_sets_portage_conf,
        os.path.join(default_sets_conf_dir, "portage.conf"))

    set_config_dir = os.path.join(user_config_dir, "sets")
    try:
        os.makedirs(set_config_dir)
    except os.error:
        pass

    for sets_file, lines in sets.items():
        file_name = os.path.join(set_config_dir, sets_file)
        with open(file_name, "w") as f:
            for line in lines:
                f.write("%s\n" % line)

    user_config_dir = os.path.join(self.eroot, "etc", "portage")
    try:
        os.makedirs(user_config_dir)
    except os.error:
        pass

    for config_file, lines in user_config.items():
        if config_file not in self.config_files:
            raise ValueError("Unknown config file: '%s'" % config_file)
        file_name = os.path.join(user_config_dir, config_file)
        with open(file_name, "w") as f:
            for line in lines:
                f.write("%s\n" % line)
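
# Minimal sketch of the repos.conf generation above: RepoConfigLoader only
# needs the section headers to exist, so each repository gets an empty entry.
# This standalone helper (hypothetical name) mirrors that behavior.
def write_empty_repos_conf(path, repo_names):
    """Write a repos.conf containing one empty section per repository."""
    with open(path, "w") as f:
        for repo in sorted(repo_names):
            f.write("[%s]\n" % repo)
            f.write("\n")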
def testDoebuild(self): """ Invoke portage.doebuild() with the fd_pipes parameter, and check that the expected output appears in the pipe. This functionality is not used by portage internally, but it is supported for API consumers (see bug #475812). """ output_fd = 200 ebuild_body = ['S=${WORKDIR}'] for phase_func in ('pkg_info', 'pkg_nofetch', 'pkg_pretend', 'pkg_setup', 'src_unpack', 'src_prepare', 'src_configure', 'src_compile', 'src_test', 'src_install'): ebuild_body.append(('%s() { echo ${EBUILD_PHASE}' ' 1>&%s; }') % (phase_func, output_fd)) ebuild_body.append('') ebuild_body = '\n'.join(ebuild_body) ebuilds = { 'app-misct/foo-1': { 'EAPI' : '5', "MISC_CONTENT": ebuild_body, } } # Override things that may be unavailable, or may have portability # issues when running tests in exotic environments. # prepstrip - bug #447810 (bash read builtin EINTR problem) true_symlinks = ("find", "prepstrip", "sed", "scanelf") true_binary = portage.process.find_binary("true") self.assertEqual(true_binary is None, False, "true command not found") dev_null = open(os.devnull, 'wb') playground = ResolverPlayground(ebuilds=ebuilds) try: QueryCommand._db = playground.trees root_config = playground.trees[playground.eroot]['root_config'] portdb = root_config.trees["porttree"].dbapi settings = portage.config(clone=playground.settings) if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS") settings.features.add("noauto") settings.features.add("test") settings['PORTAGE_PYTHON'] = portage._python_interpreter settings['PORTAGE_QUIET'] = "1" settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get("PYTHONDONTWRITEBYTECODE", "") fake_bin = os.path.join(settings["EPREFIX"], "bin") portage.util.ensure_dirs(fake_bin) for x in true_symlinks: os.symlink(true_binary, os.path.join(fake_bin, x)) settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE") cpv = 'app-misct/foo-1' metadata = dict(zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package(built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name='ebuild') settings.setcpv(pkg) ebuildpath = portdb.findname(cpv) self.assertNotEqual(ebuildpath, None) for phase in ('info', 'nofetch', 'pretend', 'setup', 'unpack', 'prepare', 'configure', 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'): pr, pw = os.pipe() producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase), doebuild_kwargs={"settings" : settings, "mydbapi": portdb, "tree": "porttree", "vartree": root_config.trees["vartree"], "fd_pipes": { 1: dev_null.fileno(), 2: dev_null.fileno(), output_fd: pw, }, "prev_mtimes": {}}) consumer = PipeReader( input_files={"producer" : pr}) task_scheduler = TaskScheduler(iter([producer, consumer]), max_jobs=2) try: task_scheduler.start() finally: # PipeReader closes pr os.close(pw) task_scheduler.wait() output = portage._unicode_decode( consumer.getvalue()).rstrip("\n") if task_scheduler.returncode != os.EX_OK: portage.writemsg(output, noiselevel=-1) self.assertEqual(task_scheduler.returncode, os.EX_OK) if phase not in ('clean', 'merge', 'qmerge'): self.assertEqual(phase, output) finally: dev_null.close() playground.cleanup() QueryCommand._db = None
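
# Generic, self-contained illustration (not portage's API) of the fd-passing
# idea exercised by testDoebuild above: the child process writes to an
# inherited file descriptor while the parent reads from the other end of the
# pipe. The fd number is forwarded via argv instead of fd_pipes here.
import os
import subprocess
import sys

pr, pw = os.pipe()
proc = subprocess.Popen(
    [sys.executable, "-c",
     "import os, sys; os.write(int(sys.argv[1]), b'pkg_setup\\n')",
     str(pw)],
    pass_fds=(pw,))
os.close(pw)  # close the parent's copy; the child keeps its own open
print(os.read(pr, 1024))  # b'pkg_setup\n'
proc.wait()
os.close(pr)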
def _prepare_workdir(mysettings):
    workdir_mode = 0o700
    try:
        mode = mysettings["PORTAGE_WORKDIR_MODE"]
        if mode.isdigit():
            parsed_mode = int(mode, 8)
        elif mode == "":
            raise KeyError()
        else:
            raise ValueError()
        if parsed_mode & 0o7777 != parsed_mode:
            raise ValueError("Invalid file mode: %s" % mode)
        else:
            workdir_mode = parsed_mode
    except KeyError as e:
        writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") %
            oct(workdir_mode))
    except ValueError as e:
        if len(str(e)) > 0:
            writemsg("%s\n" % e)
        writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") %
            (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
    mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
    try:
        apply_secpass_permissions(mysettings["WORKDIR"],
            uid=portage_uid, gid=portage_gid, mode=workdir_mode)
    except FileNotFound:
        pass  # ebuild.sh will create it

    if mysettings.get("PORTAGE_LOGDIR", "") == "":
        while "PORTAGE_LOGDIR" in mysettings:
            del mysettings["PORTAGE_LOGDIR"]
    if "PORTAGE_LOGDIR" in mysettings:
        try:
            modified = ensure_dirs(mysettings["PORTAGE_LOGDIR"])
            if modified:
                # Only initialize group/mode if the directory doesn't
                # exist, so that we don't override permissions if they
                # were previously set by the administrator.
                # NOTE: These permissions should be compatible with our
                # default logrotate config as discussed in bug 374287.
                apply_secpass_permissions(mysettings["PORTAGE_LOGDIR"],
                    uid=portage_uid, gid=portage_gid, mode=0o2770)
        except PortageException as e:
            writemsg("!!! %s\n" % str(e), noiselevel=-1)
            writemsg(_("!!! Permission issues with PORTAGE_LOGDIR='%s'\n") %
                mysettings["PORTAGE_LOGDIR"], noiselevel=-1)
            writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
            while "PORTAGE_LOGDIR" in mysettings:
                del mysettings["PORTAGE_LOGDIR"]

    compress_log_ext = ''
    if 'compress-build-logs' in mysettings.features:
        compress_log_ext = '.gz'

    logdir_subdir_ok = False
    if "PORTAGE_LOGDIR" in mysettings and \
        os.access(mysettings["PORTAGE_LOGDIR"], os.W_OK):
        logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
        logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
        if not os.path.exists(logid_path):
            open(_unicode_encode(logid_path), 'w').close()
        logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
            time.gmtime(os.stat(logid_path).st_mtime)),
            encoding=_encodings['content'], errors='replace')

        if "split-log" in mysettings.features:
            log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
            mysettings["PORTAGE_LOG_FILE"] = os.path.join(
                log_subdir, "%s:%s.log%s" %
                (mysettings["PF"], logid_time, compress_log_ext))
        else:
            log_subdir = logdir
            mysettings["PORTAGE_LOG_FILE"] = os.path.join(
                logdir, "%s:%s:%s.log%s" %
                (mysettings["CATEGORY"], mysettings["PF"], logid_time,
                compress_log_ext))

        if log_subdir is logdir:
            logdir_subdir_ok = True
        else:
            try:
                _ensure_log_subdirs(logdir, log_subdir)
            except PortageException as e:
                writemsg("!!! %s\n" % (e,), noiselevel=-1)

            if os.access(log_subdir, os.W_OK):
                logdir_subdir_ok = True
            else:
                writemsg("!!! %s: %s\n" % (_("Permission Denied"), log_subdir),
                    noiselevel=-1)

    tmpdir_log_path = os.path.join(
        mysettings["T"], "build.log%s" % compress_log_ext)
    if not logdir_subdir_ok:
        # NOTE: When sesandbox is enabled, the local SELinux security policies
        # may not allow output to be piped out of the sesandbox domain. The
        # current policy will allow it to work when a pty is available, but
        # not through a normal pipe. See bug #162404.
        mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
    else:
        # Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
        # requested in bug #412865.
        make_new_symlink = False
        try:
            target = os.readlink(tmpdir_log_path)
        except OSError:
            make_new_symlink = True
        else:
            if target != mysettings["PORTAGE_LOG_FILE"]:
                make_new_symlink = True
        if make_new_symlink:
            try:
                os.unlink(tmpdir_log_path)
            except OSError:
                pass
            os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
async def _async_test_simple( self, playground, metadata_xml_files, profiles, profile, licenses, arch_list, use_desc, metadata_xsd, copyright_header, debug, ): settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] portdb = playground.trees[playground.eroot]["porttree"].dbapi homedir = os.path.join(eroot, "home") distdir = os.path.join(eprefix, "distdir") test_repo_location = settings.repositories["test_repo"].location profiles_dir = os.path.join(test_repo_location, "profiles") license_dir = os.path.join(test_repo_location, "licenses") repoman_cmd = (portage._python_interpreter, "-b", "-Wd", os.path.join(self.bindir, "repoman")) git_binary = find_binary("git") git_cmd = (git_binary, ) cp_binary = find_binary("cp") self.assertEqual(cp_binary is None, False, "cp command not found") cp_cmd = (cp_binary, ) test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) committer_name = "Gentoo Dev" committer_email = "*****@*****.**" expected_warnings = { "returncode": 0, "warns": { "variable.phase": [ "dev-libs/C/C-0.ebuild: line 15: phase pkg_preinst: EAPI 7: variable A: Forbidden reference to variable specified by PMS", "dev-libs/C/C-0.ebuild: line 15: phase pkg_preinst: EAPI 7: variable BROOT: Forbidden reference to variable specified by PMS", ] }, } git_test = ( ("", RepomanRun(args=["--version"])), ("", RepomanRun(args=["manifest"])), ("", git_cmd + ( "config", "--global", "user.name", committer_name, )), ("", git_cmd + ( "config", "--global", "user.email", committer_email, )), ("", git_cmd + ("init-db", )), ("", git_cmd + ("add", ".")), ("", git_cmd + ("commit", "-a", "-m", "add whole repo")), ("", RepomanRun(args=["full", "-d"], expected=expected_warnings)), ("", RepomanRun(args=[ "full", "--include-profiles", "default/linux/x86/test_profile" ], expected=expected_warnings)), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "2.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")), ("", RepomanRun(args=["commit", "-m", "cat/pkg: bump to version 2"], expected=expected_warnings)), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")), ("dev-libs", RepomanRun(args=["commit", "-m", "cat/pkg: bump to version 3"], expected=expected_warnings)), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")), ("dev-libs/A", RepomanRun(args=["commit", "-m", "cat/pkg: bump to version 4"])), ) env = { "PORTAGE_OVERRIDE_EPREFIX": eprefix, "DISTDIR": distdir, "GENTOO_COMMITTER_NAME": committer_name, "GENTOO_COMMITTER_EMAIL": committer_email, "HOME": homedir, "PATH": os.environ["PATH"], "PORTAGE_GRPNAME": os.environ["PORTAGE_GRPNAME"], "PORTAGE_USERNAME": os.environ["PORTAGE_USERNAME"], "PORTAGE_REPOSITORIES": settings.repositories.config_string(), "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""), } if os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances env["FEATURES"] = "-sandbox -usersandbox" dirs = [homedir, license_dir, profiles_dir, distdir] try: for d in dirs: ensure_dirs(d) with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f: f.write(copyright_header) with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f: for x in profiles: f.write("%s %s %s\n" % x) # ResolverPlayground only created the first profile, # so create the remaining ones. 
for x in profiles[1:]: sub_profile_dir = os.path.join(profiles_dir, x[1]) ensure_dirs(sub_profile_dir) for config_file, lines in profile.items(): file_name = os.path.join(sub_profile_dir, config_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) for x in licenses: open(os.path.join(license_dir, x), 'wb').close() with open(os.path.join(profiles_dir, "arch.list"), 'w') as f: for x in arch_list: f.write("%s\n" % x) with open(os.path.join(profiles_dir, "use.desc"), 'w') as f: for k, v in use_desc: f.write("%s - %s\n" % (k, v)) for cp, xml_data in metadata_xml_files: with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f: f.write(playground.metadata_xml_template % xml_data) # Use a symlink to test_repo, in order to trigger bugs # involving canonical vs. non-canonical paths. test_repo_symlink = os.path.join(eroot, "test_repo_symlink") os.symlink(test_repo_location, test_repo_symlink) metadata_xsd_dest = os.path.join( test_repo_location, 'metadata/xml-schema/metadata.xsd') os.makedirs(os.path.dirname(metadata_xsd_dest)) os.symlink(metadata_xsd, metadata_xsd_dest) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. stdout = subprocess.PIPE for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B", "dev-libs/C"): abs_cwd = os.path.join(test_repo_symlink, cwd) proc = await asyncio.create_subprocess_exec(*(repoman_cmd + ("full", )), env=env, stderr=None, stdout=stdout, cwd=abs_cwd) if debug: await proc.wait() else: output, _err = await proc.communicate() await proc.wait() if proc.returncode != os.EX_OK: portage.writemsg(output) self.assertEqual(os.EX_OK, proc.returncode, "repoman failed in %s" % (cwd, )) if git_binary is not None: for cwd, cmd in git_test: abs_cwd = os.path.join(test_repo_symlink, cwd) if isinstance(cmd, RepomanRun): cmd.cwd = abs_cwd cmd.env = env cmd.debug = debug await cmd.run() if cmd.result[ "result"] != cmd.expected and cmd.result.get( "stdio"): portage.writemsg(cmd.result["stdio"]) try: self.assertEqual(cmd.result["result"], cmd.expected) except Exception: print(cmd.result["result"], file=sys.stderr, flush=True) raise continue proc = await asyncio.create_subprocess_exec(*cmd, env=env, stderr=None, stdout=stdout, cwd=abs_cwd) if debug: await proc.wait() else: output, _err = await proc.communicate() await proc.wait() if proc.returncode != os.EX_OK: portage.writemsg(output) self.assertEqual( os.EX_OK, proc.returncode, "%s failed in %s" % ( cmd, cwd, ), ) finally: playground.cleanup()
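
# Condensed, self-contained sketch of the asyncio subprocess pattern used in
# _async_test_simple above: run a command, capture stdout unless debugging,
# and report output only on failure. Names here are hypothetical.
import asyncio
import subprocess
import sys

async def run_and_check(cmd, cwd=None, env=None, debug=False):
    stdout = None if debug else subprocess.PIPE
    proc = await asyncio.create_subprocess_exec(
        *cmd, cwd=cwd, env=env, stderr=None, stdout=stdout)
    if debug:
        await proc.wait()
    else:
        output, _err = await proc.communicate()
        if proc.returncode != 0:
            sys.stderr.write(output.decode(errors="replace"))
    return proc.returncode

# Example: asyncio.run(run_and_check(["true"]))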
def __init__( self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, user_config={}, sets={}, world=[], world_sets=[], distfiles={}, eclasses={}, eprefix=None, targetroot=False, debug=False, ): """ ebuilds: cpv -> metadata mapping simulating available ebuilds. installed: cpv -> metadata mapping simulating installed packages. If a metadata key is missing, it gets a default value. profile: settings defined by the profile. """ self.debug = debug if eprefix is None: self.eprefix = normalize_path(tempfile.mkdtemp()) # EPREFIX/bin is used by fake true_binaries. Real binaries goes into EPREFIX/usr/bin eubin = os.path.join(self.eprefix, "usr", "bin") ensure_dirs(eubin) for x in self.portage_bin: os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eubin, x)) eusbin = os.path.join(self.eprefix, "usr", "sbin") ensure_dirs(eusbin) for x in self.portage_sbin: os.symlink(os.path.join(PORTAGE_BIN_PATH, x), os.path.join(eusbin, x)) essential_binaries = ( "awk", "basename", "bzip2", "cat", "chgrp", "chmod", "chown", "comm", "cp", "egrep", "env", "find", "flock", "grep", "head", "install", "ln", "mkdir", "mkfifo", "mktemp", "mv", "readlink", "rm", "sed", "sort", "tar", "tr", "uname", "uniq", "xargs", "zstd", ) # Exclude internal wrappers from PATH lookup. orig_path = os.environ["PATH"] included_paths = [] for path in orig_path.split(":"): if path and not fnmatch.fnmatch(path, "*/portage/*/ebuild-helpers*"): included_paths.append(path) try: os.environ["PATH"] = ":".join(included_paths) for x in essential_binaries: path = find_binary(x) if path is None: raise portage.exception.CommandNotFound(x) os.symlink(path, os.path.join(eubin, x)) finally: os.environ["PATH"] = orig_path else: self.eprefix = normalize_path(eprefix) # Tests may override portage.const.EPREFIX in order to # simulate a prefix installation. It's reasonable to do # this because tests should be self-contained such that # the "real" value of portage.const.EPREFIX is entirely # irrelevant (see bug #492932). self._orig_eprefix = portage.const.EPREFIX portage.const.EPREFIX = self.eprefix.rstrip(os.sep) self.eroot = self.eprefix + os.sep if targetroot: self.target_root = os.path.join(self.eroot, "target_root") else: self.target_root = os.sep self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles") self.pkgdir = os.path.join(self.eprefix, "pkgdir") self.vdbdir = os.path.join(self.eroot, "var/db/pkg") os.makedirs(self.vdbdir) if not debug: portage.util.noiselimit = -2 self._repositories = {} # Make sure the main repo is always created self._get_repo_dir("test_repo") self._create_distfiles(distfiles) self._create_ebuilds(ebuilds) self._create_installed(installed) self._create_profile(ebuilds, eclasses, installed, profile, repo_configs, user_config, sets) self._create_world(world, world_sets) self.settings, self.trees = self._load_config() self.gpg = None self._create_binpkgs(binpkgs) self._create_ebuild_manifests(ebuilds) portage.util.noiselimit = 0
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={},
        user_config={}, sets={}, world=[], world_sets=[], distfiles={},
        eprefix=None, targetroot=False, debug=False):
    """
    ebuilds: cpv -> metadata mapping simulating available ebuilds.
    installed: cpv -> metadata mapping simulating installed packages.
        If a metadata key is missing, it gets a default value.
    profile: settings defined by the profile.
    """

    self.debug = debug
    if eprefix is None:
        self.eprefix = normalize_path(tempfile.mkdtemp())

        # EPREFIX/bin is used by fake true_binaries. Real binaries go into EPREFIX/usr/bin.
        eubin = os.path.join(self.eprefix, "usr", "bin")
        ensure_dirs(eubin)
        essential_binaries = (
            "awk", "basename", "bzip2", "cat", "chmod", "chown", "cp",
            "egrep", "env", "find", "grep", "head", "install", "ln",
            "mkdir", "mktemp", "mv", "rm", "sed", "sort", "tar", "tr",
            "uname", "uniq", "xargs",
        )
        for x in essential_binaries:
            path = find_binary(x)
            if path is None:
                # find_binary returns None for a missing command; fail with
                # a clear error rather than letting os.symlink raise a
                # confusing TypeError on a None source path.
                raise portage.exception.CommandNotFound(x)
            os.symlink(path, os.path.join(eubin, x))
    else:
        self.eprefix = normalize_path(eprefix)

        # Tests may override portage.const.EPREFIX in order to
        # simulate a prefix installation. It's reasonable to do
        # this because tests should be self-contained such that
        # the "real" value of portage.const.EPREFIX is entirely
        # irrelevant (see bug #492932).
        portage.const.EPREFIX = self.eprefix.rstrip(os.sep)

    self.eroot = self.eprefix + os.sep
    if targetroot:
        self.target_root = os.path.join(self.eroot, 'target_root')
    else:
        self.target_root = os.sep
    self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
    self.pkgdir = os.path.join(self.eprefix, "pkgdir")
    self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
    os.makedirs(self.vdbdir)

    if not debug:
        portage.util.noiselimit = -2

    self._repositories = {}
    # Make sure the main repo is always created
    self._get_repo_dir("test_repo")

    self._create_distfiles(distfiles)
    self._create_ebuilds(ebuilds)
    self._create_binpkgs(binpkgs)
    self._create_installed(installed)
    self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
    self._create_world(world, world_sets)

    self.settings, self.trees = self._load_config()

    self._create_ebuild_manifests(ebuilds)

    portage.util.noiselimit = 0
def testSimple(self): debug = False skip_reason = self._must_skip() if skip_reason: self.portage_skip = skip_reason self.assertFalse(True, skip_reason) return copyright_header = """# Copyright 1999-%s Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 # $Header: $ """ % time.gmtime().tm_year repo_configs = { "test_repo": { "layout.conf": ( "update-changelog = true", ), } } profiles = ( ("x86", "default/linux/x86/test_profile", "stable"), ("x86", "default/linux/x86/test_dev", "dev"), ("x86", "default/linux/x86/test_exp", "exp"), ) profile = { "eapi": ("5",), "package.use.stable.mask": ("dev-libs/A flag",) } ebuilds = { "dev-libs/A-0": { "COPYRIGHT_HEADER" : copyright_header, "DESCRIPTION" : "Desc goes here", "EAPI" : "5", "HOMEPAGE" : "https://example.com", "IUSE" : "flag", "KEYWORDS": "x86", "LICENSE": "GPL-2", "RDEPEND": "flag? ( dev-libs/B[flag] )", }, "dev-libs/A-1": { "COPYRIGHT_HEADER" : copyright_header, "DESCRIPTION" : "Desc goes here", "EAPI" : "4", "HOMEPAGE" : "https://example.com", "IUSE" : "flag", "KEYWORDS": "~x86", "LICENSE": "GPL-2", "RDEPEND": "flag? ( dev-libs/B[flag] )", }, "dev-libs/B-1": { "COPYRIGHT_HEADER" : copyright_header, "DESCRIPTION" : "Desc goes here", "EAPI" : "4", "HOMEPAGE" : "https://example.com", "IUSE" : "flag", "KEYWORDS": "~x86", "LICENSE": "GPL-2", }, "dev-libs/C-0": { "COPYRIGHT_HEADER" : copyright_header, "DESCRIPTION" : "Desc goes here", "EAPI" : "4", "HOMEPAGE" : "https://example.com", "IUSE" : "flag", # must be unstable, since dev-libs/A[flag] is stable masked "KEYWORDS": "~x86", "LICENSE": "GPL-2", "RDEPEND": "flag? ( dev-libs/A[flag] )", }, } licenses = ["GPL-2"] arch_list = ["x86"] metadata_xsd = os.path.join(REPOMAN_BASE_PATH, "cnf/metadata.xsd") metadata_xml_files = ( ( "dev-libs/A", { "flags" : "<flag name='flag' restrict='>=dev-libs/A-0'>Description of how USE='flag' affects this package</flag>", }, ), ( "dev-libs/B", { "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ( "dev-libs/C", { "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>", }, ), ) use_desc = ( ("flag", "Description of how USE='flag' affects packages"), ) playground = ResolverPlayground(ebuilds=ebuilds, profile=profile, repo_configs=repo_configs, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] portdb = playground.trees[playground.eroot]["porttree"].dbapi homedir = os.path.join(eroot, "home") distdir = os.path.join(eprefix, "distdir") test_repo_location = settings.repositories["test_repo"].location profiles_dir = os.path.join(test_repo_location, "profiles") license_dir = os.path.join(test_repo_location, "licenses") repoman_cmd = (portage._python_interpreter, "-b", "-Wd", os.path.join(self.bindir, "repoman")) git_binary = find_binary("git") git_cmd = (git_binary,) cp_binary = find_binary("cp") self.assertEqual(cp_binary is None, False, "cp command not found") cp_cmd = (cp_binary,) test_ebuild = portdb.findname("dev-libs/A-1") self.assertFalse(test_ebuild is None) committer_name = "Gentoo Dev" committer_email = "*****@*****.**" git_test = ( ("", repoman_cmd + ("manifest",)), ("", git_cmd + ("config", "--global", "user.name", committer_name,)), ("", git_cmd + ("config", "--global", "user.email", committer_email,)), ("", git_cmd + ("init-db",)), ("", git_cmd + ("add", ".")), ("", git_cmd + ("commit", "-a", "-m", "add whole repo")), ("", repoman_cmd + ("full", "-d")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + 
"2.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "2.ebuild")), ("", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 2")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "3.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "3.ebuild")), ("dev-libs", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 3")), ("", cp_cmd + (test_ebuild, test_ebuild[:-8] + "4.ebuild")), ("", git_cmd + ("add", test_ebuild[:-8] + "4.ebuild")), ("dev-libs/A", repoman_cmd + ("commit", "-m", "cat/pkg: bump to version 4")), ) env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "DISTDIR" : distdir, "GENTOO_COMMITTER_NAME" : committer_name, "GENTOO_COMMITTER_EMAIL" : committer_email, "HOME" : homedir, "PATH" : os.environ["PATH"], "PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"], "PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"], "PORTAGE_REPOSITORIES" : settings.repositories.config_string(), "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""), } if os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances env["FEATURES"] = "-sandbox -usersandbox" dirs = [homedir, license_dir, profiles_dir, distdir] try: for d in dirs: ensure_dirs(d) with open(os.path.join(test_repo_location, "skel.ChangeLog"), 'w') as f: f.write(copyright_header) with open(os.path.join(profiles_dir, "profiles.desc"), 'w') as f: for x in profiles: f.write("%s %s %s\n" % x) # ResolverPlayground only created the first profile, # so create the remaining ones. for x in profiles[1:]: sub_profile_dir = os.path.join(profiles_dir, x[1]) ensure_dirs(sub_profile_dir) for config_file, lines in profile.items(): file_name = os.path.join(sub_profile_dir, config_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) for x in licenses: open(os.path.join(license_dir, x), 'wb').close() with open(os.path.join(profiles_dir, "arch.list"), 'w') as f: for x in arch_list: f.write("%s\n" % x) with open(os.path.join(profiles_dir, "use.desc"), 'w') as f: for k, v in use_desc: f.write("%s - %s\n" % (k, v)) for cp, xml_data in metadata_xml_files: with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f: f.write(playground.metadata_xml_template % xml_data) # Use a symlink to test_repo, in order to trigger bugs # involving canonical vs. non-canonical paths. test_repo_symlink = os.path.join(eroot, "test_repo_symlink") os.symlink(test_repo_location, test_repo_symlink) metadata_xsd_dest = os.path.join(test_repo_location, 'metadata/xml-schema/metadata.xsd') os.makedirs(os.path.dirname(metadata_xsd_dest)) os.symlink(metadata_xsd, metadata_xsd_dest) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. 
stdout = subprocess.PIPE for cwd in ("", "dev-libs", "dev-libs/A", "dev-libs/B"): abs_cwd = os.path.join(test_repo_symlink, cwd) proc = subprocess.Popen(repoman_cmd + ("full",), cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "repoman failed in %s" % (cwd,)) if git_binary is not None: for cwd, cmd in git_test: abs_cwd = os.path.join(test_repo_symlink, cwd) proc = subprocess.Popen(cmd, cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "%s failed in %s" % (cmd, cwd,)) finally: playground.cleanup()
def testEbuildFetch(self): user_config = { "make.conf": ( 'GENTOO_MIRRORS="{scheme}://{host}:{port}"', ), } distfiles = { 'bar': b'bar\n', 'foo': b'foo\n', } ebuilds = { 'dev-libs/A-1': { 'EAPI': '7', 'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar {scheme}://{host}:{port}/distfiles/foo.txt -> foo''', }, } loop = SchedulerInterface(global_event_loop()) def run_async(func, *args, **kwargs): with ForkExecutor(loop=loop) as executor: return loop.run_until_complete(loop.run_in_executor(executor, functools.partial(func, *args, **kwargs))) scheme = 'http' host = '127.0.0.1' content = {} content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n' for k, v in distfiles.items(): # mirror path content['/distfiles/{}'.format(k)] = v # upstream path content['/distfiles/{}.txt'.format(k)] = v with AsyncHTTPServer(host, content, loop) as server: ebuilds_subst = {} for cpv, metadata in ebuilds.items(): metadata = metadata.copy() metadata['SRC_URI'] = metadata['SRC_URI'].format( scheme=scheme, host=host, port=server.server_port) ebuilds_subst[cpv] = metadata user_config_subst = user_config.copy() for configname, configdata in user_config.items(): configdata_sub = [] for line in configdata: configdata_sub.append(line.format( scheme=scheme, host=host, port=server.server_port)) user_config_subst[configname] = tuple(configdata_sub) playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles, user_config=user_config_subst) ro_distdir = tempfile.mkdtemp() eubin = os.path.join(playground.eprefix, "usr", "bin") try: fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND']) fetch_bin = portage.process.find_binary(fetchcommand[0]) if fetch_bin is None: self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND'])) os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin))) resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND']) resume_bin = portage.process.find_binary(resumecommand[0]) if resume_bin is None: self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND'])) if resume_bin != fetch_bin: os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin))) root_config = playground.trees[playground.eroot]['root_config'] portdb = root_config.trees["porttree"].dbapi settings = config(clone=playground.settings) # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given. foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(scheme=scheme, host=host, port=server.server_port),)} foo_path = os.path.join(settings['DISTDIR'], 'foo') foo_stale_content = b'stale content\n' with open(foo_path, 'wb') as f: f.write(b'stale content\n') self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False))) with open(foo_path, 'rb') as f: self.assertEqual(f.read(), foo_stale_content) with open(foo_path, 'rb') as f: self.assertNotEqual(f.read(), distfiles['foo']) # Use force=True to update the stale file. self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True))) with open(foo_path, 'rb') as f: self.assertEqual(f.read(), distfiles['foo']) # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR. # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that # FETCHCOMMAND must perform atomic rename itself due to read-only # DISTDIR. 
with open(foo_path, 'wb') as f: f.write(b'stale content\n') orig_fetchcommand = settings['FETCHCOMMAND'] orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode temp_fetchcommand = os.path.join(eubin, 'fetchcommand') with open(temp_fetchcommand, 'w') as f: f.write(""" set -e URI=$1 DISTDIR=$2 FILE=$3 trap 'chmod a-w "${DISTDIR}"' EXIT chmod ug+w "${DISTDIR}" %s mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}" """ % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__')) settings['FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (BASH_BINARY, temp_fetchcommand) settings.features.add('skiprocheck') settings.features.remove('distlocks') os.chmod(settings['DISTDIR'], 0o555) try: self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True))) finally: settings['FETCHCOMMAND'] = orig_fetchcommand os.chmod(settings['DISTDIR'], orig_distdir_mode) settings.features.remove('skiprocheck') settings.features.add('distlocks') os.unlink(temp_fetchcommand) with open(foo_path, 'rb') as f: self.assertEqual(f.read(), distfiles['foo']) # Test emirrordist invocation. emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd', os.path.join(self.bindir, 'emirrordist'), '--distfiles', settings['DISTDIR'], '--config-root', settings['EPREFIX'], '--repositories-configuration', settings.repositories.config_string(), '--repo', 'test_repo', '--mirror') env = settings.environ() env['PYTHONPATH'] = ':'.join( filter(None, [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':'))) for k in distfiles: os.unlink(os.path.join(settings['DISTDIR'], k)) proc = loop.run_until_complete(asyncio.create_subprocess_exec(*emirrordist_cmd, env=env)) self.assertEqual(loop.run_until_complete(proc.wait()), 0) for k in distfiles: with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Tests only work with one ebuild at a time, so the config # pool only needs a single config instance. 
class config_pool: @staticmethod def allocate(): return settings @staticmethod def deallocate(settings): pass def async_fetch(pkg, ebuild_path): fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path, fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop) fetcher.start() return fetcher.async_wait() for cpv in ebuilds: metadata = dict(zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package(built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name='ebuild') settings.setcpv(pkg) ebuild_path = portdb.findname(pkg.cpv) portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb) # Test good files in DISTDIR for k in settings['AA'].split(): os.stat(os.path.join(settings['DISTDIR'], k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test digestgen with fetch os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest')) for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) with ForkExecutor(loop=loop) as executor: self.assertTrue(bool(loop.run_until_complete( loop.run_in_executor(executor, functools.partial( digestgen, mysettings=settings, myportdb=portdb))))) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test missing files in DISTDIR for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test empty files in DISTDIR for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) with open(file_path, 'wb') as f: pass self.assertEqual(os.stat(file_path).st_size, 0) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test non-empty files containing null bytes in DISTDIR for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) with open(file_path, 'wb') as f: f.write(len(distfiles[k]) * b'\0') self.assertEqual(os.stat(file_path).st_size, len(distfiles[k])) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test PORTAGE_RO_DISTDIRS settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir) orig_fetchcommand = settings['FETCHCOMMAND'] orig_resumecommand = settings['RESUMECOMMAND'] try: settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = '' for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) os.rename(file_path, os.path.join(ro_distdir, k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) self.assertTrue(os.path.islink(file_path)) with open(file_path, 'rb') as f: self.assertEqual(f.read(), distfiles[k]) os.unlink(file_path) finally: settings.pop('PORTAGE_RO_DISTDIRS') settings['FETCHCOMMAND'] = orig_fetchcommand settings['RESUMECOMMAND'] = orig_resumecommand # Test local filesystem in 
GENTOO_MIRRORS orig_mirrors = settings['GENTOO_MIRRORS'] orig_fetchcommand = settings['FETCHCOMMAND'] try: settings['GENTOO_MIRRORS'] = ro_distdir settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = '' self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: settings['GENTOO_MIRRORS'] = orig_mirrors settings['FETCHCOMMAND'] = orig_fetchcommand settings['RESUMECOMMAND'] = orig_resumecommand # Test readonly DISTDIR orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode try: os.chmod(settings['DISTDIR'], 0o555) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: os.chmod(settings['DISTDIR'], orig_distdir_mode) # Test parallel-fetch mode settings['PORTAGE_PARALLEL_FETCHONLY'] = '1' try: self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: settings.pop('PORTAGE_PARALLEL_FETCHONLY') # Test RESUMECOMMAND orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] try: settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2' for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) os.unlink(file_path) with open(file_path + _download_suffix, 'wb') as f: f.write(distfiles[k][:2]) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR orig_fetchcommand = settings['FETCHCOMMAND'] orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) try: os.chmod(settings['DISTDIR'], 0o555) settings['FETCHCOMMAND'] = '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"')) settings.features.add('skiprocheck') settings.features.remove('distlocks') self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) finally: settings['FETCHCOMMAND'] = orig_fetchcommand os.chmod(settings['DISTDIR'], orig_distdir_mode) settings.features.remove('skiprocheck') settings.features.add('distlocks') finally: shutil.rmtree(ro_distdir) playground.cleanup()
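
# Hedged sketch of the FETCHCOMMAND wrapper technique used twice in the test
# above for the read-only DISTDIR + skiprocheck cases: wrap the real fetch
# command in a bash -c script that temporarily makes DISTDIR writable and
# restores the read-only bit afterwards regardless of the fetch result. The
# helper name is hypothetical; the string layout mirrors the test's own.
def wrap_fetchcommand_for_ro_distdir(bash_binary, orig_fetchcommand):
    """Return a FETCHCOMMAND string that chmods DISTDIR around the real fetch."""
    inner = orig_fetchcommand.replace('"', '\\"')
    return ('"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; '
            'status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"'
            % (bash_binary, inner))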
def _create_profile(self, ebuilds, eclasses, installed, profile, repo_configs, user_config, sets): user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH) try: os.makedirs(user_config_dir) except os.error: pass for repo in self._repositories: if repo == "DEFAULT": continue repo_dir = self._get_repo_dir(repo) profile_dir = os.path.join(repo_dir, "profiles") metadata_dir = os.path.join(repo_dir, "metadata") os.makedirs(metadata_dir) # Create $REPO/profiles/categories categories = set() for cpv in ebuilds: ebuilds_repo = Atom("=" + cpv, allow_repo=True).repo if ebuilds_repo is None: ebuilds_repo = "test_repo" if ebuilds_repo == repo: categories.add(catsplit(cpv)[0]) categories_file = os.path.join(profile_dir, "categories") with open(categories_file, "w") as f: for cat in categories: f.write(cat + "\n") # Create $REPO/profiles/license_groups license_file = os.path.join(profile_dir, "license_groups") with open(license_file, "w") as f: f.write("EULA TEST\n") repo_config = repo_configs.get(repo) if repo_config: for config_file, lines in repo_config.items(): if config_file not in self.config_files and not any( fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files): raise ValueError("Unknown config file: '%s'" % config_file) if config_file in ("layout.conf", ): file_name = os.path.join(repo_dir, "metadata", config_file) else: file_name = os.path.join(profile_dir, config_file) if "/" in config_file and not os.path.isdir( os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) # Temporarily write empty value of masters until it becomes default. # TODO: Delete all references to "# use implicit masters" when empty value becomes default. if config_file == "layout.conf" and not any( line.startswith(("masters =", "# use implicit masters")) for line in lines): f.write("masters =\n") # Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there) eclass_dir = os.path.join(repo_dir, "eclass") os.makedirs(eclass_dir) for eclass_name, eclass_content in eclasses.items(): with open( os.path.join(eclass_dir, "{}.eclass".format(eclass_name)), "wt") as f: if isinstance(eclass_content, str): eclass_content = [eclass_content] for line in eclass_content: f.write("{}\n".format(line)) # Temporarily write empty value of masters until it becomes default. 
if not repo_config or "layout.conf" not in repo_config: layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf") with open(layout_conf_path, "w") as f: f.write("masters =\n") if repo == "test_repo": # Create a minimal profile in /var/db/repos/gentoo sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile") os.makedirs(sub_profile_dir) if not (profile and "eapi" in profile): eapi_file = os.path.join(sub_profile_dir, "eapi") with open(eapi_file, "w") as f: f.write("0\n") make_defaults_file = os.path.join(sub_profile_dir, "make.defaults") with open(make_defaults_file, "w") as f: f.write('ARCH="x86"\n') f.write('ACCEPT_KEYWORDS="x86"\n') use_force_file = os.path.join(sub_profile_dir, "use.force") with open(use_force_file, "w") as f: f.write("x86\n") parent_file = os.path.join(sub_profile_dir, "parent") with open(parent_file, "w") as f: f.write("..\n") if profile: for config_file, lines in profile.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(sub_profile_dir, config_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) # Create profile symlink os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile")) gpg_test_path = os.environ["PORTAGE_GNUPGHOME"] make_conf = { "ACCEPT_KEYWORDS": "x86", "BINPKG_GPG_SIGNING_BASE_COMMAND": f"flock {gpg_test_path}/portage-binpkg-gpg.lock /usr/bin/gpg --sign --armor --yes --pinentry-mode loopback --passphrase GentooTest [PORTAGE_CONFIG]", "BINPKG_GPG_SIGNING_GPG_HOME": gpg_test_path, "BINPKG_GPG_SIGNING_KEY": "0x5D90EA06352177F6", "BINPKG_GPG_VERIFY_GPG_HOME": gpg_test_path, "CLEAN_DELAY": "0", "DISTDIR": self.distdir, "EMERGE_WARNING_DELAY": "0", "FEATURES": "${FEATURES} binpkg-signing binpkg-request-signature " "gpg-keepalive", "PKGDIR": self.pkgdir, "PORTAGE_INST_GID": str(portage.data.portage_gid), "PORTAGE_INST_UID": str(portage.data.portage_uid), "PORTAGE_TMPDIR": os.path.join(self.eroot, "var/tmp"), } if os.environ.get("NOCOLOR"): make_conf["NOCOLOR"] = os.environ["NOCOLOR"] # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they # need to be inherited by ebuild subprocesses. 
if "PORTAGE_USERNAME" in os.environ: make_conf["PORTAGE_USERNAME"] = os.environ["PORTAGE_USERNAME"] if "PORTAGE_GRPNAME" in os.environ: make_conf["PORTAGE_GRPNAME"] = os.environ["PORTAGE_GRPNAME"] make_conf_lines = [] for k_v in make_conf.items(): make_conf_lines.append('%s="%s"' % k_v) if "make.conf" in user_config: make_conf_lines.extend(user_config["make.conf"]) if not portage.process.sandbox_capable or os.environ.get( "SANDBOX_ON") == "1": # avoid problems from nested sandbox instances make_conf_lines.append( 'FEATURES="${FEATURES} -sandbox -usersandbox"') configs = user_config.copy() configs["make.conf"] = make_conf_lines for config_file, lines in configs.items(): if config_file not in self.config_files: raise ValueError("Unknown config file: '%s'" % config_file) file_name = os.path.join(user_config_dir, config_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) # Create /usr/share/portage/config/make.globals make_globals_path = os.path.join(self.eroot, GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals") ensure_dirs(os.path.dirname(make_globals_path)) os.symlink(os.path.join(cnf_path, "make.globals"), make_globals_path) # Create /usr/share/portage/config/sets/portage.conf default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets") try: os.makedirs(default_sets_conf_dir) except os.error: pass provided_sets_portage_conf = os.path.join(cnf_path, "sets", "portage.conf") os.symlink( provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"), ) set_config_dir = os.path.join(user_config_dir, "sets") try: os.makedirs(set_config_dir) except os.error: pass for sets_file, lines in sets.items(): file_name = os.path.join(set_config_dir, sets_file) with open(file_name, "w") as f: for line in lines: f.write("%s\n" % line) if cnf_path_repoman is not None: # Create /usr/share/repoman repoman_share_dir = os.path.join(self.eroot, "usr", "share", "repoman") os.symlink(cnf_path_repoman, repoman_share_dir)
def testSimple(self):
    debug = False

    install_something = """
S="${WORKDIR}"

pkg_pretend() {
    einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
    einfo "installing something..."
    insinto /usr/lib/${P}
    echo "blah blah blah" > "${T}"/regular-file
    doins "${T}"/regular-file
    dosym regular-file /usr/lib/${P}/symlink || die

    # Test code for bug #381629, using a copyright symbol encoded with latin-1.
    # We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
    # works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
    # some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
    # running tests for Python 3.2 (even though it's bash that is ultimately
    # responsible for performing the transformation).
    local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
    insinto "${latin_1_dir}"
    echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
    doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
    dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
}

pkg_config() {
    einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
    einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
    einfo "called pkg_preinst for $CATEGORY/$PF"

    # Test that has_version and best_version work correctly with
    # prefix (involves internal ROOT -> EROOT calculation in order
    # to support ROOT override via the environment with EAPIs 3
    # and later which support prefix).
    if has_version $CATEGORY/$PN:$SLOT ; then
        einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
        einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
    else
        einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
    fi
}
"""

    ebuilds = {
        "dev-libs/A-1": {
            "EAPI" : "4",
            "IUSE" : "+flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "MISC_CONTENT": install_something,
            "RDEPEND": "flag? ( dev-libs/B[flag] )",
        },
        "dev-libs/B-1": {
            "EAPI" : "4",
            "IUSE" : "+flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "MISC_CONTENT": install_something,
        },
        "virtual/foo-0": {
            "EAPI" : "4",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
        },
    }

    installed = {
        "dev-libs/A-1": {
            "EAPI" : "4",
            "IUSE" : "+flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "RDEPEND": "flag? ( dev-libs/B[flag] )",
            "USE": "flag",
        },
        "dev-libs/B-1": {
            "EAPI" : "4",
            "IUSE" : "+flag",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "USE": "flag",
        },
        "dev-libs/depclean-me-1": {
            "EAPI" : "4",
            "IUSE" : "",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "USE": "",
        },
        "app-misc/depclean-me-1": {
            "EAPI" : "4",
            "IUSE" : "",
            "KEYWORDS": "x86",
            "LICENSE": "GPL-2",
            "RDEPEND": "dev-libs/depclean-me",
            "USE": "",
        },
    }

    metadata_xml_files = (
        (
            "dev-libs/A",
            {
                "herd" : "base-system",
                "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
            },
        ),
        (
            "dev-libs/B",
            {
                "herd" : "no-herd",
                "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
            },
        ),
    )

    playground = ResolverPlayground(
        ebuilds=ebuilds, installed=installed, debug=debug)
    settings = playground.settings
    eprefix = settings["EPREFIX"]
    eroot = settings["EROOT"]
    trees = playground.trees
    portdb = trees[eroot]["porttree"].dbapi
    portdir = settings["PORTDIR"]
    var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
    cachedir = os.path.join(var_cache_edb, "dep")
    cachedir_pregen = os.path.join(portdir, "metadata", "cache")

    portage_python = portage._python_interpreter
    ebuild_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "ebuild"))
    egencache_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "egencache"))
    emerge_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "emerge"))
    emaint_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "emaint"))
    env_update_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "env-update"))
    fixpackages_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "fixpackages"))
    portageq_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "portageq"))
    quickpkg_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "quickpkg"))
    regenworld_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "regenworld"))

    rm_binary = find_binary("rm")
    self.assertEqual(rm_binary is None, False, "rm command not found")
    rm_cmd = (rm_binary,)

    egencache_extra_args = []
    if self._have_python_xml():
        egencache_extra_args.append("--update-use-local-desc")

    test_ebuild = portdb.findname("dev-libs/A-1")
    self.assertFalse(test_ebuild is None)

    test_commands = (
        env_update_cmd,
        emerge_cmd + ("--version",),
        emerge_cmd + ("--info",),
        emerge_cmd + ("--info", "--verbose"),
        emerge_cmd + ("--list-sets",),
        emerge_cmd + ("--check-news",),
        rm_cmd + ("-rf", cachedir),
        rm_cmd + ("-rf", cachedir_pregen),
        emerge_cmd + ("--regen",),
        rm_cmd + ("-rf", cachedir),
        ({"FEATURES" : "metadata-transfer"},) + \
            emerge_cmd + ("--regen",),
        rm_cmd + ("-rf", cachedir),
        ({"FEATURES" : "metadata-transfer parse-eapi-ebuild-head"},) + \
            emerge_cmd + ("--regen",),
        rm_cmd + ("-rf", cachedir),
        egencache_cmd + ("--update",) + tuple(egencache_extra_args),
        ({"FEATURES" : "metadata-transfer"},) + \
            emerge_cmd + ("--metadata",),
        rm_cmd + ("-rf", cachedir),
        ({"FEATURES" : "metadata-transfer"},) + \
            emerge_cmd + ("--metadata",),
        emerge_cmd + ("--metadata",),
        rm_cmd + ("-rf", cachedir),
        emerge_cmd + ("--oneshot", "virtual/foo"),
        emerge_cmd + ("--pretend", "dev-libs/A"),
        ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
        emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
        emerge_cmd + ("-p", "dev-libs/B"),
        emerge_cmd + ("-B", "dev-libs/B",),
        emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),
        # trigger clean prior to pkg_pretend as in bug #390711
        ebuild_cmd + (test_ebuild, "unpack"),
        emerge_cmd + ("--oneshot", "dev-libs/A",),
        emerge_cmd + ("--noreplace", "dev-libs/A",),
        emerge_cmd + ("--config", "dev-libs/A",),
        emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
        emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
        emerge_cmd + ("--pretend", "--depclean",),
        emerge_cmd + ("--depclean",),
        quickpkg_cmd + ("dev-libs/A",),
        emerge_cmd + ("--usepkgonly", "dev-libs/A"),
        emaint_cmd + ("--check", "all"),
        emaint_cmd + ("--fix", "all"),
        fixpackages_cmd,
        regenworld_cmd,
        portageq_cmd + ("match", eroot, "dev-libs/A"),
        portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
        portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
        portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
        portageq_cmd + ("metadata", eroot, "ebuild",
            "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
        portageq_cmd + ("metadata", eroot, "binary",
            "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
        portageq_cmd + ("metadata", eroot, "installed",
            "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
        portageq_cmd + ("owners", eroot, eroot + "usr"),
        emerge_cmd + ("-p", eroot + "usr"),
        emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
        emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
        emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
    )

    distdir = os.path.join(eprefix, "distdir")
    pkgdir = os.path.join(eprefix, "pkgdir")
    fake_bin = os.path.join(eprefix, "bin")
    portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
    profile_path = settings.profile_path
    user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

    features = []
    if not portage.process.sandbox_capable or \
        os.environ.get("SANDBOX_ON") == "1":
        features.append("-sandbox")

    # Since egencache ignores settings from the calling environment,
    # configure it via make.conf.
    make_conf = (
        "FEATURES=\"%s\"\n" % (" ".join(features),),
        "PORTDIR=\"%s\"\n" % (portdir,),
        "PORTAGE_GRPNAME=\"%s\"\n" % (os.environ["PORTAGE_GRPNAME"],),
        "PORTAGE_USERNAME=\"%s\"\n" % (os.environ["PORTAGE_USERNAME"],),
    )

    path = os.environ.get("PATH")
    if path is not None and not path.strip():
        path = None
    if path is None:
        path = ""
    else:
        path = ":" + path
    path = fake_bin + path

    pythonpath = os.environ.get("PYTHONPATH")
    if pythonpath is not None and not pythonpath.strip():
        pythonpath = None
    if pythonpath is not None and \
        pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
        pass
    else:
        if pythonpath is None:
            pythonpath = ""
        else:
            pythonpath = ":" + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath

    env = {
        "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
        "CLEAN_DELAY" : "0",
        "DISTDIR" : distdir,
        "EMERGE_WARNING_DELAY" : "0",
        "INFODIR" : "",
        "INFOPATH" : "",
        "PATH" : path,
        "PKGDIR" : pkgdir,
        "PORTAGE_INST_GID" : str(portage.data.portage_gid),
        "PORTAGE_INST_UID" : str(portage.data.portage_uid),
        "PORTAGE_PYTHON" : portage_python,
        "PORTAGE_TMPDIR" : portage_tmpdir,
        "PYTHONPATH" : pythonpath,
    }

    if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
        env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
            os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

    updates_dir = os.path.join(portdir, "profiles", "updates")
    dirs = [cachedir, cachedir_pregen, distdir, fake_bin,
        portage_tmpdir, updates_dir, user_config_dir, var_cache_edb]
    true_symlinks = ["chown", "chgrp"]
    true_binary = find_binary("true")
    self.assertEqual(true_binary is None, False, "true command not found")
    try:
        for d in dirs:
            ensure_dirs(d)
        with open(os.path.join(user_config_dir, "make.conf"), 'w') as f:
            for line in make_conf:
                f.write(line)
        for x in true_symlinks:
            os.symlink(true_binary, os.path.join(fake_bin, x))
        with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
            f.write(b"100")
        # non-empty system set keeps --depclean quiet
        with open(os.path.join(profile_path, "packages"), 'w') as f:
            f.write("*dev-libs/token-system-pkg")
        for cp, xml_data in metadata_xml_files:
            with open(os.path.join(portdir, cp, "metadata.xml"), 'w') as f:
                f.write(playground.metadata_xml_template % xml_data)
        with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
            f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

        if debug:
            # The subprocess inherits both stdout and stderr, for
            # debugging purposes.
            stdout = None
        else:
            # The subprocess inherits stderr so that any warnings
            # triggered by python -Wd will be visible.
            stdout = subprocess.PIPE

        for args in test_commands:
            if isinstance(args[0], dict):
                local_env = env.copy()
                local_env.update(args[0])
                args = args[1:]
            else:
                local_env = env

            proc = subprocess.Popen(args,
                env=local_env, stdout=stdout)

            if debug:
                proc.wait()
            else:
                output = proc.stdout.readlines()
                proc.wait()
                proc.stdout.close()
                if proc.returncode != os.EX_OK:
                    for line in output:
                        sys.stderr.write(_unicode_decode(line))

            self.assertEqual(os.EX_OK, proc.returncode,
                "emerge failed with args %s" % (args,))
    finally:
        playground.cleanup()
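# A minimal sketch (illustrative, not part of the suite) of the convention
# used by test_commands in testSimple above: a leading dict in a command
# tuple holds per-command environment overrides, merged into a copy of the
# base env before the subprocess is spawned. The helper name is hypothetical.
def _split_env_override(args, base_env):
    """Return (argv, env) for one test_commands entry."""
    if isinstance(args[0], dict):
        local_env = base_env.copy()
        local_env.update(args[0])
        return args[1:], local_env
    return args, base_env

# For example, ({"FEATURES" : "metadata-transfer"},) + emerge_cmd +
# ("--regen",) runs emerge --regen with FEATURES overridden for that one
# invocation only, leaving the shared env untouched.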
def testSlotAbiEmerge(self):
    debug = False

    ebuilds = {
        "dev-libs/glib-1.2.10" : {
            "SLOT": "1"
        },
        "dev-libs/glib-2.30.2" : {
            "EAPI": "4-slot-abi",
            "SLOT": "2/2.30"
        },
        "dev-libs/glib-2.32.3" : {
            "EAPI": "4-slot-abi",
            "SLOT": "2/2.32"
        },
        "dev-libs/dbus-glib-0.98" : {
            "EAPI": "4-slot-abi",
            "DEPEND": "dev-libs/glib:2=",
            "RDEPEND": "dev-libs/glib:2="
        },
    }
    installed = {
        "dev-libs/glib-1.2.10" : {
            "EAPI": "4-slot-abi",
            "SLOT": "1"
        },
        "dev-libs/glib-2.30.2" : {
            "EAPI": "4-slot-abi",
            "SLOT": "2/2.30"
        },
        "dev-libs/dbus-glib-0.98" : {
            "EAPI": "4-slot-abi",
            "DEPEND": "dev-libs/glib:2/2.30=",
            "RDEPEND": "dev-libs/glib:2/2.30="
        },
    }

    world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]

    playground = ResolverPlayground(ebuilds=ebuilds,
        installed=installed, world=world, debug=debug)
    settings = playground.settings
    eprefix = settings["EPREFIX"]
    eroot = settings["EROOT"]
    trees = playground.trees
    portdb = trees[eroot]["porttree"].dbapi
    vardb = trees[eroot]["vartree"].dbapi
    var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
    user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
    package_mask_path = os.path.join(user_config_dir, "package.mask")

    portage_python = portage._python_interpreter
    ebuild_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "ebuild"))
    emerge_cmd = (portage_python, "-Wd",
        os.path.join(PORTAGE_BIN_PATH, "emerge"))

    test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
    self.assertFalse(test_ebuild is None)

    test_commands = (
        emerge_cmd + ("--oneshot", "dev-libs/glib",),
        (lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get(
            "dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
        (BASH_BINARY, "-c", "echo %s >> %s" % tuple(map(
            portage._shell_quote,
            (">=dev-libs/glib-2.32", package_mask_path,)))),
        emerge_cmd + ("--oneshot", "dev-libs/glib",),
        (lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get(
            "dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
    )

    distdir = playground.distdir
    pkgdir = playground.pkgdir
    fake_bin = os.path.join(eprefix, "bin")
    portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
    profile_path = settings.profile_path

    path = os.environ.get("PATH")
    if path is not None and not path.strip():
        path = None
    if path is None:
        path = ""
    else:
        path = ":" + path
    path = fake_bin + path

    pythonpath = os.environ.get("PYTHONPATH")
    if pythonpath is not None and not pythonpath.strip():
        pythonpath = None
    if pythonpath is not None and \
        pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
        pass
    else:
        if pythonpath is None:
            pythonpath = ""
        else:
            pythonpath = ":" + pythonpath
        pythonpath = PORTAGE_PYM_PATH + pythonpath

    env = {
        "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
        "PATH" : path,
        "PORTAGE_PYTHON" : portage_python,
        "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
        "PYTHONPATH" : pythonpath,
    }

    if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
        env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
            os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

    dirs = [distdir, fake_bin, portage_tmpdir,
        user_config_dir, var_cache_edb]
    true_symlinks = ["chown", "chgrp"]
    true_binary = find_binary("true")
    self.assertEqual(true_binary is None, False, "true command not found")
    try:
        for d in dirs:
            ensure_dirs(d)
        for x in true_symlinks:
            os.symlink(true_binary, os.path.join(fake_bin, x))
        with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
            f.write(b"100")
        # non-empty system set keeps --depclean quiet
        with open(os.path.join(profile_path, "packages"), 'w') as f:
            f.write("*dev-libs/token-system-pkg")

        if debug:
            # The subprocess inherits both stdout and stderr, for
            # debugging purposes.
            stdout = None
        else:
            # The subprocess inherits stderr so that any warnings
            # triggered by python -Wd will be visible.
            stdout = subprocess.PIPE

        for i, args in enumerate(test_commands):
            if hasattr(args[0], '__call__'):
                self.assertTrue(args[0](),
                    "callable at index %s failed" % (i,))
                continue

            proc = subprocess.Popen(args,
                env=env, stdout=stdout)

            if debug:
                proc.wait()
            else:
                output = proc.stdout.readlines()
                proc.wait()
                proc.stdout.close()
                if proc.returncode != os.EX_OK:
                    for line in output:
                        sys.stderr.write(_unicode_decode(line))

            self.assertEqual(os.EX_OK, proc.returncode,
                "emerge failed with args %s" % (args,))
    finally:
        playground.cleanup()
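# A minimal sketch (illustrative, not part of the suite) of the second
# convention used by testSlotAbiEmerge above: entries whose first element
# is callable act as inline assertions between subprocess steps. That is
# how the test verifies the := slot-operator RDEPEND recorded for
# dev-libs/dbus-glib is rewritten to 2/2.32 after the glib upgrade, and
# back to 2/2.30 after glib-2.32 is masked and glib is downgraded. The
# driver below is hypothetical, not the actual loop.
def _run_scripted(testcase, commands, env):
    import subprocess
    for i, args in enumerate(commands):
        if callable(args[0]):
            # inline state check between commands
            testcase.assertTrue(args[0](),
                "callable at index %s failed" % (i,))
            continue
        proc = subprocess.Popen(args, env=env, stdout=subprocess.PIPE)
        proc.communicate()
        testcase.assertEqual(os.EX_OK, proc.returncode,
            "command failed with args %s" % (args,))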