def _testAsynchronousLock(self):
    scheduler = global_event_loop()
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        for force_async, async_unlock in itertools.product(
            (True, False), repeat=2):
            for force_dummy in (True, False):
                async_lock = AsynchronousLock(path=path,
                    scheduler=scheduler, _force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy)
                async_lock.start()
                self.assertEqual(async_lock.wait(), os.EX_OK)
                self.assertEqual(async_lock.returncode, os.EX_OK)
                if async_unlock:
                    scheduler.run_until_complete(async_lock.async_unlock())
                else:
                    async_lock.unlock()

            async_lock = AsynchronousLock(path=path,
                scheduler=scheduler, _force_async=force_async,
                _force_process=True)
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            if async_unlock:
                scheduler.run_until_complete(async_lock.async_unlock())
            else:
                async_lock.unlock()
    finally:
        shutil.rmtree(tempdir)
def _testAsynchronousLockWaitCancel(self):
    scheduler = global_event_loop()
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        lock1 = AsynchronousLock(path=path, scheduler=scheduler)
        lock1.start()
        self.assertEqual(lock1.wait(), os.EX_OK)
        self.assertEqual(lock1.returncode, os.EX_OK)
        lock2 = AsynchronousLock(path=path, scheduler=scheduler,
            _force_async=True, _force_process=True)
        lock2.start()
        # lock2 should be waiting for lock1 to release
        self.assertEqual(lock2.poll(), None)
        self.assertEqual(lock2.returncode, None)

        # Cancel lock2 and then check wait() and returncode results.
        lock2.cancel()
        self.assertEqual(lock2.wait() == os.EX_OK, False)
        self.assertEqual(lock2.returncode == os.EX_OK, False)
        self.assertEqual(lock2.returncode is None, False)
        lock1.unlock()
    finally:
        shutil.rmtree(tempdir)
def _testAsynchronousLockWait(self):
    scheduler = global_event_loop()
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        lock1 = AsynchronousLock(path=path, scheduler=scheduler)
        lock1.start()
        self.assertEqual(lock1.wait(), os.EX_OK)
        self.assertEqual(lock1.returncode, os.EX_OK)

        # lock2 requires _force_async=True since the portage.locks
        # module is not designed to work as intended here if the
        # same process tries to lock the same file more than
        # one time concurrently.
        lock2 = AsynchronousLock(path=path, scheduler=scheduler,
            _force_async=True, _force_process=True)
        lock2.start()
        # lock2 should be waiting for lock1 to release
        self.assertEqual(lock2.poll(), None)
        self.assertEqual(lock2.returncode, None)

        lock1.unlock()
        self.assertEqual(lock2.wait(), os.EX_OK)
        self.assertEqual(lock2.returncode, os.EX_OK)
        lock2.unlock()
    finally:
        shutil.rmtree(tempdir)
def _chpathtool_exit(self, chpathtool):
    if self._final_exit(chpathtool) != os.EX_OK:
        self._unlock_builddir()
        self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
            (self.settings["EPREFIX"],),
            noiselevel=-1, level=logging.ERROR)
        self.wait()
        return

    # We want to install in "our" prefix, not the binary one
    with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
        encoding=_encodings['fs'], errors='strict'), mode='w',
        encoding=_encodings['repo.content'], errors='strict') as f:
        f.write(self.settings["EPREFIX"] + "\n")

    # Move the files to the correct location for merge.
    image_tmp_dir = os.path.join(
        self.settings["PORTAGE_BUILDDIR"], "image_tmp")
    build_d = os.path.join(self.settings["D"],
        self._build_prefix.lstrip(os.sep))
    if not os.path.isdir(build_d):
        # Assume this is a virtual package or something.
        shutil.rmtree(self._image_dir)
        ensure_dirs(self.settings["ED"])
    else:
        os.rename(build_d, image_tmp_dir)
        shutil.rmtree(self._image_dir)
        ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
        os.rename(image_tmp_dir, self.settings["ED"])

    self.wait()
def _testAsynchronousLockWaitKill(self):
    scheduler = global_event_loop()
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        lock1 = AsynchronousLock(path=path, scheduler=scheduler)
        lock1.start()
        self.assertEqual(lock1.wait(), os.EX_OK)
        self.assertEqual(lock1.returncode, os.EX_OK)
        lock2 = AsynchronousLock(path=path, scheduler=scheduler,
            _force_async=True, _force_process=True)
        lock2.start()
        # lock2 should be waiting for lock1 to release
        self.assertEqual(lock2.poll(), None)
        self.assertEqual(lock2.returncode, None)

        # Kill lock2's process and then check wait() and
        # returncode results. This is intended to simulate
        # a SIGINT sent via the controlling tty.
        self.assertEqual(lock2._imp is not None, True)
        self.assertEqual(lock2._imp._proc is not None, True)
        self.assertEqual(lock2._imp._proc.pid is not None, True)
        lock2._imp._kill_test = True
        os.kill(lock2._imp._proc.pid, signal.SIGTERM)
        self.assertEqual(lock2.wait() == os.EX_OK, False)
        self.assertEqual(lock2.returncode == os.EX_OK, False)
        self.assertEqual(lock2.returncode is None, False)
        lock1.unlock()
    finally:
        shutil.rmtree(tempdir)
def _testAsynchronousLock(self):
    scheduler = PollScheduler().sched_iface
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        for force_async in (True, False):
            for force_dummy in (True, False):
                async_lock = AsynchronousLock(path=path,
                    scheduler=scheduler, _force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy)
                async_lock.start()
                self.assertEqual(async_lock.wait(), os.EX_OK)
                self.assertEqual(async_lock.returncode, os.EX_OK)
                async_lock.unlock()

            async_lock = AsynchronousLock(path=path,
                scheduler=scheduler, _force_async=force_async,
                _force_process=True)
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            async_lock.unlock()
    finally:
        shutil.rmtree(tempdir)
def binTestsCleanup():
    global basedir
    if basedir is None:
        return
    if os.access(basedir, os.W_OK):
        shutil.rmtree(basedir)
        basedir = None
def testSetCpv(self):
    """
    Test the clone via constructor.
    """

    ebuilds = {
        "dev-libs/A-1": {"IUSE": "static-libs"},
        "dev-libs/B-1": {"IUSE": "static-libs"},
    }

    env_files = {
        "A" : ("USE=\"static-libs\"",)
    }

    package_env = (
        "dev-libs/A A",
    )

    eprefix = normalize_path(tempfile.mkdtemp())
    playground = None
    try:
        user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
        os.makedirs(user_config_dir)

        with io.open(os.path.join(user_config_dir, "package.env"),
            mode='w', encoding=_encodings['content']) as f:
            for line in package_env:
                f.write(line + "\n")

        env_dir = os.path.join(user_config_dir, "env")
        os.makedirs(env_dir)
        for k, v in env_files.items():
            with io.open(os.path.join(env_dir, k), mode='w',
                encoding=_encodings['content']) as f:
                for line in v:
                    f.write(line + "\n")

        playground = ResolverPlayground(eprefix=eprefix, ebuilds=ebuilds)
        settings = config(clone=playground.settings)

        result = playground.run(["=dev-libs/A-1"])
        pkg, existing_node = result.depgraph._select_package(
            playground.eroot, Atom("=dev-libs/A-1"))
        settings.setcpv(pkg)
        self.assertTrue("static-libs" in settings["PORTAGE_USE"].split())

        # Test bug #522362, where a USE=static-libs package.env
        # setting leaked from one setcpv call to the next.
        pkg, existing_node = result.depgraph._select_package(
            playground.eroot, Atom("=dev-libs/B-1"))
        settings.setcpv(pkg)
        self.assertTrue("static-libs" not in settings["PORTAGE_USE"].split())
    finally:
        if playground is None:
            shutil.rmtree(eprefix)
        else:
            playground.cleanup()
def _testAsynchronousLock(self):
    scheduler = global_event_loop()
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        for force_async in (True, False):
            for force_dummy in (True, False):
                async_lock = AsynchronousLock(path=path,
                    scheduler=scheduler, _force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy)
                async_lock.start()
                self.assertEqual(async_lock.wait(), os.EX_OK)
                self.assertEqual(async_lock.returncode, os.EX_OK)
                async_lock.unlock()

            async_lock = AsynchronousLock(path=path,
                scheduler=scheduler, _force_async=force_async,
                _force_process=True)
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            async_lock.unlock()
    finally:
        shutil.rmtree(tempdir)
def _testAsynchronousLock(self):
    scheduler = global_event_loop()
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, "lock_me")
        for force_async in (True, False):
            for force_dummy in (
                (False,) if dummy_threading is None else (True, False)
            ):
                async_lock = AsynchronousLock(
                    path=path,
                    scheduler=scheduler,
                    _force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy,
                )
                async_lock.start()
                self.assertEqual(async_lock.wait(), os.EX_OK)
                self.assertEqual(async_lock.returncode, os.EX_OK)
                scheduler.run_until_complete(async_lock.async_unlock())

            async_lock = AsynchronousLock(
                path=path,
                scheduler=scheduler,
                _force_async=force_async,
                _force_process=True,
            )
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            scheduler.run_until_complete(async_lock.async_unlock())
    finally:
        shutil.rmtree(tempdir)
def testGetConfigSourceLex(self):
    try:
        tempdir = tempfile.mkdtemp()
        make_conf_file = os.path.join(tempdir, 'make.conf')
        with open(make_conf_file, 'w') as f:
            f.write('source "${DIR}/sourced_file"\n')
        sourced_file = os.path.join(tempdir, 'sourced_file')
        with open(sourced_file, 'w') as f:
            f.write('PASSES_SOURCING_TEST="True"\n')

        d = getconfig(make_conf_file,
            allow_sourcing=True, expand={"DIR": tempdir})

        # PASSES_SOURCING_TEST should exist in getconfig result.
        self.assertTrue(d is not None)
        self.assertEqual("True", d['PASSES_SOURCING_TEST'])

        # With allow_sourcing=True and empty expand map, this should
        # throw a FileNotFound exception.
        self.assertRaisesMsg(
            "An empty expand map should throw an exception",
            ParseError, getconfig, make_conf_file,
            allow_sourcing=True, expand={})
    finally:
        shutil.rmtree(tempdir)
def test_filename_hash_layout_get_filenames(self):
    layouts = (
        FlatLayout(),
        FilenameHashLayout('SHA1', '4'),
        FilenameHashLayout('SHA1', '8'),
        FilenameHashLayout('SHA1', '8:16'),
        FilenameHashLayout('SHA1', '8:16:24'),
    )
    filename = 'foo-1.tar.gz'

    for layout in layouts:
        distdir = tempfile.mkdtemp()
        try:
            path = os.path.join(distdir, layout.get_path(filename))
            try:
                os.makedirs(os.path.dirname(path))
            except OSError:
                pass

            with open(path, 'wb') as f:
                pass

            self.assertEqual([filename], list(layout.get_filenames(distdir)))
        finally:
            shutil.rmtree(distdir)
def _prepare_fake_distdir(settings, alist):
    orig_distdir = settings["DISTDIR"]
    edpath = os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
    portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)

    # Remove any unexpected files or directories.
    for x in os.listdir(edpath):
        symlink_path = os.path.join(edpath, x)
        st = os.lstat(symlink_path)
        if x in alist and stat.S_ISLNK(st.st_mode):
            continue
        if stat.S_ISDIR(st.st_mode):
            shutil.rmtree(symlink_path)
        else:
            os.unlink(symlink_path)

    # Check for existing symlinks and recreate if necessary.
    for x in alist:
        symlink_path = os.path.join(edpath, x)
        target = os.path.join(orig_distdir, x)
        try:
            link_target = os.readlink(symlink_path)
        except OSError:
            os.symlink(target, symlink_path)
        else:
            if link_target != target:
                os.unlink(symlink_path)
                os.symlink(target, symlink_path)
def _testLockNonblock(self):
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        lock1 = portage.locks.lockfile(path)
        pid = os.fork()
        if pid == 0:
            portage.locks._close_fds()
            # Disable close_fds since we don't exec
            # (see _setup_pipes docstring).
            portage.process._setup_pipes({0: 0, 1: 1, 2: 2}, close_fds=False)
            rval = 2
            try:
                try:
                    lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
                except portage.exception.TryAgain:
                    rval = os.EX_OK
                else:
                    rval = 1
                    portage.locks.unlockfile(lock2)
            except SystemExit:
                raise
            except:
                traceback.print_exc()
            finally:
                os._exit(rval)

        self.assertEqual(pid > 0, True)
        pid, status = os.waitpid(pid, 0)
        self.assertEqual(os.WIFEXITED(status), True)
        self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)

        portage.locks.unlockfile(lock1)
    finally:
        shutil.rmtree(tempdir)
def test_gpkg_symlink_path(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") playground = ResolverPlayground( user_config={ "make.conf": ('BINPKG_COMPRESS="none"',), } ) tmpdir = tempfile.mkdtemp() try: settings = playground.settings orig_full_path = os.path.join(tmpdir, "orig/") os.makedirs(orig_full_path) os.symlink( "aaaabbbb/ccccdddd/eeeeffff/gggghhhh/iiiijjjj/kkkkllll/" "mmmmnnnn/oooopppp/qqqqrrrr/sssstttt/uuuuvvvv/wwwwxxxx/" "yyyyzzzz/00001111/22223333/44445555/66667777/88889999/test", os.path.join(orig_full_path, "a_long_symlink"), ) gpkg_file_loc = os.path.join(tmpdir, "test.gpkg.tar") test_gpkg = gpkg(settings, "test", gpkg_file_loc) check_result = test_gpkg._check_pre_image_files( os.path.join(tmpdir, "orig") ) self.assertEqual(check_result, (0, 14, 166, 0, 0)) test_gpkg.compress(os.path.join(tmpdir, "orig"), {"meta": "test"}) with open(gpkg_file_loc, "rb") as container: # container self.assertEqual( test_gpkg._get_tar_format(container), tarfile.USTAR_FORMAT ) with tarfile.open(gpkg_file_loc, "r") as container: metadata = io.BytesIO(container.extractfile("test/metadata.tar").read()) self.assertEqual( test_gpkg._get_tar_format(metadata), tarfile.USTAR_FORMAT ) metadata.close() image = io.BytesIO(container.extractfile("test/image.tar").read()) self.assertEqual(test_gpkg._get_tar_format(image), tarfile.GNU_FORMAT) image.close() test_gpkg.decompress(os.path.join(tmpdir, "test")) r = compare_files( os.path.join(tmpdir, "orig/", "a_long_symlink"), os.path.join(tmpdir, "test/", "a_long_symlink"), skipped_types=("atime", "mtime", "ctime"), ) self.assertEqual(r, ()) finally: shutil.rmtree(tmpdir) playground.cleanup()
def cleanup(self):
    portdb = self.trees[self.eroot]["porttree"].dbapi
    portdb.close_caches()
    portage.dbapi.porttree.portdbapi.portdbapi_instances.remove(portdb)
    if self.debug:
        print("\nEROOT=%s" % self.eroot)
    else:
        shutil.rmtree(self.eroot)
def cleanup(self):
    for eroot in self.trees:
        portdb = self.trees[eroot]["porttree"].dbapi
        portdb.close_caches()
    if self.debug:
        print("\nEROOT=%s" % self.eroot)
    else:
        shutil.rmtree(self.eroot)
def test_gpkg_short_path(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") playground = ResolverPlayground( user_config={ "make.conf": ('BINPKG_COMPRESS="none"',), } ) tmpdir = tempfile.mkdtemp() try: settings = playground.settings path_name = ( "aaaabbbb/ccccdddd/eeeeffff/gggghhhh/iiiijjjj/kkkkllll/" "mmmmnnnn/oooopppp/qqqqrrrr/sssstttt/" ) orig_full_path = os.path.join(tmpdir, "orig/" + path_name) os.makedirs(orig_full_path) with open(os.path.join(orig_full_path, "test"), "wb") as test_file: test_file.write(urandom(1048576)) gpkg_file_loc = os.path.join(tmpdir, "test.gpkg.tar") test_gpkg = gpkg(settings, "test", gpkg_file_loc) check_result = test_gpkg._check_pre_image_files( os.path.join(tmpdir, "orig") ) self.assertEqual(check_result, (95, 4, 0, 1048576, 1048576)) test_gpkg.compress(os.path.join(tmpdir, "orig"), {"meta": "test"}) with open(gpkg_file_loc, "rb") as container: # container self.assertEqual( test_gpkg._get_tar_format(container), tarfile.USTAR_FORMAT ) with tarfile.open(gpkg_file_loc, "r") as container: metadata = io.BytesIO(container.extractfile("test/metadata.tar").read()) self.assertEqual( test_gpkg._get_tar_format(metadata), tarfile.USTAR_FORMAT ) metadata.close() image = io.BytesIO(container.extractfile("test/image.tar").read()) self.assertEqual(test_gpkg._get_tar_format(image), tarfile.USTAR_FORMAT) image.close() test_gpkg.decompress(os.path.join(tmpdir, "test")) r = compare_files( os.path.join(tmpdir, "orig/" + path_name + "test"), os.path.join(tmpdir, "test/" + path_name + "test"), skipped_types=("atime", "mtime", "ctime"), ) self.assertEqual(r, ()) finally: shutil.rmtree(tmpdir) playground.cleanup()
def testFakedbapi(self):
    packages = (
        ("sys-apps/portage-2.1.10", {
            "EAPI" : "2",
            "IUSE" : "ipc doc",
            "repository" : "gentoo",
            "SLOT" : "0",
            "USE" : "ipc missing-iuse",
        }),
        ("virtual/package-manager-0", {
            "EAPI" : "0",
            "repository" : "gentoo",
            "SLOT" : "0",
        }),
    )

    match_tests = (
        ("sys-apps/portage:0[ipc]", ["sys-apps/portage-2.1.10"]),
        ("sys-apps/portage:0[-ipc]", []),
        ("sys-apps/portage:0[doc]", []),
        ("sys-apps/portage:0[-doc]", ["sys-apps/portage-2.1.10"]),
        ("sys-apps/portage:0", ["sys-apps/portage-2.1.10"]),
        ("sys-apps/portage:0[missing-iuse]", []),
        ("sys-apps/portage:0[-missing-iuse]", []),
        ("sys-apps/portage:0::gentoo[ipc]", ["sys-apps/portage-2.1.10"]),
        ("sys-apps/portage:0::multilib[ipc]", []),
        ("virtual/package-manager", ["virtual/package-manager-0"]),
    )

    tempdir = tempfile.mkdtemp()
    try:
        test_repo = os.path.join(tempdir, "var", "repositories", "test_repo")
        os.makedirs(os.path.join(test_repo, "profiles"))
        with open(os.path.join(test_repo, "profiles", "repo_name"), "w") as f:
            f.write("test_repo")
        env = {
            "PORTAGE_REPOSITORIES": "[DEFAULT]\nmain-repo = test_repo\n[test_repo]\nlocation = %s" % test_repo
        }

        # Tests may override portage.const.EPREFIX in order to
        # simulate a prefix installation. It's reasonable to do
        # this because tests should be self-contained such that
        # the "real" value of portage.const.EPREFIX is entirely
        # irrelevant (see bug #492932).
        portage.const.EPREFIX = tempdir

        fakedb = fakedbapi(settings=config(config_profile_path="",
            env=env, eprefix=tempdir))
        for cpv, metadata in packages:
            fakedb.cpv_inject(cpv, metadata=metadata)

        for atom, expected_result in match_tests:
            result = fakedb.match(atom)
            self.assertEqual(fakedb.match(atom), expected_result,
                "fakedb.match('%s') = %s != %s" % (atom, result, expected_result))
    finally:
        shutil.rmtree(tempdir)
def test_gpkg_missing_manifest_signature(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") playground = ResolverPlayground( user_config={ "make.conf": ( 'FEATURES="${FEATURES} binpkg-signing ' 'binpkg-request-signature"', 'BINPKG_FORMAT="gpkg"', ), }) tmpdir = tempfile.mkdtemp() try: settings = playground.settings gpg = GPG(settings) gpg.unlock() orig_full_path = os.path.join(tmpdir, "orig/") os.makedirs(orig_full_path) data = urandom(1048576) with open(os.path.join(orig_full_path, "data"), "wb") as f: f.write(data) binpkg_1 = gpkg(settings, "test", os.path.join(tmpdir, "test-1.gpkg.tar")) binpkg_1.compress(orig_full_path, {}) with tarfile.open(os.path.join(tmpdir, "test-1.gpkg.tar"), "r") as tar_1: with tarfile.open(os.path.join(tmpdir, "test-2.gpkg.tar"), "w") as tar_2: for f in tar_1.getmembers(): if f.name == "Manifest": manifest = tar_1.extractfile(f).read().decode( "UTF-8") manifest = manifest.replace( "-----BEGIN PGP SIGNATURE-----", "") manifest = manifest.replace( "-----END PGP SIGNATURE-----", "") manifest_data = io.BytesIO( manifest.encode("UTF-8")) manifest_data.seek(0, io.SEEK_END) f.size = manifest_data.tell() manifest_data.seek(0) tar_2.addfile(f, manifest_data) else: tar_2.addfile(f, tar_1.extractfile(f)) binpkg_2 = gpkg(settings, "test", os.path.join(tmpdir, "test-2.gpkg.tar")) self.assertRaises(InvalidSignature, binpkg_2.decompress, os.path.join(tmpdir, "test")) finally: shutil.rmtree(tmpdir) playground.cleanup()
def testEbuildFetch(self):
    user_config = {
        "make.conf": ('GENTOO_MIRRORS="{scheme}://{host}:{port}"',),
    }

    distfiles = {
        'bar': b'bar\n',
        'foo': b'foo\n',
    }

    ebuilds = {
        'dev-libs/A-1': {
            'EAPI': '7',
            'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
                {scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
        },
    }

    loop = SchedulerInterface(global_event_loop())

    scheme = 'http'
    host = '127.0.0.1'
    content = {}

    with AsyncHTTPServer(host, content, loop) as server:
        ebuilds_subst = {}
        for cpv, metadata in ebuilds.items():
            metadata = metadata.copy()
            metadata['SRC_URI'] = metadata['SRC_URI'].format(
                scheme=scheme, host=host, port=server.server_port)
            ebuilds_subst[cpv] = metadata

        user_config_subst = user_config.copy()
        for configname, configdata in user_config.items():
            configdata_sub = []
            for line in configdata:
                configdata_sub.append(line.format(
                    scheme=scheme, host=host, port=server.server_port))
            user_config_subst[configname] = tuple(configdata_sub)

        playground = ResolverPlayground(ebuilds=ebuilds_subst,
            distfiles=distfiles, user_config=user_config_subst)
        ro_distdir = tempfile.mkdtemp()
        try:
            self._testEbuildFetch(loop, scheme, host, distfiles, ebuilds,
                content, server, playground, ro_distdir)
        finally:
            shutil.rmtree(ro_distdir)
            playground.cleanup()
def cleanup(self):
    for eroot in self.trees:
        portdb = self.trees[eroot]["porttree"].dbapi
        portdb.close_caches()
    if self.debug:
        print("\nEROOT=%s" % self.eroot)
    else:
        shutil.rmtree(self.eroot)
    if hasattr(self, '_orig_eprefix'):
        portage.const.EPREFIX = self._orig_eprefix
def get_commit_message_with_editor(editor, message=None, prefix=""): """ Execute editor with a temporary file as it's argument and return the file content afterwards. @param editor: An EDITOR value from the environment @type: string @param message: An iterable of lines to show in the editor. @type: iterable @param prefix: Suggested prefix for the commit message summary line. @type: string @rtype: string or None @return: A string on success or None if an error occurs. """ commitmessagedir = tempfile.mkdtemp(".repoman.msg") filename = os.path.join(commitmessagedir, "COMMIT_EDITMSG") try: with open(filename, "wb") as mymsg: mymsg.write( _unicode_encode( _(prefix + "\n\n# Please enter the commit message " "for your changes.\n# (Comment lines starting " "with '#' will not be included)\n"), encoding=_encodings['content'], errors='backslashreplace')) if message: mymsg.write(b"#\n") for line in message: mymsg.write( _unicode_encode("#" + line, encoding=_encodings['content'], errors='backslashreplace')) retval = os.system(editor + " '%s'" % filename) if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK): return None try: with io.open(_unicode_encode(filename, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace') as f: mylines = f.readlines() except OSError as e: if e.errno != errno.ENOENT: raise del e return None return "".join(line for line in mylines if not line.startswith("#")) finally: try: shutil.rmtree(commitmessagedir) except OSError: pass
def cleanup(self, datadir):
    datadir_split = os.path.split(datadir)
    if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
        # This is potentially dangerous,
        # thus the above sanity check.
        try:
            shutil.rmtree(datadir)
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                pass
            else:
                raise oe
def test_gpkg_manifest_duplicate_files(self):
    if sys.version_info.major < 3:
        self.skipTest("Python 2 is not supported")

    playground = ResolverPlayground(
        user_config={
            "make.conf": (
                'FEATURES="${FEATURES} -binpkg-signing '
                '-binpkg-request-signature -gpg-keepalive"',
            ),
        }
    )
    tmpdir = tempfile.mkdtemp()

    try:
        settings = playground.settings

        orig_full_path = os.path.join(tmpdir, "orig/")
        os.makedirs(orig_full_path)
        data = urandom(100)
        with open(os.path.join(orig_full_path, "data"), "wb") as f:
            f.write(data)

        binpkg_1 = gpkg(settings, "test", os.path.join(tmpdir, "test-1.gpkg.tar"))
        binpkg_1.compress(orig_full_path, {})

        with tarfile.open(os.path.join(tmpdir, "test-1.gpkg.tar"), "r") as tar_1:
            with tarfile.open(
                os.path.join(tmpdir, "test-2.gpkg.tar"), "w"
            ) as tar_2:
                for f in tar_1.getmembers():
                    if f.name == "Manifest":
                        manifest = tar_1.extractfile(f).read()
                        data = io.BytesIO(manifest)
                        # Seek to the end of the stream before appending a
                        # second copy of the manifest entries.
                        data.seek(0, io.SEEK_END)
                        data.write(b"\n")
                        data.write(manifest)
                        f.size = data.tell()
                        data.seek(0)
                        tar_2.addfile(f, data)
                        data.close()
                    else:
                        tar_2.addfile(f, tar_1.extractfile(f))

        binpkg_2 = gpkg(settings, "test", os.path.join(tmpdir, "test-2.gpkg.tar"))
        self.assertRaises(
            DigestException, binpkg_2.decompress, os.path.join(tmpdir, "test")
        )
    finally:
        shutil.rmtree(tmpdir)
        playground.cleanup()
def test_competition_with_same_process(self):
    """
    Test that an attempt to lock the same file multiple times in the
    same process will behave as intended (bug 714480).
    """
    tempdir = tempfile.mkdtemp()
    try:
        path = os.path.join(tempdir, 'lock_me')
        lock = portage.locks.lockfile(path)
        self.assertRaises(TryAgain, portage.locks.lockfile, path,
            flags=os.O_NONBLOCK)
        portage.locks.unlockfile(lock)
    finally:
        shutil.rmtree(tempdir)
def get_commit_message_with_editor(editor, message=None, prefix=""): """ Execute editor with a temporary file as it's argument and return the file content afterwards. @param editor: An EDITOR value from the environment @type: string @param message: An iterable of lines to show in the editor. @type: iterable @param prefix: Suggested prefix for the commit message summary line. @type: string @rtype: string or None @return: A string on success or None if an error occurs. """ commitmessagedir = tempfile.mkdtemp(".repoman.msg") filename = os.path.join(commitmessagedir, "COMMIT_EDITMSG") try: with open(filename, "wb") as mymsg: mymsg.write( _unicode_encode(_( prefix + "\n\n# Please enter the commit message " "for your changes.\n# (Comment lines starting " "with '#' will not be included)\n"), encoding=_encodings['content'], errors='backslashreplace')) if message: mymsg.write(b"#\n") for line in message: mymsg.write( _unicode_encode( "#" + line, encoding=_encodings['content'], errors='backslashreplace')) retval = os.system(editor + " '%s'" % filename) if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK): return None try: with io.open(_unicode_encode( filename, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace') as f: mylines = f.readlines() except OSError as e: if e.errno != errno.ENOENT: raise del e return None return "".join(line for line in mylines if not line.startswith("#")) finally: try: shutil.rmtree(commitmessagedir) except OSError: pass
def test_gpkg_get_metadata_url(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") if sys.version_info.major == 3 and sys.version_info.minor <= 6: self.skipTest("http server not support change root dir") playground = ResolverPlayground( user_config={ "make.conf": ( 'BINPKG_COMPRESS="gzip"', 'FEATURES="${FEATURES} -binpkg-signing ' '-binpkg-request-signature"', ), }) tmpdir = tempfile.mkdtemp() try: settings = playground.settings for _ in range(0, 5): port = random.randint(30000, 60000) try: server = self.start_http_server(tmpdir, port) except OSError: continue break orig_full_path = os.path.join(tmpdir, "orig/") os.makedirs(orig_full_path) with open(os.path.join(orig_full_path, "test"), "wb") as test_file: test_file.write(urandom(1048576)) gpkg_file_loc = os.path.join(tmpdir, "test.gpkg.tar") test_gpkg = gpkg(settings, "test", gpkg_file_loc) meta = { "test1": b"{abcdefghijklmnopqrstuvwxyz, 1234567890}", "test2": urandom(102400), } test_gpkg.compress(os.path.join(tmpdir, "orig"), meta) meta_from_url = test_gpkg.get_metadata_url("http://127.0.0.1:" + str(port) + "/test.gpkg.tar") self.assertEqual(meta, meta_from_url) finally: shutil.rmtree(tmpdir) playground.cleanup()
def test_gpkg_incorrect_checksum(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") playground = ResolverPlayground( user_config={ "make.conf": ( 'FEATURES="${FEATURES} -binpkg-signing ' '-binpkg-request-signature -gpg-keepalive"', ), } ) tmpdir = tempfile.mkdtemp() try: settings = playground.settings orig_full_path = os.path.join(tmpdir, "orig/") os.makedirs(orig_full_path) data = urandom(1048576) with open(os.path.join(orig_full_path, "data"), "wb") as f: f.write(data) binpkg_1 = gpkg(settings, "test", os.path.join(tmpdir, "test-1.gpkg.tar")) binpkg_1.compress(orig_full_path, {}) with tarfile.open(os.path.join(tmpdir, "test-1.gpkg.tar"), "r") as tar_1: with tarfile.open( os.path.join(tmpdir, "test-2.gpkg.tar"), "w" ) as tar_2: for f in tar_1.getmembers(): if f.name == "Manifest": data = io.BytesIO(tar_1.extractfile(f).read()) data_view = data.getbuffer() data_view[-16:] = b"20a6d80ab0320fh9" del data_view tar_2.addfile(f, data) data.close() else: tar_2.addfile(f, tar_1.extractfile(f)) binpkg_2 = gpkg(settings, "test", os.path.join(tmpdir, "test-2.gpkg.tar")) self.assertRaises( DigestException, binpkg_2.decompress, os.path.join(tmpdir, "test") ) finally: shutil.rmtree(tmpdir) playground.cleanup()
def testFakedbapi(self): packages = ( ("sys-apps/portage-2.1.10", { "EAPI" : "2", "IUSE" : "ipc doc", "repository" : "gentoo", "SLOT" : "0", "USE" : "ipc missing-iuse", }), ("virtual/package-manager-0", { "EAPI" : "0", "repository" : "gentoo", "SLOT" : "0", }), ) match_tests = ( ("sys-apps/portage:0[ipc]", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0[-ipc]", []), ("sys-apps/portage:0[doc]", []), ("sys-apps/portage:0[-doc]", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0[missing-iuse]", []), ("sys-apps/portage:0[-missing-iuse]", []), ("sys-apps/portage:0::gentoo[ipc]", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0::multilib[ipc]", []), ("virtual/package-manager", ["virtual/package-manager-0"]), ) tempdir = tempfile.mkdtemp() try: portdir = os.path.join(tempdir, "usr/portage") os.makedirs(portdir) env = { "PORTDIR": portdir, } fakedb = fakedbapi(settings=config(config_profile_path="", env=env, eprefix=tempdir)) for cpv, metadata in packages: fakedb.cpv_inject(cpv, metadata=metadata) for atom, expected_result in match_tests: result = fakedb.match(atom) self.assertEqual(fakedb.match(atom), expected_result, "fakedb.match('%s') = %s != %s" % (atom, result, expected_result)) finally: shutil.rmtree(tempdir)
def diff_mixed(func, file1, file2):
    tempdir = None
    try:
        if os.path.islink(file1) and \
            not os.path.islink(file2) and \
            os.path.isfile(file1) and \
            os.path.isfile(file2):
            # If a regular file replaces a symlink to a regular
            # file, then show the diff between the regular files
            # (bug #330221).
            diff_files = (file2, file2)
        else:
            files = [file1, file2]
            diff_files = [file1, file2]
            for i in range(len(diff_files)):
                try:
                    st = os.lstat(diff_files[i])
                except OSError:
                    st = None
                if st is not None and stat.S_ISREG(st.st_mode):
                    continue

                if tempdir is None:
                    tempdir = tempfile.mkdtemp()

                diff_files[i] = os.path.join(tempdir, "%d" % i)
                if st is None:
                    content = "/dev/null\n"
                elif stat.S_ISLNK(st.st_mode):
                    link_dest = os.readlink(files[i])
                    content = "SYM: %s -> %s\n" % (file1, link_dest)
                elif stat.S_ISDIR(st.st_mode):
                    content = "DIR: %s\n" % (file1,)
                elif stat.S_ISFIFO(st.st_mode):
                    content = "FIF: %s\n" % (file1,)
                else:
                    content = "DEV: %s\n" % (file1,)

                with io.open(diff_files[i], mode='w',
                    encoding=_encodings['stdio']) as f:
                    f.write(content)

        return func(diff_files[0], diff_files[1])
    finally:
        if tempdir is not None:
            shutil.rmtree(tempdir)
def test_gpkg_missing_signature(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") playground = ResolverPlayground( user_config={ "make.conf": ( 'FEATURES="${FEATURES} binpkg-signing ' 'binpkg-request-signature"', 'BINPKG_FORMAT="gpkg"', ), }) tmpdir = tempfile.mkdtemp() try: settings = playground.settings gpg = GPG(settings) gpg.unlock() orig_full_path = os.path.join(tmpdir, "orig/") os.makedirs(orig_full_path) data = urandom(1048576) with open(os.path.join(orig_full_path, "data"), "wb") as f: f.write(data) binpkg_1 = gpkg(settings, "test", os.path.join(tmpdir, "test-1.gpkg.tar")) binpkg_1.compress(orig_full_path, {}) with tarfile.open(os.path.join(tmpdir, "test-1.gpkg.tar"), "r") as tar_1: with tarfile.open(os.path.join(tmpdir, "test-2.gpkg.tar"), "w") as tar_2: for f in tar_1.getmembers(): if f.name.endswith(".sig"): pass else: tar_2.addfile(f, tar_1.extractfile(f)) binpkg_2 = gpkg(settings, "test", os.path.join(tmpdir, "test-2.gpkg.tar")) self.assertRaises(MissingSignature, binpkg_2.decompress, os.path.join(tmpdir, "test")) finally: shutil.rmtree(tmpdir) playground.cleanup()
def test_gpkg_extra_files(self): if sys.version_info.major < 3: self.skipTest("Not support Python 2") playground = ResolverPlayground( user_config={ "make.conf": ( 'FEATURES="${FEATURES} -binpkg-signing ' '-binpkg-request-signature -gpg-keepalive"', ), } ) tmpdir = tempfile.mkdtemp() try: settings = playground.settings orig_full_path = os.path.join(tmpdir, "orig/") os.makedirs(orig_full_path) data = urandom(1048576) with open(os.path.join(orig_full_path, "data"), "wb") as f: f.write(data) binpkg_1 = gpkg(settings, "test", os.path.join(tmpdir, "test-1.gpkg.tar")) binpkg_1.compress(orig_full_path, {}) with tarfile.open(os.path.join(tmpdir, "test-1.gpkg.tar"), "r") as tar_1: with tarfile.open( os.path.join(tmpdir, "test-2.gpkg.tar"), "w" ) as tar_2: for f in tar_1.getmembers(): tar_2.addfile(f, tar_1.extractfile(f)) data_tarinfo = tarfile.TarInfo("data2") data_tarinfo.size = len(data) data2 = io.BytesIO(data) tar_2.addfile(data_tarinfo, data2) data2.close() binpkg_2 = gpkg(settings, "test", os.path.join(tmpdir, "test-2.gpkg.tar")) self.assertRaises( DigestException, binpkg_2.decompress, os.path.join(tmpdir, "test") ) finally: shutil.rmtree(tmpdir) playground.cleanup()
def testFakedbapi(self): packages = ( ( "sys-apps/portage-2.1.10", {"EAPI": "2", "IUSE": "ipc doc", "repository": "gentoo", "SLOT": "0", "USE": "ipc missing-iuse"}, ), ("virtual/package-manager-0", {"EAPI": "0", "repository": "gentoo", "SLOT": "0"}), ) match_tests = ( ("sys-apps/portage:0[ipc]", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0[-ipc]", []), ("sys-apps/portage:0[doc]", []), ("sys-apps/portage:0[-doc]", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0[missing-iuse]", []), ("sys-apps/portage:0[-missing-iuse]", []), ("sys-apps/portage:0::gentoo[ipc]", ["sys-apps/portage-2.1.10"]), ("sys-apps/portage:0::multilib[ipc]", []), ("virtual/package-manager", ["virtual/package-manager-0"]), ) tempdir = tempfile.mkdtemp() try: test_repo = os.path.join(tempdir, "var", "repositories", "test_repo") os.makedirs(os.path.join(test_repo, "profiles")) with open(os.path.join(test_repo, "profiles", "repo_name"), "w") as f: f.write("test_repo") env = {"PORTAGE_REPOSITORIES": "[DEFAULT]\nmain-repo = test_repo\n[test_repo]\nlocation = %s" % test_repo} fakedb = fakedbapi(settings=config(config_profile_path="", env=env, eprefix=tempdir)) for cpv, metadata in packages: fakedb.cpv_inject(cpv, metadata=metadata) for atom, expected_result in match_tests: result = fakedb.match(atom) self.assertEqual( fakedb.match(atom), expected_result, "fakedb.match('%s') = %s != %s" % (atom, result, expected_result), ) finally: shutil.rmtree(tempdir)
def _set_returncode(self, wait_retval):
    SpawnProcess._set_returncode(self, wait_retval)

    if self.cgroup is not None:
        try:
            shutil.rmtree(self.cgroup)
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                raise

    if self._exit_timeout_id is not None:
        self.scheduler.source_remove(self._exit_timeout_id)
        self._exit_timeout_id = None

    if self._ipc_daemon is not None:
        self._ipc_daemon.cancel()
        if self._exit_command.exitcode is not None:
            self.returncode = self._exit_command.exitcode
        else:
            if self.returncode < 0:
                if not self.cancelled:
                    self._killed_by_signal(-self.returncode)
            else:
                self.returncode = 1
                if not self.cancelled:
                    self._unexpected_exit()
        if self._build_dir is not None:
            self._build_dir.unlock()
            self._build_dir = None
    elif not self.cancelled:
        exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
        if exit_file and not os.path.exists(exit_file):
            if self.returncode < 0:
                if not self.cancelled:
                    self._killed_by_signal(-self.returncode)
            else:
                self.returncode = 1
                if not self.cancelled:
                    self._unexpected_exit()
def testEbuildFetch(self): distfiles = { 'bar': b'bar\n', 'foo': b'foo\n', } ebuilds = { 'dev-libs/A-1': { 'EAPI': '7', 'RESTRICT': 'primaryuri', 'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar {scheme}://{host}:{port}/distfiles/foo.txt -> foo''', }, } loop = SchedulerInterface(global_event_loop()) scheme = 'http' host = '127.0.0.1' content = {} for k, v in distfiles.items(): content['/distfiles/{}.txt'.format(k)] = v with AsyncHTTPServer(host, content, loop) as server: ebuilds_subst = {} for cpv, metadata in ebuilds.items(): metadata = metadata.copy() metadata['SRC_URI'] = metadata['SRC_URI'].format( scheme=scheme, host=host, port=server.server_port) ebuilds_subst[cpv] = metadata playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles) ro_distdir = tempfile.mkdtemp() try: fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND']) fetch_bin = portage.process.find_binary(fetchcommand[0]) if fetch_bin is None: self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND'])) resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND']) resume_bin = portage.process.find_binary(resumecommand[0]) if resume_bin is None: self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND'])) root_config = playground.trees[playground.eroot]['root_config'] portdb = root_config.trees["porttree"].dbapi settings = config(clone=playground.settings) # Tests only work with one ebuild at a time, so the config # pool only needs a single config instance. class config_pool: @staticmethod def allocate(): return settings @staticmethod def deallocate(settings): pass def async_fetch(pkg, ebuild_path): fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path, fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop) fetcher.start() return fetcher.async_wait() for cpv in ebuilds: metadata = dict(zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package(built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name='ebuild') settings.setcpv(pkg) ebuild_path = portdb.findname(pkg.cpv) portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb) # Test good files in DISTDIR for k in settings['AA'].split(): os.stat(os.path.join(settings['DISTDIR'], k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test digestgen with fetch os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest')) for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) with ForkExecutor(loop=loop) as executor: self.assertTrue(bool(loop.run_until_complete( loop.run_in_executor(executor, functools.partial( digestgen, mysettings=settings, myportdb=portdb))))) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test missing files in DISTDIR for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test empty files in DISTDIR for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) with open(file_path, 'wb') as f: pass 
self.assertEqual(os.stat(file_path).st_size, 0) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test non-empty files containing null bytes in DISTDIR for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) with open(file_path, 'wb') as f: f.write(len(distfiles[k]) * b'\0') self.assertEqual(os.stat(file_path).st_size, len(distfiles[k])) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) # Test PORTAGE_RO_DISTDIRS settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir) orig_fetchcommand = settings['FETCHCOMMAND'] orig_resumecommand = settings['RESUMECOMMAND'] try: settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = '' for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) os.rename(file_path, os.path.join(ro_distdir, k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) self.assertTrue(os.path.islink(file_path)) with open(file_path, 'rb') as f: self.assertEqual(f.read(), distfiles[k]) os.unlink(file_path) finally: settings.pop('PORTAGE_RO_DISTDIRS') settings['FETCHCOMMAND'] = orig_fetchcommand settings['RESUMECOMMAND'] = orig_resumecommand # Test local filesystem in GENTOO_MIRRORS orig_mirrors = settings['GENTOO_MIRRORS'] orig_fetchcommand = settings['FETCHCOMMAND'] try: settings['GENTOO_MIRRORS'] = ro_distdir settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = '' self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: settings['GENTOO_MIRRORS'] = orig_mirrors settings['FETCHCOMMAND'] = orig_fetchcommand settings['RESUMECOMMAND'] = orig_resumecommand # Test readonly DISTDIR orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode try: os.chmod(settings['DISTDIR'], 0o555) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: os.chmod(settings['DISTDIR'], orig_distdir_mode) # Test parallel-fetch mode settings['PORTAGE_PARALLEL_FETCHONLY'] = '1' try: self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) for k in settings['AA'].split(): os.unlink(os.path.join(settings['DISTDIR'], k)) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: settings.pop('PORTAGE_PARALLEL_FETCHONLY') # Test RESUMECOMMAND orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] try: settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2' for k in settings['AA'].split(): file_path = os.path.join(settings['DISTDIR'], k) os.unlink(file_path) with open(file_path + _download_suffix, 'wb') as f: f.write(distfiles[k][:2]) self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0) for k 
in settings['AA'].split(): with open(os.path.join(settings['DISTDIR'], k), 'rb') as f: self.assertEqual(f.read(), distfiles[k]) finally: settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size finally: shutil.rmtree(ro_distdir) playground.cleanup()
def testIpcDaemon(self): tmpdir = tempfile.mkdtemp() build_dir = None try: env = {} # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they # need to be inherited by ebuild subprocesses. if 'PORTAGE_USERNAME' in os.environ: env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME'] if 'PORTAGE_GRPNAME' in os.environ: env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME'] env['PORTAGE_PYTHON'] = _python_interpreter env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1') if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] task_scheduler = TaskScheduler(max_jobs=2) build_dir = EbuildBuildDir( scheduler=task_scheduler.sched_iface, settings=env) build_dir.lock() ensure_dirs(env['PORTAGE_BUILDDIR']) input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in') output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out') os.mkfifo(input_fifo) os.mkfifo(output_fifo) for exitcode in (0, 1, 2): exit_command = ExitCommand() commands = {'exit' : exit_command} daemon = EbuildIpcDaemon(commands=commands, input_fifo=input_fifo, output_fifo=output_fifo, scheduler=task_scheduler.sched_iface) proc = SpawnProcess( args=[BASH_BINARY, "-c", '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode], env=env, scheduler=task_scheduler.sched_iface) self.received_command = False def exit_command_callback(): self.received_command = True task_scheduler.clear() task_scheduler.wait() exit_command.reply_hook = exit_command_callback start_time = time.time() task_scheduler.add(daemon) task_scheduler.add(proc) task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT) task_scheduler.clear() task_scheduler.wait() hardlock_cleanup(env['PORTAGE_BUILDDIR'], remove_all_locks=True) self.assertEqual(self.received_command, True, "command not received after %d seconds" % \ (time.time() - start_time,)) self.assertEqual(proc.isAlive(), False) self.assertEqual(daemon.isAlive(), False) self.assertEqual(exit_command.exitcode, exitcode) # Intentionally short timeout test for QueueScheduler.run() sleep_time_s = 10 # 10.000 seconds short_timeout_ms = 10 # 0.010 seconds for i in range(3): exit_command = ExitCommand() commands = {'exit' : exit_command} daemon = EbuildIpcDaemon(commands=commands, input_fifo=input_fifo, output_fifo=output_fifo, scheduler=task_scheduler.sched_iface) proc = SpawnProcess( args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s], env=env, scheduler=task_scheduler.sched_iface) self.received_command = False def exit_command_callback(): self.received_command = True task_scheduler.clear() task_scheduler.wait() exit_command.reply_hook = exit_command_callback start_time = time.time() task_scheduler.add(daemon) task_scheduler.add(proc) task_scheduler.run(timeout=short_timeout_ms) task_scheduler.clear() task_scheduler.wait() hardlock_cleanup(env['PORTAGE_BUILDDIR'], remove_all_locks=True) self.assertEqual(self.received_command, False, "command received after %d seconds" % \ (time.time() - start_time,)) self.assertEqual(proc.isAlive(), False) self.assertEqual(daemon.isAlive(), False) self.assertEqual(proc.returncode == os.EX_OK, False) finally: if build_dir is not None: build_dir.unlock() shutil.rmtree(tmpdir)
def tearDown(self):
    super(RepomanEchangelogTestCase, self).tearDown()
    shutil.rmtree(self.tmpdir)
def _nofetch_exit(self, ebuild_phase):
    self._final_exit(ebuild_phase)
    elog_process(self.settings.mycpv, self.settings)
    shutil.rmtree(self._private_tmpdir)
    self._async_wait()
def testIpcDaemon(self): event_loop = global_event_loop() tmpdir = tempfile.mkdtemp() build_dir = None try: env = {} # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they # need to be inherited by ebuild subprocesses. if 'PORTAGE_USERNAME' in os.environ: env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME'] if 'PORTAGE_GRPNAME' in os.environ: env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME'] env['PORTAGE_PYTHON'] = _python_interpreter env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1') if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] build_dir = EbuildBuildDir( scheduler=event_loop, settings=env) build_dir.lock() ensure_dirs(env['PORTAGE_BUILDDIR']) input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in') output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out') os.mkfifo(input_fifo) os.mkfifo(output_fifo) for exitcode in (0, 1, 2): exit_command = ExitCommand() commands = {'exit' : exit_command} daemon = EbuildIpcDaemon(commands=commands, input_fifo=input_fifo, output_fifo=output_fifo) proc = SpawnProcess( args=[BASH_BINARY, "-c", '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode], env=env) task_scheduler = TaskScheduler(iter([daemon, proc]), max_jobs=2, event_loop=event_loop) self.received_command = False def exit_command_callback(): self.received_command = True task_scheduler.cancel() exit_command.reply_hook = exit_command_callback start_time = time.time() self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT) hardlock_cleanup(env['PORTAGE_BUILDDIR'], remove_all_locks=True) self.assertEqual(self.received_command, True, "command not received after %d seconds" % \ (time.time() - start_time,)) self.assertEqual(proc.isAlive(), False) self.assertEqual(daemon.isAlive(), False) self.assertEqual(exit_command.exitcode, exitcode) # Intentionally short timeout test for EventLoop/AsyncScheduler. # Use a ridiculously long sleep_time_s in case the user's # system is heavily loaded (see bug #436334). sleep_time_s = 600 #600.000 seconds short_timeout_ms = 10 # 0.010 seconds for i in range(3): exit_command = ExitCommand() commands = {'exit' : exit_command} daemon = EbuildIpcDaemon(commands=commands, input_fifo=input_fifo, output_fifo=output_fifo) proc = SleepProcess(seconds=sleep_time_s) task_scheduler = TaskScheduler(iter([daemon, proc]), max_jobs=2, event_loop=event_loop) self.received_command = False def exit_command_callback(): self.received_command = True task_scheduler.cancel() exit_command.reply_hook = exit_command_callback start_time = time.time() self._run(event_loop, task_scheduler, short_timeout_ms) hardlock_cleanup(env['PORTAGE_BUILDDIR'], remove_all_locks=True) self.assertEqual(self.received_command, False, "command received after %d seconds" % \ (time.time() - start_time,)) self.assertEqual(proc.isAlive(), False) self.assertEqual(daemon.isAlive(), False) self.assertEqual(proc.returncode == os.EX_OK, False) finally: if build_dir is not None: build_dir.unlock() shutil.rmtree(tmpdir)
def testSyncLocal(self): debug = False skip_reason = self._must_skip() if skip_reason: self.portage_skip = skip_reason self.assertFalse(True, skip_reason) return repos_conf = textwrap.dedent(""" [DEFAULT] %(default_keys)s [test_repo] location = %(EPREFIX)s/var/repositories/test_repo sync-type = %(sync-type)s sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync auto-sync = %(auto-sync)s %(repo_extra_keys)s """) profile = { "eapi": ("5",), "package.use.stable.mask": ("dev-libs/A flag",) } ebuilds = { "dev-libs/A-0": {} } user_config = { 'make.conf': ('FEATURES="metadata-transfer"',) } playground = ResolverPlayground(ebuilds=ebuilds, profile=profile, user_config=user_config, debug=debug) settings = playground.settings eprefix = settings["EPREFIX"] eroot = settings["EROOT"] homedir = os.path.join(eroot, "home") distdir = os.path.join(eprefix, "distdir") repo = settings.repositories["test_repo"] metadata_dir = os.path.join(repo.location, "metadata") cmds = {} for cmd in ("emerge", "emaint"): path = os.path.join(self.bindir, cmd) assert os.path.exists(path) cmds[cmd] = (portage._python_interpreter, "-b", "-Wd", path) git_binary = find_binary("git") git_cmd = (git_binary,) committer_name = "Gentoo Dev" committer_email = "*****@*****.**" def repos_set_conf(sync_type, dflt_keys=None, xtra_keys=None, auto_sync="yes"): env["PORTAGE_REPOSITORIES"] = repos_conf % {\ "EPREFIX": eprefix, "sync-type": sync_type, "auto-sync": auto_sync, "default_keys": "" if dflt_keys is None else dflt_keys, "repo_extra_keys": "" if xtra_keys is None else xtra_keys} def alter_ebuild(): with open(os.path.join(repo.location + "_sync", "dev-libs", "A", "A-0.ebuild"), "a") as f: f.write("\n") os.unlink(os.path.join(metadata_dir, 'timestamp.chk')) sync_cmds = ( (homedir, cmds["emerge"] + ("--sync",)), (homedir, lambda: self.assertTrue(os.path.exists( os.path.join(repo.location, "dev-libs", "A") ), "dev-libs/A expected, but missing")), (homedir, cmds["emaint"] + ("sync", "-A")), ) sync_cmds_auto_sync = ( (homedir, lambda: repos_set_conf("rsync", auto_sync="no")), (homedir, cmds["emerge"] + ("--sync",)), (homedir, lambda: self.assertFalse(os.path.exists( os.path.join(repo.location, "dev-libs", "A") ), "dev-libs/A found, expected missing")), (homedir, lambda: repos_set_conf("rsync", auto_sync="yes")), ) rename_repo = ( (homedir, lambda: os.rename(repo.location, repo.location + "_sync")), ) rsync_opts_repos = ( (homedir, alter_ebuild), (homedir, lambda: repos_set_conf("rsync", None, "sync-rsync-extra-opts = --backup --backup-dir=%s" % _shell_quote(repo.location + "_back"))), (homedir, cmds['emerge'] + ("--sync",)), (homedir, lambda: self.assertTrue(os.path.exists( repo.location + "_back"))), (homedir, lambda: shutil.rmtree(repo.location + "_back")), (homedir, lambda: repos_set_conf("rsync")), ) rsync_opts_repos_default = ( (homedir, alter_ebuild), (homedir, lambda: repos_set_conf("rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" % _shell_quote(repo.location+"_back"))), (homedir, cmds['emerge'] + ("--sync",)), (homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))), (homedir, lambda: shutil.rmtree(repo.location + "_back")), (homedir, lambda: repos_set_conf("rsync")), ) rsync_opts_repos_default_ovr = ( (homedir, alter_ebuild), (homedir, lambda: repos_set_conf("rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" % _shell_quote(repo.location + "_back_nowhere"), "sync-rsync-extra-opts = --backup --backup-dir=%s" % _shell_quote(repo.location + "_back"))), (homedir, cmds['emerge'] + 
("--sync",)), (homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))), (homedir, lambda: shutil.rmtree(repo.location + "_back")), (homedir, lambda: repos_set_conf("rsync")), ) rsync_opts_repos_default_cancel = ( (homedir, alter_ebuild), (homedir, lambda: repos_set_conf("rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" % _shell_quote(repo.location + "_back_nowhere"), "sync-rsync-extra-opts = ")), (homedir, cmds['emerge'] + ("--sync",)), (homedir, lambda: self.assertFalse(os.path.exists(repo.location + "_back"))), (homedir, lambda: repos_set_conf("rsync")), ) delete_sync_repo = ( (homedir, lambda: shutil.rmtree( repo.location + "_sync")), ) git_repo_create = ( (repo.location, git_cmd + ("config", "--global", "user.name", committer_name,)), (repo.location, git_cmd + ("config", "--global", "user.email", committer_email,)), (repo.location, git_cmd + ("init-db",)), (repo.location, git_cmd + ("add", ".")), (repo.location, git_cmd + ("commit", "-a", "-m", "add whole repo")), ) sync_type_git = ( (homedir, lambda: repos_set_conf("git")), ) pythonpath = os.environ.get("PYTHONPATH") if pythonpath is not None and not pythonpath.strip(): pythonpath = None if pythonpath is not None and \ pythonpath.split(":")[0] == PORTAGE_PYM_PATH: pass else: if pythonpath is None: pythonpath = "" else: pythonpath = ":" + pythonpath pythonpath = PORTAGE_PYM_PATH + pythonpath env = { "PORTAGE_OVERRIDE_EPREFIX" : eprefix, "DISTDIR" : distdir, "GENTOO_COMMITTER_NAME" : committer_name, "GENTOO_COMMITTER_EMAIL" : committer_email, "HOME" : homedir, "PATH" : os.environ["PATH"], "PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"], "PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"], "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""), "PYTHONPATH" : pythonpath, } repos_set_conf("rsync") if os.environ.get("SANDBOX_ON") == "1": # avoid problems from nested sandbox instances env["FEATURES"] = "-sandbox -usersandbox" dirs = [homedir, metadata_dir] try: for d in dirs: ensure_dirs(d) timestamp_path = os.path.join(metadata_dir, 'timestamp.chk') with open(timestamp_path, 'w') as f: f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime())) if debug: # The subprocess inherits both stdout and stderr, for # debugging purposes. stdout = None else: # The subprocess inherits stderr so that any warnings # triggered by python -Wd will be visible. stdout = subprocess.PIPE for cwd, cmd in rename_repo + sync_cmds_auto_sync + sync_cmds + \ rsync_opts_repos + rsync_opts_repos_default + \ rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \ delete_sync_repo + git_repo_create + sync_type_git + \ rename_repo + sync_cmds: if hasattr(cmd, '__call__'): cmd() continue abs_cwd = os.path.join(repo.location, cwd) proc = subprocess.Popen(cmd, cwd=abs_cwd, env=env, stdout=stdout) if debug: proc.wait() else: output = proc.stdout.readlines() proc.wait() proc.stdout.close() if proc.returncode != os.EX_OK: for line in output: sys.stderr.write(_unicode_decode(line)) self.assertEqual(os.EX_OK, proc.returncode, "%s failed in %s" % (cmd, cwd,)) finally: playground.cleanup()
def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
    """
    The myroot parameter is ignored.
    """
    myroot = None

    if settings is None:
        raise TypeError("settings argument is required")

    mysettings = settings
    clean_dirs = [mysettings["HOME"]]

    # We enable cleanup when we want to make sure old cruft (such as the old
    # environment) doesn't interfere with the current phase.
    if cleanup and 'keeptemp' not in mysettings.features:
        clean_dirs.append(mysettings["T"])

    for clean_dir in clean_dirs:
        try:
            shutil.rmtree(clean_dir)
        except OSError as oe:
            if errno.ENOENT == oe.errno:
                pass
            elif errno.EPERM == oe.errno:
                writemsg("%s\n" % oe, noiselevel=-1)
                writemsg(_("Operation Not Permitted: rmtree('%s')\n") %
                    clean_dir, noiselevel=-1)
                return 1
            else:
                # Wrap with PermissionDenied if appropriate, so that callers
                # display a short error message without a traceback.
                _raise_exc(oe)

    def makedirs(dir_path):
        try:
            os.makedirs(dir_path)
        except OSError as oe:
            if errno.EEXIST == oe.errno:
                pass
            elif errno.EPERM == oe.errno:
                writemsg("%s\n" % oe, noiselevel=-1)
                writemsg(_("Operation Not Permitted: makedirs('%s')\n") %
                    dir_path, noiselevel=-1)
                return False
            else:
                raise
        return True

    mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")

    mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
    mydirs.append(os.path.dirname(mydirs[-1]))

    try:
        for mydir in mydirs:
            ensure_dirs(mydir)
            try:
                apply_secpass_permissions(mydir,
                    gid=portage_gid, uid=portage_uid, mode=0o700, mask=0)
            except PortageException:
                if not os.path.isdir(mydir):
                    raise
        for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
            ensure_dirs(mysettings[dir_key], mode=0o755)
            apply_secpass_permissions(mysettings[dir_key],
                uid=portage_uid, gid=portage_gid)
    except PermissionDenied as e:
        writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
        return 1
    except OperationNotPermitted as e:
        writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
        return 1
    except FileNotFound as e:
        writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
        return 1

    # Reset state for things like noauto and keepwork in FEATURES.
    for x in ('.die_hooks',):
        try:
            os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
        except OSError:
            pass

    _prepare_workdir(mysettings)
    if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
        # Avoid spurious permissions adjustments when fetching with
        # a temporary PORTAGE_TMPDIR setting (for fetchonly).
        _prepare_features_dirs(mysettings)
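# Minimal standalone sketch (illustrative names only, not portage API) of the
# clean-then-recreate flow that prepare_build_dirs() follows above: remove
# stale per-phase directories while ignoring ENOENT, then recreate the
# required directories with a fixed mode.
import errno
import os
import shutil


def reset_phase_dirs(clean_dirs, create_dirs, mode=0o755):
    for d in clean_dirs:
        try:
            shutil.rmtree(d)
        except OSError as oe:
            # A missing directory is not an error; anything else propagates.
            if oe.errno != errno.ENOENT:
                raise
    for d in create_dirs:
        os.makedirs(d, exist_ok=True)
        os.chmod(d, mode)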
def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
    """
    This spawns pkg_nofetch if appropriate. The settings parameter
    is useful only if setcpv has already been called in order
    to cache metadata. It will be cloned internally, in order to
    prevent any changes from interfering with the calling code.
    If settings is None then a suitable config instance will be
    acquired from the given portdbapi instance. Do not use the
    settings parameter unless setcpv has been called on the given
    instance, since otherwise it's possible to trigger issues like
    bug #408817 due to fragile assumptions involving the config
    state inside doebuild_environment().

    A private PORTAGE_BUILDDIR will be created and cleaned up, in
    order to avoid any interference with any other processes.
    If PORTAGE_TMPDIR is writable, that will be used, otherwise
    the default directory for the tempfile module will be used.

    We only call the pkg_nofetch phase if either RESTRICT=fetch
    is set or the package has explicitly overridden the default
    pkg_nofetch implementation. This allows specialized messages
    to be displayed for problematic packages even though they do
    not set RESTRICT=fetch (bug #336499).

    This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
    variable is set in the config instance.
    """

    if settings is None:
        settings = config(clone=portdb.settings)
    else:
        settings = config(clone=settings)

    if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
        return os.EX_OK

    # We must create our private PORTAGE_TMPDIR before calling
    # doebuild_environment(), since lots of variables such
    # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
    portage_tmpdir = settings.get('PORTAGE_TMPDIR')
    if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
        portage_tmpdir = None
    private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
    settings['PORTAGE_TMPDIR'] = private_tmpdir
    settings.backup_changes('PORTAGE_TMPDIR')
    # private temp dir was just created, so it's not locked yet
    settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

    try:
        doebuild_environment(ebuild_path, 'nofetch',
            settings=settings, db=portdb)
        restrict = settings['PORTAGE_RESTRICT'].split()
        defined_phases = settings['DEFINED_PHASES'].split()
        if not defined_phases:
            # When DEFINED_PHASES is undefined, assume all
            # phases are defined.
            defined_phases = EBUILD_PHASES

        if 'fetch' not in restrict and \
            'nofetch' not in defined_phases:
            return os.EX_OK

        prepare_build_dirs(settings=settings)
        ebuild_phase = EbuildPhase(background=False,
            phase='nofetch',
            scheduler=SchedulerInterface(portage._internal_caller and
                global_event_loop() or EventLoop(main=False)),
            fd_pipes=fd_pipes, settings=settings)
        ebuild_phase.start()
        ebuild_phase.wait()
        elog_process(settings.mycpv, settings)
    finally:
        shutil.rmtree(private_tmpdir)

    return ebuild_phase.returncode
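# Minimal sketch (illustrative names, not portage API) of the private-tempdir
# pattern spawn_nofetch() relies on above: create a throwaway directory under
# a preferred parent if that parent is writable, fall back to the tempfile
# module's default otherwise, and always remove it in a finally block so the
# caller's tree is never left polluted.
import os
import shutil
import tempfile


def with_private_tmpdir(preferred_parent, work):
    parent = preferred_parent
    if not parent or not os.access(parent, os.W_OK):
        parent = None  # fall back to the tempfile module's default location
    private_tmpdir = tempfile.mkdtemp(dir=parent)
    try:
        return work(private_tmpdir)
    finally:
        shutil.rmtree(private_tmpdir)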