def _pkg(self, cpv): """ The RootConfig instance that will become the Package.root_config attribute can be overridden by the FakeVartree pkg_root_config constructory argument, since we want to be consistent with the depgraph._pkg() method which uses a specially optimized RootConfig that has a FakeVartree instead of a real vartree. """ pkg = Package(cpv=cpv, built=True, installed=True, metadata=zip( self._db_keys, self._real_vardb.aux_get(cpv, self._db_keys)), root_config=self._pkg_root_config, type_name="installed") try: mycounter = long(pkg.metadata["COUNTER"]) except ValueError: mycounter = 0 pkg.metadata["COUNTER"] = str(mycounter) self._pkg_cache[pkg] = pkg return pkg
def _pkg(self, cpv):
    root_config = self._root_config
    real_vardb = root_config.trees["vartree"].dbapi
    pkg = Package(cpv=cpv, built=True, installed=True,
        metadata=zip(self._db_keys,
        real_vardb.aux_get(cpv, self._db_keys)),
        root_config=root_config,
        type_name="installed")

    try:
        mycounter = long(pkg.metadata["COUNTER"])
    except ValueError:
        mycounter = 0
        pkg.metadata["COUNTER"] = str(mycounter)

    return pkg
def testDoebuildSpawn(self):
    playground = ResolverPlayground()
    try:
        settings = config(clone=playground.settings)
        cpv = 'sys-apps/portage-2.1'
        metadata = {
            'EAPI' : '2',
            'INHERITED' : 'python eutils',
            'IUSE' : 'build doc epydoc python3 selinux',
            'LICENSE' : 'GPL-2',
            'PROVIDE' : 'virtual/portage',
            'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
            'SLOT' : '0',
        }
        root_config = playground.trees[playground.root]['root_config']
        pkg = Package(built=False, cpv=cpv, installed=False,
            metadata=metadata, root_config=root_config,
            type_name='ebuild')
        settings.setcpv(pkg)
        settings['PORTAGE_PYTHON'] = _python_interpreter
        settings['PORTAGE_BUILDDIR'] = os.path.join(
            settings['PORTAGE_TMPDIR'], cpv)
        settings['T'] = os.path.join(
            settings['PORTAGE_BUILDDIR'], 'temp')
        for x in ('PORTAGE_BUILDDIR', 'T'):
            os.makedirs(settings[x])

        # Create a fake environment, to pretend as if the ebuild
        # has been sourced already.
        open(os.path.join(settings['T'], 'environment'), 'wb')

        task_scheduler = TaskScheduler()
        for phase in ('_internal_test',):

            # Test EbuildSpawnProcess by calling doebuild.spawn() with
            # returnpid=False. This case is no longer used by portage
            # internals since EbuildPhase is used instead and that passes
            # returnpid=True to doebuild.spawn().
            rval = doebuild_spawn("%s %s" % (_shell_quote(
                os.path.join(settings["PORTAGE_BIN_PATH"],
                os.path.basename(EBUILD_SH_BINARY))), phase),
                settings, free=1)
            self.assertEqual(rval, os.EX_OK)

            ebuild_phase = EbuildPhase(background=False,
                phase=phase, scheduler=task_scheduler.sched_iface,
                settings=settings)
            task_scheduler.add(ebuild_phase)
            task_scheduler.run()
            self.assertEqual(ebuild_phase.returncode, os.EX_OK)

        ebuild_phase = MiscFunctionsProcess(background=False,
            commands=['success_hooks'],
            scheduler=task_scheduler.sched_iface, settings=settings)
        task_scheduler.add(ebuild_phase)
        task_scheduler.run()
        self.assertEqual(ebuild_phase.returncode, os.EX_OK)
    finally:
        playground.cleanup()
def _pkg(cpv):
    pkg = pkg_cache.get(cpv)
    if pkg is None:
        pkg = Package(built=True, cpv=cpv,
            installed=True, metadata=zip(db_keys,
            vartree.dbapi.aux_get(cpv, db_keys)),
            operation="uninstall",
            root_config=root_config,
            type_name="installed")
        pkg_cache[cpv] = pkg
    return pkg
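# Illustrative sketch (not from the Portage sources): the _pkg() helper
# above memoizes Package construction so that repeated lookups of the same
# cpv return the identical instance. The same pattern in miniature, with a
# hypothetical make_package() standing in for the Package constructor:
def memoized_factory(make_package):
    cache = {}
    def _get(cpv):
        pkg = cache.get(cpv)
        if pkg is None:
            pkg = make_package(cpv)
            cache[cpv] = pkg
        return pkg
    return _get

# Usage: _get = memoized_factory(lambda cpv: object())
# _get("sys-apps/portage-2.1") is _get("sys-apps/portage-2.1")  -> True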
def _visible(self, db, cpv, metadata):
    installed = db is self._vardb
    built = installed or db is not self._portdb
    pkg_type = "ebuild"
    if installed:
        pkg_type = "installed"
    elif built:
        pkg_type = "binary"
    return Package(type_name=pkg_type,
        root_config=self.root_config,
        cpv=cpv, built=built, installed=installed,
        metadata=metadata).visible
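# Illustrative sketch (not from the Portage sources): _visible() above
# derives the Package type_name from which database the metadata came
# from. The same decision, reduced to a standalone function over the two
# booleans it computes:
def pkg_type_name(installed, built):
    if installed:
        return "installed"
    if built:
        # Built but not installed means a binary package.
        return "binary"
    return "ebuild"

# e.g. pkg_type_name(installed=False, built=True) -> "binary"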
def check(self, checkdirlist, checkdir, xpkg):
    self.continue_ = False
    ebuildlist = []
    pkgs = {}
    allvalid = True
    for y in checkdirlist:
        file_is_ebuild = y.endswith(".ebuild")
        file_should_be_non_executable = y in no_exec or file_is_ebuild

        if file_should_be_non_executable:
            file_is_executable = stat.S_IMODE(
                os.stat(os.path.join(checkdir, y)).st_mode) & 0o111

            if file_is_executable:
                self.qatracker.add_error("file.executable",
                    os.path.join(checkdir, y))
        if file_is_ebuild:
            pf = y[:-7]
            ebuildlist.append(pf)
            catdir = xpkg.split("/")[0]
            cpv = "%s/%s" % (catdir, pf)
            try:
                myaux = dict(zip(allvars,
                    self.portdb.aux_get(cpv, allvars)))
            except KeyError:
                allvalid = False
                self.qatracker.add_error("ebuild.syntax",
                    os.path.join(xpkg, y))
                continue
            except IOError:
                allvalid = False
                self.qatracker.add_error("ebuild.output",
                    os.path.join(xpkg, y))
                continue
            if not portage.eapi_is_supported(myaux["EAPI"]):
                allvalid = False
                self.qatracker.add_error("EAPI.unsupported",
                    os.path.join(xpkg, y))
                continue
            pkgs[pf] = Package(cpv=cpv, metadata=myaux,
                root_config=self.root_config, type_name="ebuild")

    if len(pkgs) != len(ebuildlist):
        # If we can't access all the metadata then it's totally unsafe to
        # commit since there's no way to generate a correct Manifest.
        # Do not try to do any more QA checks on this package since missing
        # metadata leads to false positives for several checks, and false
        # positives confuse users.
        self.continue_ = True

    return pkgs, allvalid
def _pkg(self, cpv): """ The RootConfig instance that will become the Package.root_config attribute can be overridden by the FakeVartree pkg_root_config constructory argument, since we want to be consistent with the depgraph._pkg() method which uses a specially optimized RootConfig that has a FakeVartree instead of a real vartree. """ pkg = Package(cpv=cpv, built=True, installed=True, metadata=zip(self._db_keys, self._real_vardb.aux_get(cpv, self._db_keys)), root_config=self._pkg_root_config, type_name="installed") try: mycounter = long(pkg.metadata["COUNTER"]) except ValueError: mycounter = 0 pkg.metadata["COUNTER"] = str(mycounter) self._pkg_cache[pkg] = pkg return pkg
def _sync(self):

    real_vardb = self._root_config.trees["vartree"].dbapi
    current_cpv_set = frozenset(real_vardb.cpv_all())
    pkg_vardb = self.dbapi
    pkg_cache = self._pkg_cache
    aux_get_history = self._aux_get_history

    # Remove any packages that have been uninstalled.
    for pkg in list(pkg_vardb):
        if pkg.cpv not in current_cpv_set:
            self.cpv_discard(pkg)

    # Validate counters and timestamps.
    slot_counters = {}
    root_config = self._pkg_root_config
    validation_keys = ["COUNTER", "_mtime_"]
    for cpv in current_cpv_set:

        pkg_hash_key = Package._gen_hash_key(cpv=cpv,
            installed=True, root_config=root_config,
            type_name="installed")
        pkg = pkg_vardb.get(pkg_hash_key)
        if pkg is not None:
            counter, mtime = real_vardb.aux_get(cpv, validation_keys)
            try:
                counter = long(counter)
            except ValueError:
                counter = 0

            if counter != pkg.counter or \
                mtime != pkg.mtime:
                self.cpv_discard(pkg)
                pkg = None

        if pkg is None:
            pkg = self._pkg(cpv)

        other_counter = slot_counters.get(pkg.slot_atom)
        if other_counter is not None:
            if other_counter > pkg.counter:
                continue

        slot_counters[pkg.slot_atom] = pkg.counter
        pkg_vardb.cpv_inject(pkg)

    real_vardb.flush_cache()
def _sync(self):

    real_vardb = self._root_config.trees["vartree"].dbapi
    current_cpv_set = frozenset(real_vardb.cpv_all())
    pkg_vardb = self.dbapi

    # Remove any packages that have been uninstalled.
    for pkg in list(pkg_vardb):
        if pkg.cpv not in current_cpv_set:
            self.cpv_discard(pkg)

    # Validate counters and timestamps.
    slot_counters = {}
    root_config = self._pkg_root_config
    validation_keys = ["COUNTER", "_mtime_"]
    for cpv in current_cpv_set:

        pkg_hash_key = Package._gen_hash_key(cpv=cpv,
            installed=True, root_config=root_config,
            type_name="installed")
        pkg = pkg_vardb.get(pkg_hash_key)
        if pkg is not None:
            counter, mtime = real_vardb.aux_get(cpv, validation_keys)
            try:
                counter = int(counter)
            except ValueError:
                counter = 0

            if counter != pkg.counter or \
                mtime != pkg.mtime:
                self.cpv_discard(pkg)
                pkg = None

        if pkg is None:
            pkg = self._pkg(cpv)

        other_counter = slot_counters.get(pkg.slot_atom)
        if other_counter is not None:
            if other_counter > pkg.counter:
                continue

        slot_counters[pkg.slot_atom] = pkg.counter
        pkg_vardb.cpv_inject(pkg)

    real_vardb.flush_cache()
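# Illustrative sketch (not from the Portage sources): both _sync()
# variants above treat a cached Package as stale when its COUNTER or
# mtime no longer matches what the real vardb reports. That comparison,
# as a standalone helper with hypothetical argument names:
def is_stale(cached_counter, cached_mtime, counter, mtime):
    # COUNTER comes back from aux_get() as a string; unparseable values
    # fall back to 0, mirroring the try/except ValueError blocks above.
    try:
        counter = int(counter)
    except ValueError:
        counter = 0
    return counter != cached_counter or mtime != cached_mtime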
def _match_use(self, atom, pkg, metadata, ignore_profile=False):
    iuse_implicit_match = self._iuse_implicit_cnstr(pkg, metadata)
    usealiases = self.settings._use_manager.getUseAliases(pkg)
    iuse = Package._iuse(
        None,
        metadata["IUSE"].split(),
        iuse_implicit_match,
        usealiases,
        metadata["EAPI"],
    )

    for x in atom.unevaluated_atom.use.required:
        if iuse.get_real_flag(x) is None:
            return False

    if atom.use is None:
        pass

    elif not self._use_mutable:
        # Use IUSE to validate USE settings for built packages,
        # in case the package manager that built this package
        # failed to do that for some reason (or in case of
        # data corruption). The enabled flags must be consistent
        # with implicit IUSE, in order to avoid potential
        # inconsistencies in USE dep matching (see bug #453400).
        use = frozenset(x for x in metadata["USE"].split()
            if iuse.get_real_flag(x) is not None)
        missing_enabled = frozenset(x for x in
            atom.use.missing_enabled if iuse.get_real_flag(x) is None)
        missing_disabled = frozenset(x for x in
            atom.use.missing_disabled if iuse.get_real_flag(x) is None)
        enabled = frozenset((iuse.get_real_flag(x) or x)
            for x in atom.use.enabled)
        disabled = frozenset((iuse.get_real_flag(x) or x)
            for x in atom.use.disabled)

        if enabled:
            if any(x in enabled for x in missing_disabled):
                return False
            need_enabled = enabled.difference(use)
            if need_enabled:
                if any(x not in missing_enabled for x in need_enabled):
                    return False

        if disabled:
            if any(x in disabled for x in missing_enabled):
                return False
            need_disabled = disabled.intersection(use)
            if need_disabled:
                if any(x not in missing_disabled
                    for x in need_disabled):
                    return False

    elif not self.settings.local_config:
        if not ignore_profile:
            # Check masked and forced flags for repoman.
            usemask = self.settings._getUseMask(
                pkg, stable=self.settings._parent_stable)
            if any(x in usemask and iuse.get_real_flag(x) is not None
                for x in atom.use.enabled):
                return False

            useforce = self.settings._getUseForce(
                pkg, stable=self.settings._parent_stable)
            if any(x in useforce and x not in usemask
                and iuse.get_real_flag(x) is not None
                for x in atom.use.disabled):
                return False

        # Check unsatisfied use-default deps
        if atom.use.enabled:
            missing_disabled = frozenset(x for x in
                atom.use.missing_disabled
                if iuse.get_real_flag(x) is None)
            if any(x in atom.use.enabled for x in missing_disabled):
                return False
        if atom.use.disabled:
            missing_enabled = frozenset(x for x in
                atom.use.missing_enabled
                if iuse.get_real_flag(x) is None)
            if any(x in atom.use.disabled for x in missing_enabled):
                return False

    return True
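# Illustrative sketch (not from the Portage sources): the core of the
# immutable-USE branch in _match_use() above, reduced to plain sets.
# Here "use" is the package's enabled USE (restricted to IUSE), "enabled"
# and "disabled" are the flags the atom requires on/off, and
# "missing_enabled"/"missing_disabled" are the atom's (+)/(-) defaults
# for flags absent from IUSE. All names are hypothetical.
def use_dep_matches(use, enabled, disabled, missing_enabled, missing_disabled):
    if enabled:
        # A flag required on may not be one that defaults to off.
        if any(x in enabled for x in missing_disabled):
            return False
        # Required-on flags that are not enabled must default to on.
        need_enabled = enabled.difference(use)
        if any(x not in missing_enabled for x in need_enabled):
            return False
    if disabled:
        # A flag required off may not be one that defaults to on.
        if any(x in disabled for x in missing_enabled):
            return False
        # Required-off flags that are enabled must default to off.
        need_disabled = disabled.intersection(use)
        if any(x not in missing_disabled for x in need_disabled):
            return False
    return True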
def output(self): """Outputs the results of the search.""" class msg(object): @staticmethod def append(msg): writemsg_stdout(msg, noiselevel=-1) msg.append("\b\b \n[ Results for search key : " + \ bold(self.searchkey) + " ]\n") vardb = self._vardb metadata_keys = set(Package.metadata_keys) metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"]) metadata_keys = tuple(metadata_keys) if self._results_specified: # Handle results added via addCP addCP_matches = [] for mytype, matches in self.matches.items(): for match in matches: addCP_matches.append((mytype, match)) iterator = iter(addCP_matches) else: # Do a normal search iterator = self._iter_search() for mtype, match in iterator: self.mlen += 1 masked = False full_package = None if mtype in ("pkg", "desc"): full_package = self._xmatch("bestmatch-visible", match) if not full_package: masked = True full_package = self._xmatch("match-all", match) if full_package: full_package = full_package[-1] elif mtype == "set": msg.append(green("*") + " " + bold(match) + "\n") if self.verbose: msg.append(" " + darkgreen("Description:") + \ " " + \ self.sdict[match].getMetadata("DESCRIPTION") \ + "\n\n") if full_package: try: metadata = dict( zip(metadata_keys, self._aux_get(full_package, metadata_keys))) except KeyError: self._aux_get_error(full_package) continue desc = metadata["DESCRIPTION"] homepage = metadata["HOMEPAGE"] license = metadata["LICENSE"] if masked: msg.append(green("*") + " " + \ white(match) + " " + red("[ Masked ]") + "\n") else: msg.append(green("*") + " " + bold(match) + "\n") myversion = self.getVersion(full_package, search.VERSION_RELEASE) mysum = [0, 0] file_size_str = None mycat = match.split("/")[0] mypkg = match.split("/")[1] mycpv = match + "-" + myversion myebuild = self._findname(mycpv) if myebuild: pkg = Package(built=False, cpv=mycpv, installed=False, metadata=metadata, root_config=self.root_config, type_name="ebuild") pkgdir = os.path.dirname(myebuild) mf = self.settings.repositories.get_repo_for_location( os.path.dirname(os.path.dirname(pkgdir))) mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"]) try: uri_map = _parse_uri_map(mycpv, metadata, use=pkg.use.enabled) except portage.exception.InvalidDependString as e: file_size_str = "Unknown (%s)" % (e, ) del e else: try: mysum[0] = mf.getDistfilesSize(uri_map) except KeyError as e: file_size_str = "Unknown (missing " + \ "digest for %s)" % (e,) del e available = False for db in self._dbs: if db is not vardb and \ db.cpv_exists(mycpv): available = True if not myebuild and hasattr(db, "bintree"): myebuild = db.bintree.getname(mycpv) try: mysum[0] = os.stat(myebuild).st_size except OSError: myebuild = None break if myebuild and file_size_str is None: file_size_str = localized_size(mysum[0]) if self.verbose: if available: msg.append(" %s %s\n" % \ (darkgreen("Latest version available:"), myversion)) msg.append(" %s\n" % \ self.getInstallationStatus(mycat+'/'+mypkg)) if myebuild: msg.append(" %s %s\n" % \ (darkgreen("Size of files:"), file_size_str)) msg.append(" " + darkgreen("Homepage:") + \ " " + homepage + "\n") msg.append(" " + darkgreen("Description:") \ + " " + desc + "\n") msg.append(" " + darkgreen("License:") + \ " " + license + "\n\n") msg.append("[ Applications found : " + \ bold(str(self.mlen)) + " ]\n\n") # This method can be called multiple times, so # reset the match count for the next call. Don't # reset it at the beginning of this method, since # that would lose modfications from the addCP # method. self.mlen = 0
def _match_use(self, atom, pkg, metadata, ignore_profile=False):
    eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
    if eapi_attrs.iuse_effective:
        iuse_implicit_match = self.settings._iuse_effective_match
    else:
        iuse_implicit_match = self.settings._iuse_implicit_match
    usealiases = self.settings._use_manager.getUseAliases(pkg)
    iuse = Package._iuse(None, metadata["IUSE"].split(),
        iuse_implicit_match, usealiases, metadata["EAPI"])

    for x in atom.unevaluated_atom.use.required:
        if iuse.get_real_flag(x) is None:
            return False

    if atom.use is None:
        pass

    elif not self._use_mutable:
        # Use IUSE to validate USE settings for built packages,
        # in case the package manager that built this package
        # failed to do that for some reason (or in case of
        # data corruption). The enabled flags must be consistent
        # with implicit IUSE, in order to avoid potential
        # inconsistencies in USE dep matching (see bug #453400).
        use = frozenset(x for x in metadata["USE"].split()
            if iuse.get_real_flag(x) is not None)
        missing_enabled = frozenset(x for x in
            atom.use.missing_enabled if iuse.get_real_flag(x) is None)
        missing_disabled = frozenset(x for x in
            atom.use.missing_disabled if iuse.get_real_flag(x) is None)
        enabled = frozenset((iuse.get_real_flag(x) or x)
            for x in atom.use.enabled)
        disabled = frozenset((iuse.get_real_flag(x) or x)
            for x in atom.use.disabled)

        if enabled:
            if any(x in enabled for x in missing_disabled):
                return False
            need_enabled = enabled.difference(use)
            if need_enabled:
                if any(x not in missing_enabled for x in need_enabled):
                    return False

        if disabled:
            if any(x in disabled for x in missing_enabled):
                return False
            need_disabled = disabled.intersection(use)
            if need_disabled:
                if any(x not in missing_disabled
                    for x in need_disabled):
                    return False

    elif not self.settings.local_config:
        if not ignore_profile:
            # Check masked and forced flags for repoman.
            usemask = self.settings._getUseMask(pkg,
                stable=self.settings._parent_stable)
            if any(x in usemask for x in atom.use.enabled):
                return False

            useforce = self.settings._getUseForce(pkg,
                stable=self.settings._parent_stable)
            if any(x in useforce and x not in usemask
                for x in atom.use.disabled):
                return False

        # Check unsatisfied use-default deps
        if atom.use.enabled:
            missing_disabled = frozenset(x for x in
                atom.use.missing_disabled
                if iuse.get_real_flag(x) is None)
            if any(x in atom.use.enabled for x in missing_disabled):
                return False
        if atom.use.disabled:
            missing_enabled = frozenset(x for x in
                atom.use.missing_enabled
                if iuse.get_real_flag(x) is None)
            if any(x in atom.use.disabled for x in missing_enabled):
                return False

    return True
def testDoebuildSpawn(self): ebuild_body = textwrap.dedent(""" pkg_nofetch() { : ; } """) ebuilds = { "sys-apps/portage-2.1": { "EAPI": "2", "IUSE": "build doc epydoc python3 selinux", "KEYWORDS": "x86", "LICENSE": "GPL-2", "RDEPEND": ">=app-shells/bash-3.2_p17 >=dev-lang/python-2.6", "SLOT": "0", "MISC_CONTENT": ebuild_body, } } playground = ResolverPlayground(ebuilds=ebuilds) try: root_config = playground.trees[playground.eroot]["root_config"] portdb = root_config.trees["porttree"].dbapi settings = config(clone=playground.settings) if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[ "__PORTAGE_TEST_HARDLINK_LOCKS"] settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS") cpv = "sys-apps/portage-2.1" metadata = dict( zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package( built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name="ebuild", ) settings.setcpv(pkg) settings["PORTAGE_PYTHON"] = _python_interpreter settings["PORTAGE_BUILDDIR"] = os.path.join( settings["PORTAGE_TMPDIR"], cpv) settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get( "PYTHONDONTWRITEBYTECODE", "") settings["HOME"] = os.path.join(settings["PORTAGE_BUILDDIR"], "homedir") settings["T"] = os.path.join(settings["PORTAGE_BUILDDIR"], "temp") for x in ("PORTAGE_BUILDDIR", "HOME", "T"): os.makedirs(settings[x]) # Create a fake environment, to pretend as if the ebuild # has been sourced already. open(os.path.join(settings["T"], "environment"), "wb").close() scheduler = SchedulerInterface(global_event_loop()) for phase in ("_internal_test", ): # Test EbuildSpawnProcess by calling doebuild.spawn() with # returnpid=False. This case is no longer used by portage # internals since EbuildPhase is used instead and that passes # returnpid=True to doebuild.spawn(). rval = doebuild_spawn( "%s %s" % ( _shell_quote( os.path.join( settings["PORTAGE_BIN_PATH"], os.path.basename(EBUILD_SH_BINARY), )), phase, ), settings, free=1, ) self.assertEqual(rval, os.EX_OK) ebuild_phase = EbuildPhase( background=False, phase=phase, scheduler=scheduler, settings=settings, ) ebuild_phase.start() ebuild_phase.wait() self.assertEqual(ebuild_phase.returncode, os.EX_OK) ebuild_phase = MiscFunctionsProcess( background=False, commands=["success_hooks"], scheduler=scheduler, settings=settings, ) ebuild_phase.start() ebuild_phase.wait() self.assertEqual(ebuild_phase.returncode, os.EX_OK) spawn_nofetch(portdb, portdb.findname(cpv), settings=settings) finally: playground.cleanup()
def testDoebuild(self):
    """
    Invoke portage.doebuild() with the fd_pipes parameter, and check that
    the expected output appears in the pipe. This functionality is not
    used by portage internally, but it is supported for API consumers
    (see bug #475812).
    """

    output_fd = 200
    ebuild_body = ["S=${WORKDIR}"]
    for phase_func in (
            "pkg_info",
            "pkg_nofetch",
            "pkg_pretend",
            "pkg_setup",
            "src_unpack",
            "src_prepare",
            "src_configure",
            "src_compile",
            "src_test",
            "src_install",
    ):
        ebuild_body.append(("%s() { echo ${EBUILD_PHASE}"
            " 1>&%s; }") % (phase_func, output_fd))

    ebuild_body.append("")
    ebuild_body = "\n".join(ebuild_body)

    ebuilds = {
        "app-misct/foo-1": {
            "EAPI": "5",
            "MISC_CONTENT": ebuild_body,
        }
    }

    # Override things that may be unavailable, or may have portability
    # issues when running tests in exotic environments.
    #   prepstrip - bug #447810 (bash read builtin EINTR problem)
    true_symlinks = ("find", "prepstrip", "sed", "scanelf")
    true_binary = portage.process.find_binary("true")
    self.assertEqual(true_binary is None, False, "true command not found")

    dev_null = open(os.devnull, "wb")
    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        QueryCommand._db = playground.trees
        root_config = playground.trees[playground.eroot]["root_config"]
        portdb = root_config.trees["porttree"].dbapi
        settings = portage.config(clone=playground.settings)
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                "__PORTAGE_TEST_HARDLINK_LOCKS"]
            settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")

        settings.features.add("noauto")
        settings.features.add("test")
        settings["PORTAGE_PYTHON"] = portage._python_interpreter
        settings["PORTAGE_QUIET"] = "1"
        settings["PYTHONDONTWRITEBYTECODE"] = os.environ.get(
            "PYTHONDONTWRITEBYTECODE", "")

        fake_bin = os.path.join(settings["EPREFIX"], "bin")
        portage.util.ensure_dirs(fake_bin)
        for x in true_symlinks:
            os.symlink(true_binary, os.path.join(fake_bin, x))

        settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
        settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")

        cpv = "app-misct/foo-1"
        metadata = dict(zip(Package.metadata_keys,
            portdb.aux_get(cpv, Package.metadata_keys)))

        pkg = Package(
            built=False,
            cpv=cpv,
            installed=False,
            metadata=metadata,
            root_config=root_config,
            type_name="ebuild",
        )
        settings.setcpv(pkg)
        ebuildpath = portdb.findname(cpv)
        self.assertNotEqual(ebuildpath, None)

        for phase in (
                "info",
                "nofetch",
                "pretend",
                "setup",
                "unpack",
                "prepare",
                "configure",
                "compile",
                "test",
                "install",
                "qmerge",
                "clean",
                "merge",
        ):

            pr, pw = os.pipe()

            producer = DoebuildProcess(
                doebuild_pargs=(ebuildpath, phase),
                doebuild_kwargs={
                    "settings": settings,
                    "mydbapi": portdb,
                    "tree": "porttree",
                    "vartree": root_config.trees["vartree"],
                    "fd_pipes": {
                        1: dev_null.fileno(),
                        2: dev_null.fileno(),
                        output_fd: pw,
                    },
                    "prev_mtimes": {},
                },
            )

            consumer = PipeReader(input_files={"producer": pr})

            task_scheduler = TaskScheduler(iter([producer, consumer]),
                max_jobs=2)

            try:
                task_scheduler.start()
            finally:
                # PipeReader closes pr
                os.close(pw)

            task_scheduler.wait()
            output = portage._unicode_decode(
                consumer.getvalue()).rstrip("\n")

            if task_scheduler.returncode != os.EX_OK:
                portage.writemsg(output, noiselevel=-1)

            self.assertEqual(task_scheduler.returncode, os.EX_OK)

            if phase not in ("clean", "merge", "qmerge"):
                self.assertEqual(phase, output)
    finally:
        dev_null.close()
        playground.cleanup()
        QueryCommand._db = None
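# Illustrative sketch (not from the Portage sources): the fd_pipes test
# above routes an extra file descriptor (200) from the build process into
# a pipe and reads the phase name back. Stripped of portage, the
# underlying mechanism is just a pipe fd inherited across fork (POSIX only):
import os

r, w = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(r)
    os.write(w, b"src_compile")  # child: stand-in for the phase function
    os._exit(0)
os.close(w)
output = os.read(r, 4096)  # parent: collect what the child wrote
os.close(r)
os.waitpid(pid, 0)
assert output == b"src_compile"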
def testEbuildFetch(self):

    user_config = {
        "make.conf": ('GENTOO_MIRRORS="{scheme}://{host}:{port}"',),
    }

    distfiles = {
        'bar': b'bar\n',
        'foo': b'foo\n',
    }

    ebuilds = {
        'dev-libs/A-1': {
            'EAPI': '7',
            'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
                {scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
        },
    }

    loop = SchedulerInterface(global_event_loop())

    def run_async(func, *args, **kwargs):
        with ForkExecutor(loop=loop) as executor:
            return loop.run_until_complete(loop.run_in_executor(executor,
                functools.partial(func, *args, **kwargs)))

    scheme = 'http'
    host = '127.0.0.1'
    content = {}

    content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n'

    for k, v in distfiles.items():
        # mirror path
        content['/distfiles/{}'.format(k)] = v
        # upstream path
        content['/distfiles/{}.txt'.format(k)] = v

    with AsyncHTTPServer(host, content, loop) as server:
        ebuilds_subst = {}
        for cpv, metadata in ebuilds.items():
            metadata = metadata.copy()
            metadata['SRC_URI'] = metadata['SRC_URI'].format(
                scheme=scheme, host=host, port=server.server_port)
            ebuilds_subst[cpv] = metadata

        user_config_subst = user_config.copy()
        for configname, configdata in user_config.items():
            configdata_sub = []
            for line in configdata:
                configdata_sub.append(line.format(
                    scheme=scheme, host=host, port=server.server_port))
            user_config_subst[configname] = tuple(configdata_sub)

        playground = ResolverPlayground(ebuilds=ebuilds_subst,
            distfiles=distfiles, user_config=user_config_subst)
        ro_distdir = tempfile.mkdtemp()
        eubin = os.path.join(playground.eprefix, "usr", "bin")
        try:
            fetchcommand = portage.util.shlex_split(
                playground.settings['FETCHCOMMAND'])
            fetch_bin = portage.process.find_binary(fetchcommand[0])
            if fetch_bin is None:
                self.skipTest('FETCHCOMMAND not found: {}'.format(
                    playground.settings['FETCHCOMMAND']))
            os.symlink(fetch_bin,
                os.path.join(eubin, os.path.basename(fetch_bin)))
            resumecommand = portage.util.shlex_split(
                playground.settings['RESUMECOMMAND'])
            resume_bin = portage.process.find_binary(resumecommand[0])
            if resume_bin is None:
                self.skipTest('RESUMECOMMAND not found: {}'.format(
                    playground.settings['RESUMECOMMAND']))
            if resume_bin != fetch_bin:
                os.symlink(resume_bin,
                    os.path.join(eubin, os.path.basename(resume_bin)))
            root_config = playground.trees[playground.eroot]['root_config']
            portdb = root_config.trees["porttree"].dbapi
            settings = config(clone=playground.settings)

            # Demonstrate that fetch preserves a stale file in DISTDIR
            # when no digests are given.
            foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(
                scheme=scheme, host=host, port=server.server_port),)}
            foo_path = os.path.join(settings['DISTDIR'], 'foo')
            foo_stale_content = b'stale content\n'
            with open(foo_path, 'wb') as f:
                f.write(b'stale content\n')

            self.assertTrue(bool(run_async(fetch, foo_uri, settings,
                try_mirrors=False)))

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), foo_stale_content)
            with open(foo_path, 'rb') as f:
                self.assertNotEqual(f.read(), distfiles['foo'])

            # Use force=True to update the stale file.
            self.assertTrue(bool(run_async(fetch, foo_uri, settings,
                try_mirrors=False, force=True)))

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), distfiles['foo'])

            # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
            # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
            # FETCHCOMMAND must perform atomic rename itself due to read-only
            # DISTDIR.
            with open(foo_path, 'wb') as f:
                f.write(b'stale content\n')
            orig_fetchcommand = settings['FETCHCOMMAND']
            orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
            temp_fetchcommand = os.path.join(eubin, 'fetchcommand')
            with open(temp_fetchcommand, 'w') as f:
                f.write("""
            set -e
            URI=$1
            DISTDIR=$2
            FILE=$3
            trap 'chmod a-w "${DISTDIR}"' EXIT
            chmod ug+w "${DISTDIR}"
            %s
            mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
            """ % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__'))
            settings['FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (
                BASH_BINARY, temp_fetchcommand)
            settings.features.add('skiprocheck')
            settings.features.remove('distlocks')
            os.chmod(settings['DISTDIR'], 0o555)
            try:
                self.assertTrue(bool(run_async(fetch, foo_uri, settings,
                    try_mirrors=False, force=True)))
            finally:
                settings['FETCHCOMMAND'] = orig_fetchcommand
                os.chmod(settings['DISTDIR'], orig_distdir_mode)
                settings.features.remove('skiprocheck')
                settings.features.add('distlocks')
                os.unlink(temp_fetchcommand)

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), distfiles['foo'])

            # Test emirrordist invocation.
            emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd',
                os.path.join(self.bindir, 'emirrordist'),
                '--distfiles', settings['DISTDIR'],
                '--config-root', settings['EPREFIX'],
                '--repositories-configuration',
                settings.repositories.config_string(),
                '--repo', 'test_repo', '--mirror')
            env = settings.environ()
            env['PYTHONPATH'] = ':'.join(filter(None,
                [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':')))

            for k in distfiles:
                os.unlink(os.path.join(settings['DISTDIR'], k))

            proc = loop.run_until_complete(
                asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
            self.assertEqual(loop.run_until_complete(proc.wait()), 0)

            for k in distfiles:
                with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(config_pool=config_pool,
                    ebuild_path=ebuild_path,
                    fetchonly=False, fetchall=True, pkg=pkg,
                    scheduler=loop)
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

                pkg = Package(built=False, cpv=cpv, installed=False,
                    metadata=metadata,
                    root_config=root_config, type_name='ebuild')

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path, 'fetch',
                    settings=settings, db=portdb)

                # Test good files in DISTDIR
                for k in settings['AA'].split():
                    os.stat(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(os.path.join(os.path.dirname(ebuild_path),
                    'Manifest'))
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(bool(loop.run_until_complete(
                        loop.run_in_executor(executor, functools.partial(
                            digestgen, mysettings=settings,
                            myportdb=portdb)))))
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        f.write(len(distfiles[k]) * b'\0')
                    self.assertEqual(os.stat(file_path).st_size,
                        len(distfiles[k]))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_resumecommand = settings['RESUMECOMMAND']
                try:
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop('PORTAGE_RO_DISTDIRS')
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings['GENTOO_MIRRORS']
                orig_fetchcommand = settings['FETCHCOMMAND']
                try:
                    settings['GENTOO_MIRRORS'] = ro_distdir
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['GENTOO_MIRRORS'] = orig_mirrors
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)

                # Test parallel-fetch mode
                settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
                try:
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings['AA'].split():
                        os.unlink(os.path.join(settings['DISTDIR'], k))
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop('PORTAGE_PARALLEL_FETCHONLY')

                # Test RESUMECOMMAND
                orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
                try:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, 'wb') as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size

                # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND
                # set to temporarily chmod DISTDIR
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    settings['FETCHCOMMAND'] = (
                        '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?;'
                        ' chmod a-w \\"${DISTDIR}\\"; exit \\$status"'
                        % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"')))
                    settings.features.add('skiprocheck')
                    settings.features.remove('distlocks')
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                finally:
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)
                    settings.features.remove('skiprocheck')
                    settings.features.add('distlocks')
        finally:
            shutil.rmtree(ro_distdir)
            playground.cleanup()
def check_isebuild(self, **kwargs):
    """Test the file for qualifications that it is an ebuild

    @param checkdirlist: list of files in the current package directory
    @param checkdir: current package directory path
    @param xpkg: current package directory being checked
    @param validity_future: Future instance
    @returns: dictionary, including {pkgs, can_force}
    """
    checkdirlist = kwargs.get("checkdirlist").get()
    checkdir = kwargs.get("checkdir")
    xpkg = kwargs.get("xpkg")
    fuse = kwargs.get("validity_future")
    can_force = kwargs.get("can_force")
    self.continue_ = False
    ebuildlist = []
    pkgs = {}
    for y in checkdirlist:
        file_is_ebuild = y.endswith(".ebuild")
        file_should_be_non_executable = (
            y in self.repo_settings.qadata.no_exec or file_is_ebuild)

        if file_should_be_non_executable:
            file_is_executable = (stat.S_IMODE(
                os.stat(os.path.join(checkdir, y)).st_mode) & 0o111)

            if file_is_executable:
                self.qatracker.add_error("file.executable",
                    os.path.join(checkdir, y))
        if file_is_ebuild:
            pf = y[:-7]
            ebuildlist.append(pf)
            catdir = xpkg.split("/")[0]
            cpv = "%s/%s" % (catdir, pf)
            allvars = self.repo_settings.qadata.allvars
            try:
                myaux = dict(zip(allvars,
                    self.portdb.aux_get(cpv, allvars)))
            except KeyError:
                fuse.set(False, ignore_InvalidState=True)
                self.qatracker.add_error("ebuild.syntax",
                    os.path.join(xpkg, y))
                continue
            except IOError:
                fuse.set(False, ignore_InvalidState=True)
                self.qatracker.add_error("ebuild.output",
                    os.path.join(xpkg, y))
                continue
            except InvalidPackageName:
                fuse.set(False, ignore_InvalidState=True)
                self.qatracker.add_error("ebuild.invalidname",
                    os.path.join(xpkg, y))
                continue
            if not portage.eapi_is_supported(myaux["EAPI"]):
                fuse.set(False, ignore_InvalidState=True)
                self.qatracker.add_error("EAPI.unsupported",
                    os.path.join(xpkg, y))
                continue
            pkgs[pf] = Package(
                cpv=cpv,
                metadata=myaux,
                root_config=self.root_config,
                type_name="ebuild",
            )

    if len(pkgs) != len(ebuildlist):
        # If we can't access all the metadata then it's totally unsafe to
        # commit since there's no way to generate a correct Manifest.
        # Do not try to do any more QA checks on this package since missing
        # metadata leads to false positives for several checks, and false
        # positives confuse users.
        self.continue_ = True
        can_force.set(False, ignore_InvalidState=True)
    self.pkgs = pkgs
    # set our updated data
    dyn_pkgs = kwargs.get("pkgs")
    dyn_pkgs.set(pkgs)
    return self.continue_
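# Illustrative sketch (not from the Portage sources): the file.executable
# QA check in the functions above reduces to testing the three execute
# bits on files that should not be executable:
import os
import stat

def has_exec_bit(path):
    # S_IMODE strips the file-type bits; 0o111 selects u+x, g+x and o+x.
    return bool(stat.S_IMODE(os.stat(path).st_mode) & 0o111)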
def output(self): """Outputs the results of the search.""" msg = [] msg.append("\b\b \n[ Results for search key : " + \ bold(self.searchkey) + " ]\n") msg.append("[ Applications found : " + \ bold(str(self.mlen)) + " ]\n\n") vardb = self.vartree.dbapi metadata_keys = set(Package.metadata_keys) metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"]) metadata_keys = tuple(metadata_keys) for mtype in self.matches: for match, masked in self.matches[mtype]: full_package = None if mtype == "pkg": full_package = self._xmatch("bestmatch-visible", match) if not full_package: #no match found; we don't want to query description masked = 1 full_package = portage.best( self._xmatch("match-all", match)) elif mtype == "desc": full_package = match match = portage.cpv_getkey(match) elif mtype == "set": msg.append(green("*") + " " + bold(match) + "\n") if self.verbose: msg.append(" " + darkgreen("Description:") + \ " " + \ self.sdict[match].getMetadata("DESCRIPTION") \ + "\n\n") if full_package: try: metadata = dict( zip(metadata_keys, self._aux_get(full_package, metadata_keys))) except KeyError: msg.append( "emerge: search: aux_get() failed, skipping\n") continue desc = metadata["DESCRIPTION"] homepage = metadata["HOMEPAGE"] license = metadata["LICENSE"] if masked: msg.append(green("*") + " " + \ white(match) + " " + red("[ Masked ]") + "\n") else: msg.append(green("*") + " " + bold(match) + "\n") myversion = self.getVersion(full_package, search.VERSION_RELEASE) mysum = [0, 0] file_size_str = None mycat = match.split("/")[0] mypkg = match.split("/")[1] mycpv = match + "-" + myversion myebuild = self._findname(mycpv) if myebuild: pkg = Package(built=False, cpv=mycpv, installed=False, metadata=metadata, root_config=self.root_config, type_name="ebuild") pkgdir = os.path.dirname(myebuild) mf = self.settings.repositories.get_repo_for_location( os.path.dirname(os.path.dirname(pkgdir))) mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"]) try: uri_map = _parse_uri_map(mycpv, metadata, use=pkg.use.enabled) except portage.exception.InvalidDependString as e: file_size_str = "Unknown (%s)" % (e, ) del e else: try: mysum[0] = mf.getDistfilesSize(uri_map) except KeyError as e: file_size_str = "Unknown (missing " + \ "digest for %s)" % (e,) del e available = False for db in self._dbs: if db is not vardb and \ db.cpv_exists(mycpv): available = True if not myebuild and hasattr(db, "bintree"): myebuild = db.bintree.getname(mycpv) try: mysum[0] = os.stat(myebuild).st_size except OSError: myebuild = None break if myebuild and file_size_str is None: file_size_str = localized_size(mysum[0]) if self.verbose: if available: msg.append(" %s %s\n" % \ (darkgreen("Latest version available:"), myversion)) msg.append(" %s\n" % \ self.getInstallationStatus(mycat+'/'+mypkg)) if myebuild: msg.append(" %s %s\n" % \ (darkgreen("Size of files:"), file_size_str)) msg.append(" " + darkgreen("Homepage:") + \ " " + homepage + "\n") msg.append(" " + darkgreen("Description:") \ + " " + desc + "\n") msg.append(" " + darkgreen("License:") + \ " " + license + "\n\n") writemsg_stdout(''.join(msg), noiselevel=-1)
def testEbuildFetch(self):

    distfiles = {
        'bar': b'bar\n',
        'foo': b'foo\n',
    }

    ebuilds = {
        'dev-libs/A-1': {
            'EAPI': '7',
            'RESTRICT': 'primaryuri',
            'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
                {scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
        },
    }

    loop = SchedulerInterface(global_event_loop())

    scheme = 'http'
    host = '127.0.0.1'
    content = {}
    for k, v in distfiles.items():
        content['/distfiles/{}.txt'.format(k)] = v

    with AsyncHTTPServer(host, content, loop) as server:
        ebuilds_subst = {}
        for cpv, metadata in ebuilds.items():
            metadata = metadata.copy()
            metadata['SRC_URI'] = metadata['SRC_URI'].format(
                scheme=scheme, host=host, port=server.server_port)
            ebuilds_subst[cpv] = metadata

        playground = ResolverPlayground(ebuilds=ebuilds_subst,
            distfiles=distfiles)
        ro_distdir = tempfile.mkdtemp()
        try:
            fetchcommand = portage.util.shlex_split(
                playground.settings['FETCHCOMMAND'])
            fetch_bin = portage.process.find_binary(fetchcommand[0])
            if fetch_bin is None:
                self.skipTest('FETCHCOMMAND not found: {}'.format(
                    playground.settings['FETCHCOMMAND']))
            resumecommand = portage.util.shlex_split(
                playground.settings['RESUMECOMMAND'])
            resume_bin = portage.process.find_binary(resumecommand[0])
            if resume_bin is None:
                self.skipTest('RESUMECOMMAND not found: {}'.format(
                    playground.settings['RESUMECOMMAND']))
            root_config = playground.trees[playground.eroot]['root_config']
            portdb = root_config.trees["porttree"].dbapi
            settings = config(clone=playground.settings)

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(config_pool=config_pool,
                    ebuild_path=ebuild_path,
                    fetchonly=False, fetchall=True, pkg=pkg,
                    scheduler=loop)
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

                pkg = Package(built=False, cpv=cpv, installed=False,
                    metadata=metadata,
                    root_config=root_config, type_name='ebuild')

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path, 'fetch',
                    settings=settings, db=portdb)

                # Test good files in DISTDIR
                for k in settings['AA'].split():
                    os.stat(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(os.path.join(os.path.dirname(ebuild_path),
                    'Manifest'))
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(bool(loop.run_until_complete(
                        loop.run_in_executor(executor, functools.partial(
                            digestgen, mysettings=settings,
                            myportdb=portdb)))))
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        f.write(len(distfiles[k]) * b'\0')
                    self.assertEqual(os.stat(file_path).st_size,
                        len(distfiles[k]))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_resumecommand = settings['RESUMECOMMAND']
                try:
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop('PORTAGE_RO_DISTDIRS')
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings['GENTOO_MIRRORS']
                orig_fetchcommand = settings['FETCHCOMMAND']
                try:
                    settings['GENTOO_MIRRORS'] = ro_distdir
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['GENTOO_MIRRORS'] = orig_mirrors
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)

                # Test parallel-fetch mode
                settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
                try:
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings['AA'].split():
                        os.unlink(os.path.join(settings['DISTDIR'], k))
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop('PORTAGE_PARALLEL_FETCHONLY')

                # Test RESUMECOMMAND
                orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
                try:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, 'wb') as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(loop.run_until_complete(
                        async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size
        finally:
            shutil.rmtree(ro_distdir)
            playground.cleanup()
def _testEbuildFetch(
        self,
        loop,
        scheme,
        host,
        orig_distfiles,
        ebuilds,
        content,
        server,
        playground,
        ro_distdir,
):
    mirror_layouts = (
        (
            "[structure]",
            "0=filename-hash BLAKE2B 8",
            "1=flat",
        ),
        (
            "[structure]",
            "1=filename-hash BLAKE2B 8",
            "0=flat",
        ),
        (
            "[structure]",
            "0=content-hash SHA512 8:8:8",
            "1=flat",
        ),
    )

    fetchcommand = portage.util.shlex_split(
        playground.settings["FETCHCOMMAND"])
    fetch_bin = portage.process.find_binary(fetchcommand[0])
    if fetch_bin is None:
        self.skipTest("FETCHCOMMAND not found: {}".format(
            playground.settings["FETCHCOMMAND"]))
    eubin = os.path.join(playground.eprefix, "usr", "bin")
    os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
    resumecommand = portage.util.shlex_split(
        playground.settings["RESUMECOMMAND"])
    resume_bin = portage.process.find_binary(resumecommand[0])
    if resume_bin is None:
        self.skipTest("RESUMECOMMAND not found: {}".format(
            playground.settings["RESUMECOMMAND"]))
    if resume_bin != fetch_bin:
        os.symlink(resume_bin,
            os.path.join(eubin, os.path.basename(resume_bin)))
    root_config = playground.trees[playground.eroot]["root_config"]
    portdb = root_config.trees["porttree"].dbapi

    def run_async(func, *args, **kwargs):
        with ForkExecutor(loop=loop) as executor:
            return loop.run_until_complete(loop.run_in_executor(
                executor, functools.partial(func, *args, **kwargs)))

    for layout_lines in mirror_layouts:
        settings = config(clone=playground.settings)
        layout_data = "".join("{}\n".format(line) for line in layout_lines)
        mirror_conf = MirrorLayoutConfig()
        mirror_conf.read_from_file(io.StringIO(layout_data))
        layouts = mirror_conf.get_all_layouts()
        content["/distfiles/layout.conf"] = layout_data.encode("utf8")
        distfiles = {}
        for k, v in orig_distfiles.items():
            filename = DistfileName(
                k,
                digests=dict((algo, checksum_str(v, hashname=algo))
                    for algo in MANIFEST2_HASH_DEFAULTS),
            )
            distfiles[filename] = v

            # mirror path
            for layout in layouts:
                content["/distfiles/" + layout.get_path(filename)] = v
            # upstream path
            content["/distfiles/{}.txt".format(k)] = v

        shutil.rmtree(settings["DISTDIR"])
        os.makedirs(settings["DISTDIR"])
        with open(os.path.join(settings["DISTDIR"], "layout.conf"),
                "wt") as f:
            f.write(layout_data)

        if any(isinstance(layout, ContentHashLayout)
                for layout in layouts):
            content_db = os.path.join(playground.eprefix,
                "var/db/emirrordist/content.db")
            os.makedirs(os.path.dirname(content_db), exist_ok=True)
            try:
                os.unlink(content_db)
            except OSError:
                pass
        else:
            content_db = None

        # Demonstrate that fetch preserves a stale file in DISTDIR when no
        # digests are given.
        foo_uri = {
            "foo": ("{scheme}://{host}:{port}/distfiles/foo".format(
                scheme=scheme, host=host, port=server.server_port),)
        }
        foo_path = os.path.join(settings["DISTDIR"], "foo")
        foo_stale_content = b"stale content\n"
        with open(foo_path, "wb") as f:
            f.write(b"stale content\n")

        self.assertTrue(bool(run_async(fetch, foo_uri, settings,
            try_mirrors=False)))

        with open(foo_path, "rb") as f:
            self.assertEqual(f.read(), foo_stale_content)
        with open(foo_path, "rb") as f:
            self.assertNotEqual(f.read(), distfiles["foo"])

        # Use force=True to update the stale file.
        self.assertTrue(bool(run_async(fetch, foo_uri, settings,
            try_mirrors=False, force=True)))
        with open(foo_path, "rb") as f:
            self.assertEqual(f.read(), distfiles["foo"])

        # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
        # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
        # FETCHCOMMAND must perform atomic rename itself due to read-only
        # DISTDIR.
        with open(foo_path, "wb") as f:
            f.write(b"stale content\n")
        orig_fetchcommand = settings["FETCHCOMMAND"]
        orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
        temp_fetchcommand = os.path.join(eubin, "fetchcommand")
        with open(temp_fetchcommand, "w") as f:
            f.write("""
        set -e
        URI=$1
        DISTDIR=$2
        FILE=$3
        trap 'chmod a-w "${DISTDIR}"' EXIT
        chmod ug+w "${DISTDIR}"
        %s
        mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
        """ % orig_fetchcommand.replace("${FILE}", "${FILE}.__download__"))
        settings["FETCHCOMMAND"] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (
            BASH_BINARY, temp_fetchcommand)
        settings.features.add("skiprocheck")
        settings.features.remove("distlocks")
        os.chmod(settings["DISTDIR"], 0o555)
        try:
            self.assertTrue(bool(run_async(fetch, foo_uri, settings,
                try_mirrors=False, force=True)))
        finally:
            settings["FETCHCOMMAND"] = orig_fetchcommand
            os.chmod(settings["DISTDIR"], orig_distdir_mode)
            settings.features.remove("skiprocheck")
            settings.features.add("distlocks")
            os.unlink(temp_fetchcommand)

        with open(foo_path, "rb") as f:
            self.assertEqual(f.read(), distfiles["foo"])

        # Test emirrordist invocation.
        emirrordist_cmd = (
            portage._python_interpreter,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "emirrordist"),
            "--distfiles",
            settings["DISTDIR"],
            "--config-root",
            settings["EPREFIX"],
            "--delete",
            "--repositories-configuration",
            settings.repositories.config_string(),
            "--repo",
            "test_repo",
            "--mirror",
        )

        if content_db is not None:
            emirrordist_cmd = emirrordist_cmd + (
                "--content-db",
                content_db,
            )

        env = settings.environ()
        env["PYTHONPATH"] = ":".join(
            filter(
                None,
                [PORTAGE_PYM_PATH] + os.environ.get("PYTHONPATH", "").split(":"),
            ))

        for k in distfiles:
            try:
                os.unlink(os.path.join(settings["DISTDIR"], k))
            except OSError:
                pass

        proc = loop.run_until_complete(
            asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
        self.assertEqual(loop.run_until_complete(proc.wait()), 0)

        for k in distfiles:
            with open(os.path.join(settings["DISTDIR"],
                    layouts[0].get_path(k)), "rb") as f:
                self.assertEqual(f.read(), distfiles[k])

        if content_db is not None:
            loop.run_until_complete(
                self._test_content_db(
                    emirrordist_cmd,
                    env,
                    layouts,
                    content_db,
                    distfiles,
                    settings,
                    portdb,
                ))

        # Tests only work with one ebuild at a time, so the config
        # pool only needs a single config instance.
        class config_pool:
            @staticmethod
            def allocate():
                return settings

            @staticmethod
            def deallocate(settings):
                pass

        def async_fetch(pkg, ebuild_path):
            fetcher = EbuildFetcher(
                config_pool=config_pool,
                ebuild_path=ebuild_path,
                fetchonly=False,
                fetchall=True,
                pkg=pkg,
                scheduler=loop,
            )
            fetcher.start()
            return fetcher.async_wait()

        for cpv in ebuilds:
            metadata = dict(zip(Package.metadata_keys,
                portdb.aux_get(cpv, Package.metadata_keys)))

            pkg = Package(
                built=False,
                cpv=cpv,
                installed=False,
                metadata=metadata,
                root_config=root_config,
                type_name="ebuild",
            )

            settings.setcpv(pkg)
            ebuild_path = portdb.findname(pkg.cpv)
            portage.doebuild_environment(ebuild_path, "fetch",
                settings=settings, db=portdb)

            # Test good files in DISTDIR
            for k in settings["AA"].split():
                os.stat(os.path.join(settings["DISTDIR"], k))
            self.assertEqual(loop.run_until_complete(
                async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test digestgen with fetch
            os.unlink(os.path.join(os.path.dirname(ebuild_path),
                "Manifest"))
            for k in settings["AA"].split():
                os.unlink(os.path.join(settings["DISTDIR"], k))
            with ForkExecutor(loop=loop) as executor:
                self.assertTrue(bool(loop.run_until_complete(
                    loop.run_in_executor(executor, functools.partial(
                        digestgen, mysettings=settings,
                        myportdb=portdb)))))
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test missing files in DISTDIR
            for k in settings["AA"].split():
                os.unlink(os.path.join(settings["DISTDIR"], k))
            self.assertEqual(loop.run_until_complete(
                async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test empty files in DISTDIR
            for k in settings["AA"].split():
                file_path = os.path.join(settings["DISTDIR"], k)
                with open(file_path, "wb") as f:
                    pass
                self.assertEqual(os.stat(file_path).st_size, 0)
            self.assertEqual(loop.run_until_complete(
                async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test non-empty files containing null bytes in DISTDIR
            for k in settings["AA"].split():
                file_path = os.path.join(settings["DISTDIR"], k)
                with open(file_path, "wb") as f:
                    f.write(len(distfiles[k]) * b"\0")
                self.assertEqual(os.stat(file_path).st_size,
                    len(distfiles[k]))
            self.assertEqual(loop.run_until_complete(
                async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test PORTAGE_RO_DISTDIRS
            settings["PORTAGE_RO_DISTDIRS"] = '"{}"'.format(ro_distdir)
            orig_fetchcommand = settings["FETCHCOMMAND"]
            orig_resumecommand = settings["RESUMECOMMAND"]
            try:
                settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    os.rename(file_path, os.path.join(ro_distdir, k))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    self.assertTrue(os.path.islink(file_path))
                    with open(file_path, "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
                    os.unlink(file_path)
            finally:
                settings.pop("PORTAGE_RO_DISTDIRS")
                settings["FETCHCOMMAND"] = orig_fetchcommand
                settings["RESUMECOMMAND"] = orig_resumecommand

            # Test local filesystem in GENTOO_MIRRORS
            orig_mirrors = settings["GENTOO_MIRRORS"]
            orig_fetchcommand = settings["FETCHCOMMAND"]
            try:
                settings["GENTOO_MIRRORS"] = ro_distdir
                settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                settings["GENTOO_MIRRORS"] = orig_mirrors
                settings["FETCHCOMMAND"] = orig_fetchcommand
                settings["RESUMECOMMAND"] = orig_resumecommand

            # Test readonly DISTDIR
            orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
            try:
                os.chmod(settings["DISTDIR"], 0o555)
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                os.chmod(settings["DISTDIR"], orig_distdir_mode)

            # Test parallel-fetch mode
            settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
            try:
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                settings.pop("PORTAGE_PARALLEL_FETCHONLY")

            # Test RESUMECOMMAND
            orig_resume_min_size = settings["PORTAGE_FETCH_RESUME_MIN_SIZE"]
            try:
                settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = "2"
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    os.unlink(file_path)
                    with open(file_path + _download_suffix, "wb") as f:
                        f.write(distfiles[k][:2])
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = orig_resume_min_size

            # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set
            # to temporarily chmod DISTDIR
            orig_fetchcommand = settings["FETCHCOMMAND"]
            orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
            for k in settings["AA"].split():
                os.unlink(os.path.join(settings["DISTDIR"], k))
            try:
                os.chmod(settings["DISTDIR"], 0o555)
                settings["FETCHCOMMAND"] = (
                    '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?;'
                    ' chmod a-w \\"${DISTDIR}\\"; exit \\$status"'
                    % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"')))
                settings.features.add("skiprocheck")
                settings.features.remove("distlocks")
                self.assertEqual(loop.run_until_complete(
                    async_fetch(pkg, ebuild_path)), 0)
            finally:
                settings["FETCHCOMMAND"] = orig_fetchcommand
                os.chmod(settings["DISTDIR"], orig_distdir_mode)
                settings.features.remove("skiprocheck")
                settings.features.add("distlocks")
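# Illustrative sketch (not from the Portage sources, and an assumption
# about the layout format): a "filename-hash BLAKE2B 8" mirror layout, as
# exercised above, appears to bucket each distfile under the first 8 bits
# (two hex digits) of the BLAKE2B digest of its name. A hypothetical
# standalone equivalent:
import hashlib

def filename_hash_path(filename, bits=8):
    digest = hashlib.blake2b(filename.encode("utf8")).hexdigest()
    # bits // 4 hex characters cover the requested bit width.
    return "%s/%s" % (digest[:bits // 4], filename)

# e.g. filename_hash_path("foo") -> "<2 hex chars>/foo"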
def testDoebuildSpawn(self): ebuild_body = textwrap.dedent(""" pkg_nofetch() { : ; } """) ebuilds = { 'sys-apps/portage-2.1': { 'EAPI': '2', 'IUSE': 'build doc epydoc python3 selinux', 'KEYWORDS': 'x86', 'LICENSE': 'GPL-2', 'RDEPEND': '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6', 'SLOT': '0', "MISC_CONTENT": ebuild_body, } } playground = ResolverPlayground(ebuilds=ebuilds) try: root_config = playground.trees[playground.eroot]['root_config'] portdb = root_config.trees["porttree"].dbapi settings = config(clone=playground.settings) if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS") cpv = 'sys-apps/portage-2.1' metadata = dict( zip(Package.metadata_keys, portdb.aux_get(cpv, Package.metadata_keys))) pkg = Package(built=False, cpv=cpv, installed=False, metadata=metadata, root_config=root_config, type_name='ebuild') settings.setcpv(pkg) settings['PORTAGE_PYTHON'] = _python_interpreter settings['PORTAGE_BUILDDIR'] = os.path.join( settings['PORTAGE_TMPDIR'], cpv) settings['T'] = os.path.join(settings['PORTAGE_BUILDDIR'], 'temp') for x in ('PORTAGE_BUILDDIR', 'T'): os.makedirs(settings[x]) # Create a fake environment, to pretend as if the ebuild # has been sourced already. open(os.path.join(settings['T'], 'environment'), 'wb').close() scheduler = SchedulerInterface(global_event_loop()) for phase in ('_internal_test', ): # Test EbuildSpawnProcess by calling doebuild.spawn() with # returnpid=False. This case is no longer used by portage # internals since EbuildPhase is used instead and that passes # returnpid=True to doebuild.spawn(). rval = doebuild_spawn("%s %s" % (_shell_quote( os.path.join(settings["PORTAGE_BIN_PATH"], os.path.basename(EBUILD_SH_BINARY))), phase), settings, free=1) self.assertEqual(rval, os.EX_OK) ebuild_phase = EbuildPhase(background=False, phase=phase, scheduler=scheduler, settings=settings) ebuild_phase.start() ebuild_phase.wait() self.assertEqual(ebuild_phase.returncode, os.EX_OK) ebuild_phase = MiscFunctionsProcess(background=False, commands=['success_hooks'], scheduler=scheduler, settings=settings) ebuild_phase.start() ebuild_phase.wait() self.assertEqual(ebuild_phase.returncode, os.EX_OK) spawn_nofetch(portdb, portdb.findname(cpv), settings=settings) finally: playground.cleanup()