def dodir(path):
    try:
        os.makedirs(path, 0o755)
    except OSError:
        if not os.path.isdir(path):
            raise
    os.chmod(path, 0o755)

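# Usage sketch (path hypothetical): dodir is idempotent, because the OSError
# raised for an already-existing path is swallowed when that path is in fact
# a directory; the trailing chmod enforces 0o755 regardless of umask.
#
#     dodir("/tmp/example-archive/etc")
#     dodir("/tmp/example-archive/etc")  # second call is a no-op
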
def rcs_archive(archive, curconf, newconf, mrgconf):
    """Archive existing config in rcs (on trunk). Then, if mrgconf is
    specified and an old branch version exists, merge the user's changes
    and the distributed changes and put the result into mrgconf.  Lastly,
    if newconf was specified, leave it in the archive dir with a .dist.new
    suffix along with the last 1.1.1 branch version with a .dist suffix."""

    try:
        os.makedirs(os.path.dirname(archive))
    except OSError:
        pass

    try:
        curconf_st = os.lstat(curconf)
    except OSError:
        curconf_st = None

    if curconf_st is not None and \
        (stat.S_ISREG(curconf_st.st_mode) or
        stat.S_ISLNK(curconf_st.st_mode)):
        _archive_copy(curconf_st, curconf, archive)

    if os.path.lexists(archive + ',v'):
        os.system(RCS_LOCK + ' ' + archive)
    os.system(RCS_PUT + ' ' + archive)

    ret = 0
    mystat = None
    if newconf:
        try:
            mystat = os.lstat(newconf)
        except OSError:
            pass

    if mystat is not None and \
        (stat.S_ISREG(mystat.st_mode) or
        stat.S_ISLNK(mystat.st_mode)):
        os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
        has_branch = os.path.lexists(archive)
        if has_branch:
            os.rename(archive, archive + '.dist')

        _archive_copy(mystat, newconf, archive)

        if has_branch:
            if mrgconf and os.path.isfile(archive) and \
                os.path.isfile(mrgconf):
                # This puts the results of the merge into mrgconf.
                ret = os.system(RCS_MERGE % (archive, mrgconf))
                os.chmod(mrgconf, mystat.st_mode)
                os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
        os.rename(archive, archive + '.dist.new')

    return ret

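# For reference, the RCS_* command templates used above are module-level
# constants defined in portage's dispatch_conf module; their definitions are
# approximately the following (shown as a comment for context, not re-defined
# here):
#
#     RCS_BRANCH = '1.1.1'
#     RCS_LOCK = 'rcs -ko -M -l'
#     RCS_GET = 'co'
#     RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf archive."'
#     RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
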
def file_archive(archive, curconf, newconf, mrgconf):
    """Archive existing config to the archive-dir, bumping old versions
    out of the way into .# versions (log-rotate style). Then, if mrgconf
    was specified and there is a .dist version, merge the user's changes
    and the distributed changes and put the result into mrgconf.  Lastly,
    if newconf was specified, archive it as a .dist.new version (which
    gets moved to the .dist version at the end of the processing)."""

    _file_archive_ensure_dir(os.path.dirname(archive))

    # Archive the current config file if it isn't already saved
    if (os.path.lexists(archive) and
        len(diffstatusoutput_mixed(
        "diff -aq '%s' '%s'", curconf, archive)[1]) != 0):
        _file_archive_rotate(archive)

    try:
        curconf_st = os.lstat(curconf)
    except OSError:
        curconf_st = None

    if curconf_st is not None and \
        (stat.S_ISREG(curconf_st.st_mode) or
        stat.S_ISLNK(curconf_st.st_mode)):
        _archive_copy(curconf_st, curconf, archive)

    mystat = None
    if newconf:
        try:
            mystat = os.lstat(newconf)
        except OSError:
            pass

    if mystat is not None and \
        (stat.S_ISREG(mystat.st_mode) or
        stat.S_ISLNK(mystat.st_mode)):
        # Save off new config file in the archive dir with .dist.new suffix
        newconf_archive = archive + '.dist.new'
        if os.path.isdir(newconf_archive) and \
            not os.path.islink(newconf_archive):
            _file_archive_rotate(newconf_archive)
        _archive_copy(mystat, newconf, newconf_archive)

        ret = 0
        if mrgconf and os.path.isfile(curconf) and \
            os.path.isfile(newconf) and \
            os.path.isfile(archive + '.dist'):
            # This puts the results of the merge into mrgconf.
            ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
            os.chmod(mrgconf, mystat.st_mode)
            os.chown(mrgconf, mystat.st_uid, mystat.st_gid)

        return ret

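# Illustrative call (paths hypothetical): archive /etc/hosts into an
# archive-dir tree and merge the user's edits with a newly installed config.
# A non-zero return from the diff3 merge typically indicates conflicts.
#
#     ret = file_archive(
#         '/etc/config-archive/etc/hosts',  # archive target in archive-dir
#         '/etc/hosts',                     # current user config
#         '/etc/._cfg0000_hosts',           # newly distributed config
#         '/tmp/hosts.merged')              # merge output, may be ''
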
def read_config(mandatory_opts):
    eprefix = portage.settings["EPREFIX"]
    if portage._not_installed:
        config_path = os.path.join(
            portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
    else:
        config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
    loader = KeyValuePairFileLoader(config_path, None)
    opts, _errors = loader.load()
    if not opts:
        print(
            _("dispatch-conf: Error reading {}; fatal").format(config_path),
            file=sys.stderr,
        )
        sys.exit(1)

    # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
    quotes = "\"'"
    for k, v in opts.items():
        if v[:1] in quotes and v[:1] == v[-1:]:
            opts[k] = v[1:-1]

    for key in mandatory_opts:
        if key not in opts:
            if key == "merge":
                opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
            else:
                print(
                    _('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal')
                    % (key,),
                    file=sys.stderr,
                )

    # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
    variables = {"EPREFIX": eprefix}
    opts["archive-dir"] = varexpand(opts["archive-dir"], mydict=variables)

    if not os.path.exists(opts["archive-dir"]):
        os.mkdir(opts["archive-dir"])
        # Use restrictive permissions by default, in order to protect
        # against vulnerabilities (like bug #315603 involving rcs).
        os.chmod(opts["archive-dir"], 0o700)
    elif not os.path.isdir(opts["archive-dir"]):
        print(
            _("dispatch-conf: Config archive dir [%s] must exist; fatal")
            % (opts["archive-dir"],),
            file=sys.stderr,
        )
        sys.exit(1)

    return opts

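# Illustrative dispatch-conf.conf content parsed by read_config() (the keys
# are real dispatch-conf options; the values shown are only examples). Note
# that one layer of matching quotes is stripped from each value, and
# ${EPREFIX} is expanded in archive-dir via varexpand():
#
#     archive-dir=${EPREFIX}/etc/config-archive
#     use-rcs=no
#     diff="diff -Nu '%s' '%s'"
#     merge="sdiff --suppress-common-lines --output='%s' '%s' '%s'"
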
def dofile(src, dst):
    shutil.copy(src, dst)
    os.chmod(dst, 0o644)

def testEbuildFetch(self):
    user_config = {
        "make.conf": ('GENTOO_MIRRORS="{scheme}://{host}:{port}"',),
    }

    distfiles = {
        'bar': b'bar\n',
        'foo': b'foo\n',
    }

    ebuilds = {
        'dev-libs/A-1': {
            'EAPI': '7',
            'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
                {scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
        },
    }

    loop = SchedulerInterface(global_event_loop())

    def run_async(func, *args, **kwargs):
        with ForkExecutor(loop=loop) as executor:
            return loop.run_until_complete(loop.run_in_executor(executor,
                functools.partial(func, *args, **kwargs)))

    scheme = 'http'
    host = '127.0.0.1'
    content = {}

    content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n'

    for k, v in distfiles.items():
        # mirror path
        content['/distfiles/{}'.format(k)] = v
        # upstream path
        content['/distfiles/{}.txt'.format(k)] = v

    with AsyncHTTPServer(host, content, loop) as server:
        ebuilds_subst = {}
        for cpv, metadata in ebuilds.items():
            metadata = metadata.copy()
            metadata['SRC_URI'] = metadata['SRC_URI'].format(
                scheme=scheme, host=host, port=server.server_port)
            ebuilds_subst[cpv] = metadata

        user_config_subst = user_config.copy()
        for configname, configdata in user_config.items():
            configdata_sub = []
            for line in configdata:
                configdata_sub.append(line.format(
                    scheme=scheme, host=host, port=server.server_port))
            user_config_subst[configname] = tuple(configdata_sub)

        playground = ResolverPlayground(ebuilds=ebuilds_subst,
            distfiles=distfiles, user_config=user_config_subst)
        ro_distdir = tempfile.mkdtemp()
        eubin = os.path.join(playground.eprefix, "usr", "bin")
        try:
            fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
            fetch_bin = portage.process.find_binary(fetchcommand[0])
            if fetch_bin is None:
                self.skipTest('FETCHCOMMAND not found: {}'.format(
                    playground.settings['FETCHCOMMAND']))
            os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
            resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
            resume_bin = portage.process.find_binary(resumecommand[0])
            if resume_bin is None:
                self.skipTest('RESUMECOMMAND not found: {}'.format(
                    playground.settings['RESUMECOMMAND']))
            if resume_bin != fetch_bin:
                os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
            root_config = playground.trees[playground.eroot]['root_config']
            portdb = root_config.trees["porttree"].dbapi
            settings = config(clone=playground.settings)

            # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
            foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(
                scheme=scheme, host=host, port=server.server_port),)}
            foo_path = os.path.join(settings['DISTDIR'], 'foo')
            foo_stale_content = b'stale content\n'
            with open(foo_path, 'wb') as f:
                f.write(b'stale content\n')

            self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), foo_stale_content)
            with open(foo_path, 'rb') as f:
                self.assertNotEqual(f.read(), distfiles['foo'])

            # Use force=True to update the stale file.
            self.assertTrue(bool(run_async(fetch, foo_uri, settings,
                try_mirrors=False, force=True)))

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), distfiles['foo'])

            # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
            # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
            # FETCHCOMMAND must perform atomic rename itself due to read-only
            # DISTDIR.
            with open(foo_path, 'wb') as f:
                f.write(b'stale content\n')
            orig_fetchcommand = settings['FETCHCOMMAND']
            orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
            temp_fetchcommand = os.path.join(eubin, 'fetchcommand')
            with open(temp_fetchcommand, 'w') as f:
                f.write("""
                    set -e
                    URI=$1
                    DISTDIR=$2
                    FILE=$3
                    trap 'chmod a-w "${DISTDIR}"' EXIT
                    chmod ug+w "${DISTDIR}"
                    %s
                    mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
                """ % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__'))
            settings['FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (
                BASH_BINARY, temp_fetchcommand)
            settings.features.add('skiprocheck')
            settings.features.remove('distlocks')
            os.chmod(settings['DISTDIR'], 0o555)
            try:
                self.assertTrue(bool(run_async(fetch, foo_uri, settings,
                    try_mirrors=False, force=True)))
            finally:
                settings['FETCHCOMMAND'] = orig_fetchcommand
                os.chmod(settings['DISTDIR'], orig_distdir_mode)
                settings.features.remove('skiprocheck')
                settings.features.add('distlocks')
                os.unlink(temp_fetchcommand)

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), distfiles['foo'])

            # Test emirrordist invocation.
            emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd',
                os.path.join(self.bindir, 'emirrordist'),
                '--distfiles', settings['DISTDIR'],
                '--config-root', settings['EPREFIX'],
                '--repositories-configuration', settings.repositories.config_string(),
                '--repo', 'test_repo', '--mirror')

            env = settings.environ()
            env['PYTHONPATH'] = ':'.join(
                filter(None, [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':')))

            for k in distfiles:
                os.unlink(os.path.join(settings['DISTDIR'], k))

            proc = loop.run_until_complete(
                asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
            self.assertEqual(loop.run_until_complete(proc.wait()), 0)

            for k in distfiles:
                with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(config_pool=config_pool,
                    ebuild_path=ebuild_path, fetchonly=False,
                    fetchall=True, pkg=pkg, scheduler=loop)
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

                pkg = Package(built=False, cpv=cpv, installed=False,
                    metadata=metadata, root_config=root_config,
                    type_name='ebuild')

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path, 'fetch',
                    settings=settings, db=portdb)

                # Test good files in DISTDIR
                for k in settings['AA'].split():
                    os.stat(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(bool(loop.run_until_complete(
                        loop.run_in_executor(executor, functools.partial(
                            digestgen, mysettings=settings, myportdb=portdb)))))
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        f.write(len(distfiles[k]) * b'\0')
                    self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_resumecommand = settings['RESUMECOMMAND']
                try:
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop('PORTAGE_RO_DISTDIRS')
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings['GENTOO_MIRRORS']
                orig_fetchcommand = settings['FETCHCOMMAND']
                try:
                    settings['GENTOO_MIRRORS'] = ro_distdir
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['GENTOO_MIRRORS'] = orig_mirrors
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)

                # Test parallel-fetch mode
                settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
                try:
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings['AA'].split():
                        os.unlink(os.path.join(settings['DISTDIR'], k))
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop('PORTAGE_PARALLEL_FETCHONLY')

                # Test RESUMECOMMAND
                orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
                try:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, 'wb') as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size

                # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to
                # temporarily chmod DISTDIR
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    settings['FETCHCOMMAND'] = '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (
                        BASH_BINARY, orig_fetchcommand.replace('"', '\\"'))
                    settings.features.add('skiprocheck')
                    settings.features.remove('distlocks')
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                finally:
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)
                    settings.features.remove('skiprocheck')
                    settings.features.add('distlocks')
        finally:
            shutil.rmtree(ro_distdir)
            playground.cleanup()

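# A minimal sketch of the fetch() call pattern the test above drives (URI and
# settings illustrative; fetch comes from portage.package.ebuild.fetch):
#
#     uris = {'foo': ('http://127.0.0.1:8080/distfiles/foo',)}
#     ok = fetch(uris, settings, try_mirrors=False, force=True)
#
# fetch() returns a truthy value on success. When no digests are available it
# preserves a pre-existing file in DISTDIR unless force=True is passed.
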
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
    stat_cached=None, follow_links=True):
    """Apply user, group, and mode bits to a file if the existing bits do not
    already match.  The default behavior is to force an exact match of mode
    bits.  When mask=0 is specified, mode bits on the target file are allowed
    to be a superset of the mode argument (via logical OR).  When mask>0, the
    mode bits that the target file is allowed to have are restricted via
    logical XOR.
    Returns True if the permissions were modified and False otherwise."""

    modified = False

    if stat_cached is None:
        try:
            if follow_links:
                stat_cached = os.stat(filename)
            else:
                stat_cached = os.lstat(filename)
        except OSError as oe:
            func_call = "stat('%s')" % filename
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            else:
                raise

    if (uid != -1 and uid != stat_cached.st_uid) or \
        (gid != -1 and gid != stat_cached.st_gid):
        try:
            if follow_links:
                os.chown(filename, uid, gid)
            else:
                portage.data.lchown(filename, uid, gid)
            modified = True
        except OSError as oe:
            func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.EROFS:
                raise ReadOnlyFileSystem(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            else:
                raise

    new_mode = -1
    st_mode = stat_cached.st_mode & 0o7777  # protect from unwanted bits
    if mask >= 0:
        if mode == -1:
            mode = 0  # Don't add any mode bits when mode is unspecified.
        else:
            mode = mode & 0o7777
        if (mode & st_mode != mode) or \
            ((mask ^ st_mode) & st_mode != st_mode):
            new_mode = mode | st_mode
            new_mode = (mask ^ new_mode) & new_mode
    elif mode != -1:
        mode = mode & 0o7777  # protect from unwanted bits
        if mode != st_mode:
            new_mode = mode

    # The chown system call may clear S_ISUID and S_ISGID
    # bits, so those bits are restored if necessary.
    if modified and new_mode == -1 and \
        (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
        if mode == -1:
            new_mode = st_mode
        else:
            mode = mode & 0o7777
            if mask >= 0:
                new_mode = mode | st_mode
                new_mode = (mask ^ new_mode) & new_mode
            else:
                new_mode = mode
            if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
                new_mode = -1

    if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
        # Mode doesn't matter for symlinks.
        new_mode = -1

    if new_mode != -1:
        try:
            os.chmod(filename, new_mode)
            modified = True
        except OSError as oe:
            func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
            if oe.errno == errno.EPERM:
                raise OperationNotPermitted(func_call)
            elif oe.errno == errno.EACCES:
                raise PermissionDenied(func_call)
            elif oe.errno == errno.EROFS:
                raise ReadOnlyFileSystem(func_call)
            elif oe.errno == errno.ENOENT:
                raise FileNotFound(filename)
            raise
    return modified

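# Worked examples of the mask semantics above (file path hypothetical):
#
#     # Default: force an exact match of mode bits.
#     apply_permissions('/tmp/example', mode=0o644)
#
#     # mask=0: existing bits may be a superset of mode; a file that is
#     # already 0o664 is left alone because 0o644 | 0o664 == 0o664.
#     apply_permissions('/tmp/example', mode=0o644, mask=0)
#
#     # mask>0: bits set in the mask are stripped from the result, so
#     # group/other write bits would be cleared here.
#     apply_permissions('/tmp/example', mode=0o644, mask=0o022)
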
def _testEbuildFetch(
    self,
    loop,
    scheme,
    host,
    orig_distfiles,
    ebuilds,
    content,
    server,
    playground,
    ro_distdir,
):
    mirror_layouts = (
        (
            "[structure]",
            "0=filename-hash BLAKE2B 8",
            "1=flat",
        ),
        (
            "[structure]",
            "1=filename-hash BLAKE2B 8",
            "0=flat",
        ),
        (
            "[structure]",
            "0=content-hash SHA512 8:8:8",
            "1=flat",
        ),
    )

    fetchcommand = portage.util.shlex_split(playground.settings["FETCHCOMMAND"])
    fetch_bin = portage.process.find_binary(fetchcommand[0])
    if fetch_bin is None:
        self.skipTest("FETCHCOMMAND not found: {}".format(
            playground.settings["FETCHCOMMAND"]))
    eubin = os.path.join(playground.eprefix, "usr", "bin")
    os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
    resumecommand = portage.util.shlex_split(playground.settings["RESUMECOMMAND"])
    resume_bin = portage.process.find_binary(resumecommand[0])
    if resume_bin is None:
        self.skipTest("RESUMECOMMAND not found: {}".format(
            playground.settings["RESUMECOMMAND"]))
    if resume_bin != fetch_bin:
        os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
    root_config = playground.trees[playground.eroot]["root_config"]
    portdb = root_config.trees["porttree"].dbapi

    def run_async(func, *args, **kwargs):
        with ForkExecutor(loop=loop) as executor:
            return loop.run_until_complete(
                loop.run_in_executor(executor, functools.partial(func, *args, **kwargs)))

    for layout_lines in mirror_layouts:
        settings = config(clone=playground.settings)
        layout_data = "".join("{}\n".format(line) for line in layout_lines)
        mirror_conf = MirrorLayoutConfig()
        mirror_conf.read_from_file(io.StringIO(layout_data))
        layouts = mirror_conf.get_all_layouts()
        content["/distfiles/layout.conf"] = layout_data.encode("utf8")
        distfiles = {}
        for k, v in orig_distfiles.items():
            filename = DistfileName(
                k,
                digests=dict(
                    (algo, checksum_str(v, hashname=algo))
                    for algo in MANIFEST2_HASH_DEFAULTS),
            )
            distfiles[filename] = v

            # mirror path
            for layout in layouts:
                content["/distfiles/" + layout.get_path(filename)] = v
            # upstream path
            content["/distfiles/{}.txt".format(k)] = v

        shutil.rmtree(settings["DISTDIR"])
        os.makedirs(settings["DISTDIR"])
        with open(os.path.join(settings["DISTDIR"], "layout.conf"), "wt") as f:
            f.write(layout_data)

        if any(isinstance(layout, ContentHashLayout) for layout in layouts):
            content_db = os.path.join(
                playground.eprefix, "var/db/emirrordist/content.db")
            os.makedirs(os.path.dirname(content_db), exist_ok=True)
            try:
                os.unlink(content_db)
            except OSError:
                pass
        else:
            content_db = None

        # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
        foo_uri = {
            "foo": (
                "{scheme}://{host}:{port}/distfiles/foo".format(
                    scheme=scheme, host=host, port=server.server_port),
            )
        }
        foo_path = os.path.join(settings["DISTDIR"], "foo")
        foo_stale_content = b"stale content\n"
        with open(foo_path, "wb") as f:
            f.write(b"stale content\n")

        self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

        with open(foo_path, "rb") as f:
            self.assertEqual(f.read(), foo_stale_content)
        with open(foo_path, "rb") as f:
            self.assertNotEqual(f.read(), distfiles["foo"])

        # Use force=True to update the stale file.
        self.assertTrue(
            bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))
        with open(foo_path, "rb") as f:
            self.assertEqual(f.read(), distfiles["foo"])

        # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
        # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
        # FETCHCOMMAND must perform atomic rename itself due to read-only
        # DISTDIR.
with open(foo_path, "wb") as f: f.write(b"stale content\n") orig_fetchcommand = settings["FETCHCOMMAND"] orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode temp_fetchcommand = os.path.join(eubin, "fetchcommand") with open(temp_fetchcommand, "w") as f: f.write(""" set -e URI=$1 DISTDIR=$2 FILE=$3 trap 'chmod a-w "${DISTDIR}"' EXIT chmod ug+w "${DISTDIR}" %s mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}" """ % orig_fetchcommand.replace("${FILE}", "${FILE}.__download__")) settings[ "FETCHCOMMAND"] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % ( BASH_BINARY, temp_fetchcommand, ) settings.features.add("skiprocheck") settings.features.remove("distlocks") os.chmod(settings["DISTDIR"], 0o555) try: self.assertTrue( bool( run_async(fetch, foo_uri, settings, try_mirrors=False, force=True))) finally: settings["FETCHCOMMAND"] = orig_fetchcommand os.chmod(settings["DISTDIR"], orig_distdir_mode) settings.features.remove("skiprocheck") settings.features.add("distlocks") os.unlink(temp_fetchcommand) with open(foo_path, "rb") as f: self.assertEqual(f.read(), distfiles["foo"]) # Test emirrordist invocation. emirrordist_cmd = ( portage._python_interpreter, "-b", "-Wd", os.path.join(self.bindir, "emirrordist"), "--distfiles", settings["DISTDIR"], "--config-root", settings["EPREFIX"], "--delete", "--repositories-configuration", settings.repositories.config_string(), "--repo", "test_repo", "--mirror", ) if content_db is not None: emirrordist_cmd = emirrordist_cmd + ( "--content-db", content_db, ) env = settings.environ() env["PYTHONPATH"] = ":".join( filter( None, [PORTAGE_PYM_PATH] + os.environ.get("PYTHONPATH", "").split(":"), )) for k in distfiles: try: os.unlink(os.path.join(settings["DISTDIR"], k)) except OSError: pass proc = loop.run_until_complete( asyncio.create_subprocess_exec(*emirrordist_cmd, env=env)) self.assertEqual(loop.run_until_complete(proc.wait()), 0) for k in distfiles: with open( os.path.join(settings["DISTDIR"], layouts[0].get_path(k)), "rb") as f: self.assertEqual(f.read(), distfiles[k]) if content_db is not None: loop.run_until_complete( self._test_content_db( emirrordist_cmd, env, layouts, content_db, distfiles, settings, portdb, )) # Tests only work with one ebuild at a time, so the config # pool only needs a single config instance. 
        class config_pool:
            @staticmethod
            def allocate():
                return settings

            @staticmethod
            def deallocate(settings):
                pass

        def async_fetch(pkg, ebuild_path):
            fetcher = EbuildFetcher(
                config_pool=config_pool,
                ebuild_path=ebuild_path,
                fetchonly=False,
                fetchall=True,
                pkg=pkg,
                scheduler=loop,
            )
            fetcher.start()
            return fetcher.async_wait()

        for cpv in ebuilds:
            metadata = dict(
                zip(
                    Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys),
                ))

            pkg = Package(
                built=False,
                cpv=cpv,
                installed=False,
                metadata=metadata,
                root_config=root_config,
                type_name="ebuild",
            )

            settings.setcpv(pkg)
            ebuild_path = portdb.findname(pkg.cpv)
            portage.doebuild_environment(
                ebuild_path, "fetch", settings=settings, db=portdb)

            # Test good files in DISTDIR
            for k in settings["AA"].split():
                os.stat(os.path.join(settings["DISTDIR"], k))
            self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test digestgen with fetch
            os.unlink(os.path.join(os.path.dirname(ebuild_path), "Manifest"))
            for k in settings["AA"].split():
                os.unlink(os.path.join(settings["DISTDIR"], k))
            with ForkExecutor(loop=loop) as executor:
                self.assertTrue(
                    bool(
                        loop.run_until_complete(
                            loop.run_in_executor(
                                executor,
                                functools.partial(
                                    digestgen, mysettings=settings, myportdb=portdb),
                            ))))
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test missing files in DISTDIR
            for k in settings["AA"].split():
                os.unlink(os.path.join(settings["DISTDIR"], k))
            self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test empty files in DISTDIR
            for k in settings["AA"].split():
                file_path = os.path.join(settings["DISTDIR"], k)
                with open(file_path, "wb") as f:
                    pass
                self.assertEqual(os.stat(file_path).st_size, 0)
            self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test non-empty files containing null bytes in DISTDIR
            for k in settings["AA"].split():
                file_path = os.path.join(settings["DISTDIR"], k)
                with open(file_path, "wb") as f:
                    f.write(len(distfiles[k]) * b"\0")
                self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
            self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
            for k in settings["AA"].split():
                with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            # Test PORTAGE_RO_DISTDIRS
            settings["PORTAGE_RO_DISTDIRS"] = '"{}"'.format(ro_distdir)
            orig_fetchcommand = settings["FETCHCOMMAND"]
            orig_resumecommand = settings["RESUMECOMMAND"]
            try:
                settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    os.rename(file_path, os.path.join(ro_distdir, k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    self.assertTrue(os.path.islink(file_path))
                    with open(file_path, "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
                    os.unlink(file_path)
            finally:
                settings.pop("PORTAGE_RO_DISTDIRS")
                settings["FETCHCOMMAND"] = orig_fetchcommand
                settings["RESUMECOMMAND"] = orig_resumecommand

            # Test local filesystem in GENTOO_MIRRORS
            orig_mirrors = settings["GENTOO_MIRRORS"]
            orig_fetchcommand = settings["FETCHCOMMAND"]
            try:
                settings["GENTOO_MIRRORS"] = ro_distdir
                settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                settings["GENTOO_MIRRORS"] = orig_mirrors
                settings["FETCHCOMMAND"] = orig_fetchcommand
                settings["RESUMECOMMAND"] = orig_resumecommand

            # Test readonly DISTDIR
            orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
            try:
                os.chmod(settings["DISTDIR"], 0o555)
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                os.chmod(settings["DISTDIR"], orig_distdir_mode)

            # Test parallel-fetch mode
            settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
            try:
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                settings.pop("PORTAGE_PARALLEL_FETCHONLY")

            # Test RESUMECOMMAND
            orig_resume_min_size = settings["PORTAGE_FETCH_RESUME_MIN_SIZE"]
            try:
                settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = "2"
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    os.unlink(file_path)
                    with open(file_path + _download_suffix, "wb") as f:
                        f.write(distfiles[k][:2])
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])
            finally:
                settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = orig_resume_min_size

            # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to
            # temporarily chmod DISTDIR
            orig_fetchcommand = settings["FETCHCOMMAND"]
            orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
            for k in settings["AA"].split():
                os.unlink(os.path.join(settings["DISTDIR"], k))
            try:
                os.chmod(settings["DISTDIR"], 0o555)
                settings["FETCHCOMMAND"] = (
                    '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"'
                    % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"')))
                settings.features.add("skiprocheck")
                settings.features.remove("distlocks")
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
            finally:
                settings["FETCHCOMMAND"] = orig_fetchcommand
                os.chmod(settings["DISTDIR"], orig_distdir_mode)
                settings.features.remove("skiprocheck")
                settings.features.add("distlocks")

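# A sketch of what the "filename-hash BLAKE2B 8" layout above implies for
# mirror paths, assuming portage's FilenameHashLayout semantics: the first
# 8 bits (two hex digits) of the BLAKE2B digest of the file NAME select a
# subdirectory.
#
#     import hashlib
#
#     def filename_hash_path(filename, bits=8):
#         digest = hashlib.blake2b(filename.encode('utf8')).hexdigest()
#         return '{}/{}'.format(digest[:bits // 4], filename)
#
# content-hash layouts ("SHA512 8:8:8") instead nest directories derived from
# the file CONTENT digest, which is why those runs need a content.db for
# emirrordist.
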
def testEbuildFetch(self):
    distfiles = {
        'bar': b'bar\n',
        'foo': b'foo\n',
    }

    ebuilds = {
        'dev-libs/A-1': {
            'EAPI': '7',
            'RESTRICT': 'primaryuri',
            'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
                {scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
        },
    }

    loop = SchedulerInterface(global_event_loop())

    scheme = 'http'
    host = '127.0.0.1'
    content = {}
    for k, v in distfiles.items():
        content['/distfiles/{}.txt'.format(k)] = v

    with AsyncHTTPServer(host, content, loop) as server:
        ebuilds_subst = {}
        for cpv, metadata in ebuilds.items():
            metadata = metadata.copy()
            metadata['SRC_URI'] = metadata['SRC_URI'].format(
                scheme=scheme, host=host, port=server.server_port)
            ebuilds_subst[cpv] = metadata

        playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles)
        ro_distdir = tempfile.mkdtemp()
        try:
            fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
            fetch_bin = portage.process.find_binary(fetchcommand[0])
            if fetch_bin is None:
                self.skipTest('FETCHCOMMAND not found: {}'.format(
                    playground.settings['FETCHCOMMAND']))
            resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
            resume_bin = portage.process.find_binary(resumecommand[0])
            if resume_bin is None:
                self.skipTest('RESUMECOMMAND not found: {}'.format(
                    playground.settings['RESUMECOMMAND']))
            root_config = playground.trees[playground.eroot]['root_config']
            portdb = root_config.trees["porttree"].dbapi
            settings = config(clone=playground.settings)

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(config_pool=config_pool,
                    ebuild_path=ebuild_path, fetchonly=False,
                    fetchall=True, pkg=pkg, scheduler=loop)
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(zip(Package.metadata_keys,
                    portdb.aux_get(cpv, Package.metadata_keys)))

                pkg = Package(built=False, cpv=cpv, installed=False,
                    metadata=metadata, root_config=root_config,
                    type_name='ebuild')

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path, 'fetch',
                    settings=settings, db=portdb)

                # Test good files in DISTDIR
                for k in settings['AA'].split():
                    os.stat(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(bool(loop.run_until_complete(
                        loop.run_in_executor(executor, functools.partial(
                            digestgen, mysettings=settings, myportdb=portdb)))))
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        f.write(len(distfiles[k]) * b'\0')
                    self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
                self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_resumecommand = settings['RESUMECOMMAND']
                try:
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop('PORTAGE_RO_DISTDIRS')
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings['GENTOO_MIRRORS']
                orig_fetchcommand = settings['FETCHCOMMAND']
                try:
                    settings['GENTOO_MIRRORS'] = ro_distdir
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['GENTOO_MIRRORS'] = orig_mirrors
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)

                # Test parallel-fetch mode
                settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
                try:
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings['AA'].split():
                        os.unlink(os.path.join(settings['DISTDIR'], k))
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop('PORTAGE_PARALLEL_FETCHONLY')

                # Test RESUMECOMMAND
                orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
                try:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, 'wb') as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size
        finally:
            shutil.rmtree(ro_distdir)
            playground.cleanup()
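
# Note on the RESUMECOMMAND test above: partial downloads live at
# "<name>" + _download_suffix (the '.__download__' suffix from
# portage.package.ebuild.fetch), and resume is only attempted once the
# partial file reaches PORTAGE_FETCH_RESUME_MIN_SIZE bytes; the test sets
# that threshold to 2 so its 2-byte partials qualify.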