def populate_versionlock_conf(
    yum_dnf: YumDnf, out_dir: Path, install_dir: Path,
):
    with create_ro(out_dir / 'versionlock.conf', 'w') as outf:
        outf.write(textwrap.dedent(f'''\
            [main]
            enabled = 1
            locklist = {install_dir.decode()}/versionlock.list
        '''))

    # Write an empty lock-list. This will be bind-mounted in at runtime.
    with create_ro(out_dir / 'versionlock.list', 'w'):
        pass

    # Side-load the appropriate versionlock plugin; we currently don't have
    # a good way to install this via an RPM.
    with Path.resource(
        __package__, f'{yum_dnf.value}_versionlock.gz', exe=False,
    ) as p, \
            gzip.open(p) as rf, \
            create_ro(out_dir / 'versionlock.py', 'wb') as wf:
        wf.write(rf.read())

def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
    create_ro(output_path, 'wb').close()  # Ensure non-root ownership
    subvol.run_as_root([
        'mksquashfs',
        subvol.path(),
        output_path,
        '-comp', 'zstd',
        '-noappend',
    ])

def test_interposed_rename(self):
    with temp_dir() as td:
        shadow_td = self._shadow / td.lstrip(b'/')
        os.makedirs(shadow_td)

        # Good case: a file gets renamed
        with create_ro(td / 'gets_moved', 'w') as f:
            f.write('i become shadow')
        for d in [td, shadow_td]:
            with create_ro(d / 'ok_dest', 'w') as f:
                f.write('unmodified')
        self._check_file_contents([
            (td / 'gets_moved', 'i become shadow'),
            (td / 'ok_dest', 'unmodified'),
            (shadow_td / 'ok_dest', 'unmodified'),
        ])
        with _capture_fd(2) as res:
            self.assertEqual(
                0, self._rename(td / 'gets_moved', td / 'ok_dest'))
        self.assertEqual(
            f'`rename({td}/gets_moved, {td}/ok_dest)` will replace ' +
            f'shadowed original `{shadow_td}/ok_dest`\n',
            res.contents.decode(),
        )
        self.assertFalse(os.path.exists(td / 'gets_moved'))
        self._check_file_contents([
            (td / 'ok_dest', 'unmodified'),
            (shadow_td / 'ok_dest', 'i become shadow'),
        ])

        # Normal case: destination lacks a shadow counterpart
        with create_ro(td / 'also_moved', 'w') as f:
            f.write('no shadow for me')
        with create_ro(td / 'unshadowed', 'w') as f:
            f.write('unmodified')
        self._check_file_contents([
            (td / 'also_moved', 'no shadow for me'),
            (td / 'unshadowed', 'unmodified'),
        ])
        with _capture_fd(2) as res:
            self.assertEqual(
                0, self._rename(td / 'also_moved', td / 'unshadowed'))
        self.assertEqual(b'', res.contents)
        self.assertFalse(os.path.exists(td / 'also_moved'))
        self._check_file_contents([
            (td / 'unshadowed', 'no shadow for me'),
        ])

def _prepare_versionlock_lists(
    subvol: Subvol, snapshot_dir: Path, list_path: Path
) -> Dict[str, Tuple[str, int]]:
    '''
    Returns a map of "in-snapshot path" -> "tempfile with its contents",
    with the intention that the tempfile in the value will be a read-only
    bind-mount over the path in the key.
    '''
    # `dnf` and `yum` expect different formats, so we parse our own.
    with open(list_path) as rf:
        envras = [l.split('\t') for l in rf]
    templates = {'yum': '{e}:{n}-{v}-{r}.{a}', 'dnf': '{n}-{e}:{v}-{r}.{a}'}
    dest_to_src_and_size = {}
    with temp_dir() as d:
        # Only bind-mount lists for those binaries that exist in the snapshot.
        for prog in set(
            f'{p}' for p in (subvol.path(snapshot_dir)).listdir()
        ) & set(templates.keys()):
            template = templates[prog]
            src = d / (prog + '-versionlock.list')
            with create_ro(src, 'w') as wf:
                for e, n, v, r, a in envras:
                    wf.write(template.format(e=e, n=n, v=v, r=r, a=a))
            set_new_key(
                dest_to_src_and_size,
                # This path convention must match how `write_yum_dnf_conf.py`
                # and `rpm_repo_snapshot.bzl` set up their output.
                snapshot_dir / f'{prog}/etc/{prog}/plugins/versionlock.list',
                (src, len(envras)),
            )
        yield dest_to_src_and_size

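# A quick, self-contained illustration (with made-up ENVRA values, not taken
# from any real lock-list) of how one tab-separated line of `list_path`
# renders under the two templates above: `yum` takes "E:N-V-R.A", while
# `dnf` takes "N-E:V-R.A".
_templates = {'yum': '{e}:{n}-{v}-{r}.{a}', 'dnf': '{n}-{e}:{v}-{r}.{a}'}
_e, _n, _v, _r, _a = '0\trpm-test-carrot\t2\trc0\tx86_64'.split('\t')
assert _templates['yum'].format(e=_e, n=_n, v=_v, r=_r, a=_a) == \
    '0:rpm-test-carrot-2-rc0.x86_64'
assert _templates['dnf'].format(e=_e, n=_n, v=_v, r=_r, a=_a) == \
    'rpm-test-carrot-0:2-rc0.x86_64'
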
def _write_confs_get_repos(
    dest: Path,
    yum_conf_content: str,
    dnf_conf_content: str,
    *,
    exclude_repos: FrozenSet[str],
) -> Iterable[YumDnfConfRepo]:
    assert not (exclude_repos & {'main', 'DEFAULT'}), exclude_repos
    yum_dnf_repos = []
    for out_name, content in [
        ('yum.conf', yum_conf_content),
        ('dnf.conf', dnf_conf_content),
    ]:
        if content is not None:
            # Save the original, unmodified config in case of an error
            with create_ro(dest / (out_name + '.orig'), 'w') as out:
                out.write(content)

            # Remove the excluded repos
            cp = ConfigParser()
            cp.read_string(content)
            for excluded in exclude_repos:
                cp.remove_section(excluded)
            with create_ro(dest / out_name, 'w+') as out:
                cp.write(out)
                out.seek(0)
                new_content = out.read()

            yum_dnf_repos.append(set(
                YumDnfConfParser(YumDnf.dnf, StringIO(new_content)).gen_repos()
            ))
    yum_repos, dnf_repos = yum_dnf_repos
    diff_repos = yum_repos.symmetric_difference(dnf_repos)
    if diff_repos:  # pragma: no cover
        # This is not allowed because `RpmActionItem` needs the package sets
        # to be the same for `yum` or `dnf`, since it uses the
        # `snapshot.sql3` DB to validate package names and determine
        # allowable versions (aka versionlock).
        #
        # We could potentially tag every `rpm` row with "dnf" or "yum" or
        # "both" to resolve this. In that case, the right logic would be to
        # merge the repo lists here, and to check the `yum_dnf` column in
        # any queries from the compiler. We really don't need this extra
        # complexity today.
        raise RuntimeError(
            f'`--yum-conf` and `--dnf-conf` had different repos {diff_repos}'
        )
    return dnf_repos

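# A minimal sketch of the exclusion step above, using hypothetical repo
# names; it only demonstrates the stdlib `ConfigParser` calls that the
# function relies on.
from configparser import ConfigParser
from io import StringIO

_cp = ConfigParser()
_cp.read_string(
    '[main]\ngpgcheck = 1\n\n'
    '[good-repo]\nbaseurl = http://example.com/good\n\n'
    '[bad-repo]\nbaseurl = http://example.com/bad\n'
)
_cp.remove_section('bad-repo')
_out = StringIO()
_cp.write(_out)
assert '[good-repo]' in _out.getvalue()
assert '[bad-repo]' not in _out.getvalue()
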
def _set_up_shadow(self, replacement, to_shadow):
    # Create the mountpoint at the shadowed location, and the file
    # that will shadow it.
    with create_ro(to_shadow, 'w'):
        pass
    with create_ro(replacement, 'w') as outfile:
        outfile.write('shadows carrot')

    # Shadow the file that `yum` / `dnf` wants to write -- writing to
    # this location will now fail since it's read-only.
    subprocess.check_call([
        'mount', '-o', 'bind,ro', replacement, to_shadow,
    ])

    try:
        yield
    finally:
        # Required so that our temporary dirs can be cleaned up.
        subprocess.check_call(['umount', to_shadow])

def add_sqlite_to_storage(
    cls, storage: Storage, dest_dir: Path,
) -> Iterable[sqlite3.Connection]:
    with tempfile.NamedTemporaryFile() as db_tf:
        with sqlite3.connect(db_tf.name) as db:
            RepoSnapshot._create_sqlite_tables(db)
            yield db
        db.close()
        with storage.writer() as db_out:
            for chunk in read_chunks(db_tf, cls._STORAGE_CHUNK_SIZE):
                db_out.write(chunk)
            with create_ro(dest_dir / cls._STORAGE_ID_FILE, 'w') as sidf:
                sidf.write(db_out.commit() + '\n')

def snapshot_gpg_keys(
    *, key_urls: Iterable[str], whitelist_dir: Path, snapshot_dir: Path,
) -> None:
    os.mkdir(snapshot_dir / 'gpg_keys')
    for url in key_urls:
        with open_url(url) as key_file:
            key_content = key_file.read()

        # Check that the key is in our whitelist, and the content matches.
        filename = os.path.basename(urlparse(url).path)
        with open(whitelist_dir / filename, 'rb') as infile:
            whitelist_key = infile.read()
        assert whitelist_key == key_content, (whitelist_key, key_content)

        with create_ro(snapshot_dir / 'gpg_keys' / filename, 'wb') as outfile:
            outfile.write(whitelist_key)

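# For clarity: the whitelist filename is just the final path component of
# the key URL (hypothetical URL shown here).
import os.path
from urllib.parse import urlparse

assert os.path.basename(
    urlparse('https://example.com/keys/RPM-GPG-KEY-example').path
) == 'RPM-GPG-KEY-example'
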
def write_yum_dnf_conf(
    *,
    yum_dnf: YumDnf,
    infile: TextIO,
    out_dir: Path,
    install_dir: Path,
    ports: Iterable[int],
):
    # `yum-dnf-from-snapshot` implicitly depends on this path convention for
    # the config and for the plugins under `<snapshot_dir>/etc`.
    plugin_dir = f'{yum_dnf.value}/plugins'
    config_path = f'{yum_dnf.value}/{yum_dnf.value}.conf'

    os.makedirs(out_dir / plugin_dir)
    populate_versionlock_conf(
        yum_dnf,
        out_dir=out_dir / plugin_dir,
        install_dir=install_dir / plugin_dir,
    )

    server_urls = [urlparse(f'http://localhost:{p}') for p in ports]
    yc = YumDnfConfParser(yum_dnf, infile)
    isolated_yc = yc.isolate().isolate_repos(
        repo._replace(
            base_url=[
                urlunparse(url._replace(path=repo.name))
                for url in server_urls
            ],
            gpg_key_urls=[
                urlunparse(
                    # NB: It would be "better" to use `random.choice`, but it
                    # makes it harder to write tests, so worse it is.
                    server_urls[0]._replace(path=os.path.join(
                        repo.name,
                        os.path.basename(urlparse(key_url).path),
                    ))
                ) for key_url in repo.gpg_key_urls
            ],
        ) for repo in yc.gen_repos()
    ).isolate_main(
        config_path=(install_dir / config_path).decode(),
        versionlock_dir=(install_dir / plugin_dir).decode(),
    )
    with create_ro(out_dir / config_path, 'w') as conf_out:
        isolated_yc.write(conf_out)

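# For orientation, a rough sketch of the output tree this produces when
# `yum_dnf` is `YumDnf.dnf` (derived from `plugin_dir`, `config_path`, and
# `populate_versionlock_conf` above, assuming `YumDnf.dnf.value == 'dnf'`):
#
#   <out_dir>/dnf/dnf.conf                  # isolated config, written last
#   <out_dir>/dnf/plugins/versionlock.conf  # locklist points at <install_dir>/dnf/plugins
#   <out_dir>/dnf/plugins/versionlock.list  # empty; bind-mounted at runtime
#   <out_dir>/dnf/plugins/versionlock.py    # side-loaded plugin
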
def fetch_sqlite_from_storage(
    cls, storage: Storage, from_dir: Path, dest: Path,
) -> Path:
    '''
    At present, this is just a helper for tests. Real builds should use
    `rpm_repo_snapshot()` to fetch the `.sql3` DB.

    Returns the populated `dest` for convenience.
    '''
    with open(from_dir / cls._STORAGE_ID_FILE) as sid_in:
        sid = sid_in.read()
        assert sid[-1] == '\n', repr(sid)
        sid = sid[:-1]
    with storage.reader(sid) as db_in, create_ro(dest, 'wb') as db_out:
        for chunk in read_chunks(db_in, cls._STORAGE_CHUNK_SIZE):
            db_out.write(chunk)
    return dest

def test_update_shadowed(self):
    with temp_dir() as root, mock.patch.object(
        # Note that the shadowed root is under the install root, since
        # the `rename` runs under chroot.
        yum_dnf_from_snapshot, 'SHADOWED_PATHS_ROOT', Path('/shadow'),
    ):
        os.mkdir(root / 'meta')
        os.mkdir(root / 'rpm_test')
        os.makedirs(root / 'shadow/rpm_test')

        to_shadow = root / 'rpm_test/carrot.txt'
        replacement = root / 'rpm_test/shadows_carrot.txt'
        shadowed_original = root / 'shadow/rpm_test/carrot.txt'

        # Our shadowing setup is supposed to have moved the original here.
        with create_ro(shadowed_original, 'w') as outfile:
            outfile.write('yum/dnf overwrites this')

        with self._set_up_shadow(replacement, to_shadow):
            with open(to_shadow) as infile:
                self.assertEqual('shadows carrot', infile.read())
            with open(shadowed_original) as infile:
                self.assertEqual('yum/dnf overwrites this', infile.read())

            yum_dnf_from_snapshot.yum_dnf_from_snapshot(
                yum_dnf=self._YUM_DNF,
                snapshot_dir=_SNAPSHOT_DIR,
                protected_paths=[],
                yum_dnf_args=[
                    f'--installroot={root}',
                    'install', '--assumeyes', 'rpm-test-carrot',
                ],
            )

            # The shadow is still in place
            with open(to_shadow) as infile:
                self.assertEqual('shadows carrot', infile.read())
            # But we updated the shadowed file
            with open(shadowed_original) as infile:
                self.assertEqual('carrot 2 rc0\n', infile.read())

def test_conf(self):
    install_dir = '/INSTALL/DIR'
    prog_name = self._YUM_DNF.value
    expected_out = _CONF_OUT.format(
        prog_name=prog_name,
        extra_directives=textwrap.dedent('''\
            skip_missing_names_on_install = 0
            skip_missing_names_on_update = 0
        ''') if self._YUM_DNF == YumDnf.yum else '',
    )
    with temp_dir() as td:
        with create_ro(td / 'in', 'w') as outf:
            outf.write(_CONF_IN)
        wydc.main([
            f'--rpm-installer={self._YUM_DNF.value}',
            f'--input-conf={td / "in"}',
            f'--output-dir={td / "out"}',
            f'--install-dir={install_dir}',
            '--repo-server-ports=1234 5678',
        ])
        with open(td / f'out/{prog_name}/{prog_name}.conf') as infile:
            self.assertEqual(expected_out, infile.read())

def test_get_shadowed_rename_dest(self):
    with temp_dir() as td:
        shadow_td = self._shadow / td.lstrip(b'/')
        os.makedirs(shadow_td)

        # These `real_*` things have no shadow counterparts.
        os.mkdir(td / 'real_dir_exists')
        with create_ro(td / 'real_file_exists', 'w'):
            pass
        # The shadow setup is OK for this one, but the source had better
        # not be `real_file_exists`.
        os.link(td / 'real_file_exists', td / 'hardlink')
        with create_ro(shadow_td / 'hardlink', 'w'):
            pass
        # Good case: both exist and are files.
        for d in [td, shadow_td]:
            with create_ro(d / 'shadow_and_real_exist', 'w'):
                pass
        # Destination is OK, but shadow is a directory (bug?)
        with create_ro(td / 'real_file_shadow_dir', 'w'):
            pass
        os.mkdir(shadow_td / 'real_file_shadow_dir')

        # Destination does not exist
        self.assertEqual(None, self._get_shadowed_rename_dest(
            td / 'real_file_exists', td / 'file_does_not_exist'))
        # Destination is a directory
        self.assertEqual(None, self._get_shadowed_rename_dest(
            b'/etc', td / 'real_file_exists'))
        # Same inode
        self.assertEqual(None, self._get_shadowed_rename_dest(
            td / 'real_file_exists', td / 'hardlink'))
        # 'hardlink' destination is fine if not renaming the same inode
        self.assertEqual(
            shadow_td / 'hardlink',
            self._get_shadowed_rename_dest(
                td / 'shadow_and_real_exist', td / 'hardlink'),
        )
        # Shadow destination does not exist
        self.assertEqual(None, self._get_shadowed_rename_dest(
            b'/etc/shadow_and_real_exist', b'/etc/real_file_exists'))
        # Shadow destination is a directory
        self.assertEqual(None, self._get_shadowed_rename_dest(
            b'/etc/real_file_exists', b'/etc/real_file_shadow_dir'))
        # Good case: different inodes, both destinations are files
        self.assertEqual(
            shadow_td / 'shadow_and_real_exist',
            self._get_shadowed_rename_dest(
                # We don't error-check the source being a directory,
                # since `rename` will fail
                td / 'real_dir_exists', td / 'shadow_and_real_exist'),
        )

def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
    with create_ro(output_path, 'wb') as outfile, subprocess.Popen(
        ['zstd', '--stdout'], stdin=subprocess.PIPE, stdout=outfile,
    ) as zst, subvol.mark_readonly_and_write_sendstream_to_file(zst.stdin):
        pass
    check_popen_returncode(zst)

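# A hedged sketch (not part of the packager above) of how the resulting
# compressed sendstream might be unpacked for inspection.  It assumes the
# `zstd` and `btrfs` CLIs are available, that the caller has the privileges
# `btrfs receive` requires, and uses hypothetical paths throughout.
import subprocess

with open('image.sendstream.zst', 'rb') as infile:
    decompress = subprocess.Popen(
        ['zstd', '--decompress', '--stdout'],
        stdin=infile, stdout=subprocess.PIPE,
    )
    subprocess.check_call(
        ['btrfs', 'receive', '/path/to/receive/parent'],
        stdin=decompress.stdout,
    )
    decompress.stdout.close()
    assert decompress.wait() == 0, 'zstd exited non-zero'
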
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
    with create_ro(output_path, 'wb') as outfile, \
            subvol.mark_readonly_and_write_sendstream_to_file(outfile):
        pass