def test_yum_is_dnf(self):
    """`yum_is_dnf()` is True only when `yum` resolves to a `dnf` binary."""
    # Setup for yum not being the same as dnf, modeled after fb
    with temp_dir() as td:
        # NOTE(review): relies on `Path.touch()` returning the path so it
        # can be `.decode()`d below -- confirm against `fs_utils.Path`
        # (stdlib pathlib's `touch()` returns None).
        yum_path = Path(td / 'yum').touch()
        with mock.patch('shutil.which') as mock_which:
            # Case 1: no `yum` on PATH at all.
            mock_which.return_value = None
            self.assertFalse(yum_is_dnf())
            # Case 2: a real `yum` that is not a symlink to dnf.
            mock_which.return_value = yum_path.decode()
            self.assertFalse(yum_is_dnf())
    # Setup for yum being the same as dnf, modeled after fedora
    # where `/bin/yum -> dnf-3`
    with temp_dir() as td:
        dnf_name = 'dnf-3'
        dnf_path = Path(td / dnf_name).touch()
        yum_path = td / 'yum'
        # Symlink to the name for a relative symlink that ends up
        # as yum -> dnf-3
        os.symlink(dnf_name, yum_path)
        with mock.patch('shutil.which') as mock_which:
            # `which` must resolve both names for the comparison to run.
            mock_paths = {dnf_name: dnf_path, 'yum': yum_path}
            mock_which.side_effect = lambda p: mock_paths[p].decode()
            self.assertTrue(yum_is_dnf())
def sign_rpm(rpm_path: Path, gpg_signing_key: str) -> None:
    'Signs an RPM with the provided key data'
    with tempfile.TemporaryDirectory() as td:
        # Import into an ephemeral GPG home so the caller's keyring is
        # never touched.
        gpg = gnupg.GPG(gnupghome=td)
        res = gpg.import_keys(gpg_signing_key)
        assert res.count == 1, 'Only 1 private key can be imported for signing'

        # Paths inside the container for passing artifacts to and fro
        work_dir = Path(generate_work_dir())
        package_dir = work_dir / 'package'
        gpg_dir = work_dir / 'gpg'

        opts = new_nspawn_opts(
            cmd=[
                '/usr/bin/rpmsign',
                # Select the just-imported key by its fingerprint.
                f'--define=_gpg_name {res.fingerprints[0]}',
                '--addsign',
                Path(package_dir / os.path.basename(rpm_path)).shell_quote(),
            ],
            layer=_build_appliance(),
            # The keyring is mounted read-only ...
            bindmount_ro=[
                (td, gpg_dir),
            ],
            # ... but the RPM's directory must be writable, since
            # `rpmsign --addsign` rewrites the package in place.
            bindmount_rw=[
                (os.path.dirname(rpm_path), package_dir),
            ],
            user=pwd.getpwnam('root'),
            setenv=[f'GNUPGHOME={gpg_dir.shell_quote()}'],
        )
        run_non_booted_nspawn(opts, PopenArgs())
def build_rpm(package_dir: Path, arch: str, rpm: Rpm, gpg_signing_key: str) -> Path:
    'Returns the filename of the built RPM.'
    with temp_dir(dir=package_dir) as td, tempfile.NamedTemporaryFile() as tf, \
            Path.resource(__package__, 'busybox', exe=True) as busybox_path:
        # Write the spec (which may reference the bundled busybox) to a
        # temp file that we bind-mount into the build container below.
        tf.write(rpm.spec(busybox_path).encode())
        tf.flush()

        work_dir = Path(generate_work_dir())

        format_kwargs = {
            "quoted_arch": shlex.quote(arch),
            "quoted_buildroot": Path(work_dir / 'build').shell_quote(),
            "quoted_home": Path(work_dir / 'home').shell_quote(),
            "quoted_spec_file": shlex.quote(tf.name),
            "quoted_work_dir": work_dir.shell_quote(),
            # We get the uid of the current user so that we can chown the
            # work_dir *inside* the running container.  The nspawn'd build
            # appliance container needs to run as root so that it can mkdir
            # the `work_dir` which exists at /.  If we don't chown the
            # resulting tree that `rpmbuild` creates the rename would
            # fail.
            "current_uid": os.getuid(),
        }

        opts = new_nspawn_opts(
            cmd=[
                'sh',
                '-uec',
                '''\
                /usr/bin/rpmbuild \
                -bb \
                --target {quoted_arch} \
                --buildroot {quoted_buildroot} \
                {quoted_spec_file} \
                && chown -R {current_uid} {quoted_work_dir} \
                '''.format(**format_kwargs),
            ],
            layer=_build_appliance(),
            bindmount_ro=[(tf.name, tf.name), (busybox_path, busybox_path)],
            bindmount_rw=[(td, work_dir)],
            user=pwd.getpwnam('root'),
            setenv=['HOME={quoted_home}'.format(**format_kwargs)],
        )
        run_non_booted_nspawn(opts, PopenArgs())

        # `rpmbuild` has a non-configurable output layout, so
        # we'll move the resulting rpm into our package dir.
        rpms_dir = td / 'home/rpmbuild/RPMS' / arch
        rpm_name, = rpms_dir.listdir()
        os.rename(rpms_dir / rpm_name, package_dir / rpm_name)
        sign_rpm(package_dir / rpm_name, gpg_signing_key)
        return rpm_name
def test_rpm_action_item_auto_downgrade(self):
    """Installing an older RPM over a newer installed one must downgrade."""
    parent_subvol = find_built_subvol(
        (Path(__file__).dirname() / 'test-with-one-local-rpm').decode())
    src_rpm = Path(__file__).dirname() / "rpm-test-cheese-1-1.rpm"

    with TempSubvolumes(sys.argv[0]) as temp_subvolumes:
        # ensure cheese2 is installed in the parent from rpm-test-cheese-2-1
        assert os.path.isfile(
            parent_subvol.path('/usr/share/rpm_test/cheese2.txt'))

        # make sure the RPM we are installing is older in order to
        # trigger the downgrade
        src_data = RpmMetadata.from_file(src_rpm)
        subvol_data = RpmMetadata.from_subvol(parent_subvol, src_data.name)
        assert compare_rpm_versions(src_data, subvol_data) < 0

        subvol = temp_subvolumes.snapshot(parent_subvol, 'rpm_action')
        RpmActionItem.get_phase_builder(
            [
                RpmActionItem(
                    from_target='t',
                    source=src_rpm,
                    action=RpmAction.install,
                )
            ],
            DUMMY_LAYER_OPTS._replace(
                yum_from_snapshot=Path(__file__).dirname() /
                    'yum-from-test-snapshot',
            ),
        )(subvol)
        # Remove trees whose contents vary run-to-run so the render
        # below stays deterministic.
        subvol.run_as_root([
            'rm', '-rf',
            subvol.path('dev'),
            subvol.path('meta'),
            subvol.path('var'),
        ])
        # Only the downgraded cheese1 payload should remain.
        self.assertEqual(['(Dir)', {
            'usr': ['(Dir)', {
                'share': ['(Dir)', {
                    'rpm_test': ['(Dir)', {
                        'cheese1.txt': ['(File d36)'],
                    }],
                }],
            }],
        }], render_subvol(subvol))
def test_units_enabled(self):
    """Each unit is enabled in its own target's `.wants` dir and nowhere else."""
    # Get a list of the available .wants dirs for all targets to validate
    available_targets = [
        Path(avail) for avail in glob.glob(PROV_ROOT / "*.wants")
    ]

    # spec[1] is the target name, skip if None
    for unit, target, *_ in unit_test_specs:
        # Make sure it's enabled where it should be
        if target:
            enabled_in_target = PROV_ROOT / _twant(target) / unit
            self.assertTrue(
                os.path.islink(enabled_in_target), enabled_in_target)
            self.assertTrue(
                os.path.isfile(enabled_in_target), enabled_in_target)

        # make sure it's *not* enabled where it shouldn't be
        for avail_target in [
            avail for avail in available_targets
            if target and avail.basename() != _twant(target)
        ]:
            unit_in_target_wants = avail_target / unit
            # BUGFIX: assert on the same path object we name in the
            # failure message.  Previously the asserted path was computed
            # a second time (`avail_target / unit`), so the check and its
            # diagnostic could silently diverge.
            self.assertFalse(
                os.path.exists(unit_in_target_wants), unit_in_target_wants)
def test_rpm_action_item_remove_local(self):
    # Removal keys off the RPM *name* stored in the metadata, not the
    # filename -- so removing via either the cheese-1 or the cheese-2
    # local file must behave the same.
    test_dir = Path(__file__).dirname()
    for ver in (1, 2):
        self._check_cheese_removal(test_dir / f'rpm-test-cheese-{ver}-1.rpm')
def _check_protected_dir(self, subvol, protected_dir):
    # Writing inside a protected directory must fail and leave no trace.
    prot = Path(protected_dir)
    attempt_write = _builder(_touch_cmd(prot / 'ALIEN'))
    with self.assertRaises(subprocess.CalledProcessError):
        attempt_write(subvol)
    # The protected directory survives, and the file was never created.
    self.assertTrue(os.path.isdir(subvol.path(prot)))
    self.assertFalse(os.path.exists(subvol.path(prot / 'ALIEN')))
def _install(
    self, *,
    protected_paths,
    install_args=None,
):
    """Context manager: run yum/dnf install into a fresh install root.

    Creates dummy entries for every protected path, runs the install, and
    yields the install-root `Path`.  The root is always removed on exit --
    via `sudo`, since the package manager leaves root-owned files behind.
    """
    if install_args is None:
        install_args = _INSTALL_ARGS
    install_root = Path(tempfile.mkdtemp())
    try:
        # IMAGE_ROOT/meta/ is always required since it's always protected
        for p in set(protected_paths) | {'meta/'}:
            # Trailing `/` means "directory"; otherwise create an empty
            # file (plus its parent directory).
            if p.endswith('/'):
                os.makedirs(install_root / p)
            else:
                os.makedirs(os.path.dirname(install_root / p))
                with open(install_root / p, 'wb'):
                    pass
        # Note: this can't use `_yum_using_build_appliance` because that
        # would lose coverage info on `yum_dnf_from_snapshot.py`.  On
        # the other hand, running this test against the host is fragile
        # since it depends on the system packages available on CI
        # containers.  For this reason, this entire test is an
        # `image.python_unittest` that runs in a build appliance.
        yum_dnf_from_snapshot.yum_dnf_from_snapshot(
            yum_dnf=self._YUM_DNF,
            snapshot_dir=_SNAPSHOT_DIR,
            protected_paths=protected_paths,
            yum_dnf_args=[
                f'--installroot={install_root}',
                *install_args,
            ]
        )
        yield install_root
    finally:
        # Paranoia: never let the cleanup become `rm -rf /`.
        assert os.path.realpath(install_root) != b'/'
        # Courtesy of `yum`, the `install_root` is now owned by root.
        subprocess.run(['sudo', 'rm', '-rf', install_root], check=True)
def _image_source_path(
    layer_opts: LayerOpts, *,
    source: AnyStr = None,
    layer: Subvol = None,
    path: AnyStr = None,
) -> Path:
    """Resolve an `image.source` to an on-disk path.

    Exactly one of `source` (a filesystem location) or `layer` (a built
    subvolume) must be set; `path` is resolved relative to whichever one
    was given.

    NOTE(review): the `AnyStr = None` defaults should really be
    `Optional[...]`, and `path.lstrip('/')` assumes a `str` `path` --
    confirm callers never pass `bytes` here.
    """
    assert (source is None) ^ (layer is None), (source, layer, path)
    source = Path.or_none(source)
    # Absolute `path` is still relative to `source` or `layer`
    path = Path((path and path.lstrip('/')) or '.')

    if source:
        return (source / path).normpath()

    # For layer sources, check the layer's "artifacts require repo"
    # marker before handing out paths from it.
    if os.path.exists(layer.path(META_ARTIFACTS_REQUIRE_REPO)):
        _validate_artifacts_require_repo(layer, layer_opts, 'image.source')
    return Path(layer.path(path))
def customize_fields(kwargs):  # noqa: B902
    # Freeze `cmd` into a tuple after checking every element is a string.
    cmd = kwargs.pop('cmd')
    assert all(isinstance(part, (str, bytes)) for part in cmd), cmd
    kwargs['cmd'] = tuple(cmd)
    assert isinstance(kwargs['user'], str), kwargs['user']
    # Normalize each snapshot location to a `Path`, frozen as a tuple.
    snapshots = kwargs.pop('serve_rpm_snapshots')
    kwargs['serve_rpm_snapshots'] = tuple(Path(s) for s in snapshots)
def customize_fields(cls, kwargs):
    """Validate and normalize install-file kwargs in place.

    Directory sources take the three separate `*_mode` arguments; file
    sources take a single `mode`, defaulted from the source's execute bit.
    Populates `kwargs['paths']` with the resulting `_InstallablePath`s.
    """
    super().customize_fields(kwargs)
    coerce_path_field_normal_relative(kwargs, 'dest')
    customize_stat_options(kwargs, default_mode=None)  # Defaulted later

    source = kwargs['source']
    dest = kwargs['dest']

    # The 3 separate `*_mode` arguments must be set instead of `mode` for
    # directory sources.
    #
    # BUGFIX: the pop order must match the unpack order.  The original
    # popped `['mode', 'exe_mode', 'data_mode', 'dir_mode']` into
    # `mode, dir_mode, exe_mode, data_mode`, scrambling the three
    # directory-mode arguments (dir got exe's value, exe got data's,
    # data got dir's).
    popped_args = ['mode', 'dir_mode', 'exe_mode', 'data_mode']
    mode, dir_mode, exe_mode, data_mode = (
        kwargs.pop(a, None) for a in popped_args
    )

    st_source = os.stat(source, follow_symlinks=False)
    if stat.S_ISDIR(st_source.st_mode):
        assert mode is None, 'Cannot use `mode` for directory sources.'
        kwargs['paths'] = tuple(_recurse_into_source(
            Path(source), Path(dest),
            dir_mode=dir_mode or _DIR_MODE,
            exe_mode=exe_mode or _EXE_MODE,
            data_mode=data_mode or _DATA_MODE,
        ))
    elif stat.S_ISREG(st_source.st_mode):
        assert {dir_mode, exe_mode, data_mode} == {None}, \
            'Cannot use `{dir,exe,data}_mode` for file sources.'
        if mode is None:
            # This tests whether the build repo user can execute the
            # file.  This is a very natural test for build artifacts,
            # and files in the repo.  Note that this can be affected if
            # the ambient umask is pathological, which is why
            # `compiler.py` checks the umask.
            mode = _EXE_MODE if os.access(source, os.X_OK) else _DATA_MODE
        kwargs['paths'] = (_InstallablePath(
            source=source,
            provides=ProvidesFile(path=dest),
            mode=mode,
        ),)
    else:
        raise RuntimeError(
            f'{source} must be a regular file or directory, got {st_source}'
        )
def gen_subvolume_subtree_provides(subvol: Subvol, subtree: Path): 'Yields "Provides" instances for a path `subtree` in `subvol`.' # "Provides" classes use image-absolute paths that are `str` (for now). # Accept any string type to ease future migrations. subtree = os.path.join('/', Path(subtree).decode()) protected_paths = protected_path_set(subvol) for prot_path in protected_paths: rel_to_subtree = os.path.relpath(os.path.join('/', prot_path), subtree) if not has_leading_dot_dot(rel_to_subtree): yield ProvidesDoNotAccess(path=rel_to_subtree) subtree_full_path = subvol.path(subtree).decode() subtree_exists = False # Traverse the subvolume as root, so that we have permission to access # everything. for type_and_path in subvol.run_as_root([ # -P is the analog of --no-dereference in GNU tools # # Filter out the protected paths at traversal time. If one of the # paths has a very large or very slow mount, traversing it would # have a devastating effect on build times, so let's avoid looking # inside protected paths entirely. An alternative would be to # `send` and to parse the sendstream, but this is ok too. 'find', '-P', subtree_full_path, '(', *itertools.dropwhile( lambda x: x == '-o', # Drop the initial `-o` itertools.chain.from_iterable([ # `normpath` removes the trailing / for protected dirs '-o', '-path', subvol.path(os.path.normpath(p)) ] for p in protected_paths), ), ')', '-prune', '-o', '-printf', '%y %p\\0', ], stdout=subprocess.PIPE).stdout.split(b'\0'): if not type_and_path: # after the trailing \0 continue filetype, abspath = type_and_path.decode().split(' ', 1) relpath = os.path.relpath(abspath, subtree_full_path) assert not has_leading_dot_dot(relpath), (abspath, subtree_full_path) # We already "provided" this path above, and it should have been # filtered out by `find`. 
assert not is_path_protected(relpath, protected_paths), relpath # Future: This provides all symlinks as files, while we should # probably provide symlinks to valid directories inside the image as # directories to be consistent with SymlinkToDirItem. if filetype in ['b', 'c', 'p', 'f', 'l', 's']: yield ProvidesFile(path=relpath) elif filetype == 'd': yield ProvidesDirectory(path=relpath) else: # pragma: no cover raise AssertionError(f'Unknown {filetype} for {abspath}') if relpath == '.': subtree_exists = True # We should've gotten a CalledProcessError from `find`. assert subtree_exists, f'{subtree} does not exist in {subvol.path()}'
def test_receive(self, temp_subvols):
    # Receiving the `create_ops` sendstream into a fresh subvolume must
    # reproduce the demo layout exactly.
    name = 'differs_from_create_ops'
    subvol = temp_subvols.caller_will_create(name)
    stream_path = Path(__file__).dirname() / 'create_ops.sendstream'
    with open(stream_path) as infile, subvol.receive(infile):
        pass
    self.assertEqual(
        render_demo_subvols(create_ops=name),
        render_sendstream(subvol.mark_readonly_and_get_sendstream()),
    )
def test_rpm_action_conflict(self):
    # Conflicting actions on the same RPM name must be rejected when the
    # phase builder is constructed -- no need to actually run it.
    layer_opts = DUMMY_LAYER_OPTS._replace(
        yum_from_snapshot='required but ignored')
    conflict_cases = (
        # install-install conflict
        (('cat', RpmAction.install), ('cat', RpmAction.install)),
        # remove-install conflict
        (
            ('dog', RpmAction.remove_if_exists),
            ('dog', RpmAction.install),
        ),
    )
    for rpm_actions in conflict_cases:
        with self.assertRaisesRegex(RuntimeError, 'RPM action conflict '):
            RpmActionItem.get_phase_builder(
                [
                    RpmActionItem(from_target='t', name=name, action=action)
                    for name, action in rpm_actions
                ],
                layer_opts,
            )
    # Local RPMs conflict on their metadata *name*, even when their
    # filenames differ.
    with self.assertRaisesRegex(RuntimeError, 'RPM action conflict '):
        test_dir = Path(__file__).dirname()
        RpmActionItem.get_phase_builder(
            [
                RpmActionItem(
                    from_target='t',
                    source=test_dir / "rpm-test-cheese-2-1.rpm",
                    action=RpmAction.install,
                ),
                RpmActionItem(
                    from_target='t',
                    source=test_dir / "rpm-test-cheese-1-1.rpm",
                    action=RpmAction.remove_if_exists,
                ),
            ],
            layer_opts,
        )
def test_install_file_from_layer(self):
    """`image.source(layer=...)` installs a file out of a built layer."""
    layer = find_built_subvol(
        Path(__file__).dirname() / 'test-with-one-local-rpm')
    path_in_layer = b'usr/share/rpm_test/cheese2.txt'
    item = _install_file_item(
        from_target='t',
        source={'layer': layer, 'path': '/' + path_in_layer.decode()},
        dest='cheese2',
    )
    self.assertEqual(0o444, item.mode)
    # FIX: the original asserted this equality twice -- once wrapped in
    # `Path(...)` and once bare.  Since both compare equal to
    # `item.source` either way, one assertion suffices.
    self.assertEqual(layer.path(path_in_layer), item.source)
    self._check_item(
        item,
        {ProvidesFile(path='cheese2')},
        {require_directory('/')},
    )
def check_call(infile, subvolumes_dir):
    # Only intercept files living in the sigil directory; everything else
    # goes through the real JSON parser.
    if Path(infile.name).dirname().basename() == sigil_dirname:
        test_case.assertEqual(parent_layer_file, infile.name)
        test_case.assertEqual(_SUBVOLS_DIR, subvolumes_dir)

        # Minimal stand-in: callers only ever ask for the subvolume path.
        class FakeSubvolumeOnDisk:
            def subvolume_path(self):
                return subvolume_path.decode()

        return FakeSubvolumeOnDisk()
    return orig_from_json_file(infile, subvolumes_dir)
def _dummies_for_protected_paths(
    protected_paths: Iterable[str],
) -> Mapping[Path, Path]:
    '''
    Some locations (some host yum/dnf directories, and install root /meta/
    and mountpoints) should be off-limits to writes by RPMs.  We enforce
    that by bind-mounting an empty file or directory on top of each one.
    '''
    # NOTE(review): despite the `Mapping` return annotation, this is a
    # generator used as a context manager (single `yield`) -- presumably
    # wrapped with `@contextmanager` at the definition site; confirm.
    with temp_dir() as td, tempfile.NamedTemporaryFile() as tf:
        # NB: There may be duplicates in protected_paths, so we normalize.
        # If the duplicates include both a file and a directory, this picks
        # one arbitrarily, and if the type on disk is different, we will
        # fail at mount time.  This doesn't seem worth an explicit check.
        yield {
            # Trailing `/` marks a directory: mount the empty dir `td`;
            # otherwise mount the empty file `tf`.
            Path(p).normpath(): (td if p.endswith('/') else Path(tf.name))
                for p in protected_paths
        }
        # NB: The bind mount is read-only, so this is just paranoia.  If it
        # were left RW, we'd need to check its owner / permissions too.
        for expected, actual in (([], td.listdir()), (b'', tf.read())):
            assert expected == actual, \
                f'Some RPM wrote {actual} to {protected_paths}'
def _install_root(conf_path: Path, yum_dnf_args: Iterable[str]) -> Path:
    """Determine the install root: CLI args win over config, default '/'."""
    # Peek at the `yum` / `dnf` args, which take precedence over the config.
    parser = argparse.ArgumentParser(allow_abbrev=False, add_help=False)
    parser.add_argument('--installroot', type=Path.from_argparse)
    known_args, _unused = parser.parse_known_args(yum_dnf_args)
    if known_args.installroot:
        return known_args.installroot
    # For our wrapper to be transparent, the `installroot` semantics have
    # to match that of `yum` / `dnf`, so the argument is optional, with a
    # fallback to the config file, and then to `/`.
    config = ConfigParser()
    with open(conf_path) as conf_in:
        config.read_file(conf_in)
    return Path(config['main'].get('installroot', '/'))
def test_receive_sendstream(self):
    # Applying a ReceiveSendstreamItem must reproduce the demo subvolume.
    item = ReceiveSendstreamItem(
        from_target='t',
        source=Path(__file__).dirname() / 'create_ops.sendstream',
    )
    self.assertEqual(PhaseOrder.MAKE_SUBVOL, item.phase_order())
    with TempSubvolumes(sys.argv[0]) as temp_subvolumes:
        name = 'differs_from_create_ops'
        subvol = temp_subvolumes.caller_will_create(name)
        item.get_phase_builder([item], DUMMY_LAYER_OPTS)(subvol)
        self.assertEqual(
            render_demo_subvols(create_ops=name),
            render_sendstream(subvol.mark_readonly_and_get_sendstream()),
        )
def find_built_subvol(
    layer_output, *, path_in_repo=None, subvolumes_dir=None,
):
    """Open the built layer's `layer.json` and return its `Subvol`."""
    # It's OK for both to be None (uses the current file to find repo), but
    # it's not OK to set both.
    assert (path_in_repo is None) or (subvolumes_dir is None)
    if not subvolumes_dir:
        subvolumes_dir = _get_subvolumes_dir(path_in_repo)
    with open(Path(layer_output) / 'layer.json') as infile:
        on_disk = SubvolumeOnDisk.from_json_file(infile, subvolumes_dir)
        return Subvol(on_disk.subvolume_path(), already_exists=True)
def test_install_file_from_layer(self):
    # `image.source(layer=...)` should resolve a file inside a built layer.
    layer = find_built_subvol(
        Path(__file__).dirname() / 'test-with-one-local-rpm')
    path_in_layer = b'rpm_test/cheese2.txt'
    item = _install_file_item(
        from_target='t',
        source={'layer': layer, 'path': '/' + path_in_layer.decode()},
        dest='cheese2',
    )
    source_path = layer.path(path_in_layer)
    expected = _InstallablePath(
        source_path, ProvidesFile(path='cheese2'), 'a+r')
    self.assertEqual((expected, ), item.paths)
    self.assertEqual(source_path, item.source)
    self._check_item(item, {expected.provides}, {require_directory('/')})
def test_rpm_build_item(self):
    # Snapshot a layer pre-populated with rpmbuild inputs, run the
    # RPM_BUILD phase, and verify the built RPM appears.
    parent_subvol = find_built_subvol(
        (Path(__file__).dirname() / 'toy-rpmbuild-setup').decode())
    with TempSubvolumes(sys.argv[0]) as temp_subvolumes:
        for required_input in (
            '/rpmbuild/SOURCES/toy_src_file',
            '/rpmbuild/SPECS/specfile.spec',
        ):
            assert os.path.isfile(parent_subvol.path(required_input))
        subvol = temp_subvolumes.snapshot(parent_subvol, 'rpm_build')
        item = RpmBuildItem(from_target='t', rpmbuild_dir='/rpmbuild')
        RpmBuildItem.get_phase_builder([item], DUMMY_LAYER_OPTS)(subvol)
        self.assertEqual(item.phase_order(), PhaseOrder.RPM_BUILD)
        assert os.path.isfile(subvol.path('/rpmbuild/RPMS/toy.rpm'))
def build(self, subvol: Subvol, layer_opts: LayerOpts):
    """Create `into_dir/path_to_make` in `subvol` (like `mkdir -p`),
    then apply the item's stat options to the outermost new directory."""
    if layer_opts.build_appliance:
        # Run `mkdir` inside the build appliance container, with the
        # subvolume bind-mounted read-write at a generated work dir.
        work_dir = generate_work_dir()
        full_path = Path(work_dir) / self.into_dir / self.path_to_make
        opts = new_nspawn_opts(
            cmd=['mkdir', '-p', full_path],
            layer=layer_opts.build_appliance,
            bindmount_rw=[(subvol.path(), work_dir)],
            user=pwd.getpwnam('root'),
        )
        run_non_booted_nspawn(opts, PopenArgs())
    else:
        # Host-side build: mkdir directly in the subvolume as root.
        inner_dir = subvol.path(
            os.path.join(self.into_dir, self.path_to_make))
        subvol.run_as_root(['mkdir', '-p', inner_dir])
    # Stat options apply to the first (outermost) component being made.
    outer_dir = self.path_to_make.split('/', 1)[0]
    build_stat_options(
        self,
        subvol,
        subvol.path(os.path.join(self.into_dir, outer_dir)),
    )
def test_rpm_action_item_auto_downgrade(self):
    """Installing an older RPM over a newer installed one must downgrade."""
    parent_subvol = layer_resource_subvol(
        __package__, 'test-with-one-local-rpm',
    )
    src_rpm = Path(__file__).dirname() / "rpm-test-cheese-1-1.rpm"

    with TempSubvolumes(sys.argv[0]) as temp_subvolumes:
        # ensure cheese2 is installed in the parent from rpm-test-cheese-2-1
        assert os.path.isfile(parent_subvol.path('/rpm_test/cheese2.txt'))

        # make sure the RPM we are installing is older in order to
        # trigger the downgrade
        src_data = RpmMetadata.from_file(src_rpm)
        subvol_data = RpmMetadata.from_subvol(parent_subvol, src_data.name)
        assert compare_rpm_versions(src_data, subvol_data) < 0

        subvol = temp_subvolumes.snapshot(parent_subvol, 'rpm_action')
        RpmActionItem.get_phase_builder(
            [
                RpmActionItem(
                    from_target='t',
                    source=src_rpm,
                    action=RpmAction.install,
                )
            ],
            self._opts(),
        )(subvol)
        # Remove trees whose contents vary run-to-run so the render
        # below stays deterministic.
        subvol.run_as_root([
            'rm', '-rf',
            subvol.path('dev'),
            subvol.path('etc'),
            subvol.path('meta'),
            subvol.path('var'),
        ])
        # Only the downgraded cheese1 payload should remain.
        self.assertEqual(['(Dir)', {
            'rpm_test': ['(Dir)', {
                'cheese1.txt': ['(File d42)'],
            }],
        }], render_subvol(subvol))
def temp_repos_steps(base_dir=None, arch: str = 'x86_64', *args, **kwargs):
    '''
    Build a collection of RPM repos on disk from a history of changes to
    a set of RPM repos (as in `SAMPLE_STEPS`), by running:
      - `rpmbuild` to build the RPM files
      - `createrepo` to build the repo metadata
    Yields a temporary path containing one directory per time step (named
    0, 1, 2, etc).  Each step directory holds a directory per repo, laid
    out as:
        repodata/{repomd.xml,other-repodata.{xml,sqlite}.bz2}
        reponame-pkgs/rpm-test-<name>-<version>-<release>.<arch>.rpm
    On any error -- including Ctrl-C -- the tree is deleted before the
    exception propagates.
    '''
    steps_dir = Path(tempfile.mkdtemp(dir=base_dir))
    try:
        make_repo_steps(out_dir=steps_dir, arch=arch, *args, **kwargs)
        yield steps_dir
    except BaseException:  # Clean up even on Ctrl-C
        shutil.rmtree(steps_dir)
        raise
def test_update_shadowed(self):
    """A yum/dnf update must write through to the shadowed original file,
    leaving the shadowing replacement untouched."""
    with temp_dir() as root, mock.patch.object(
        # Note that the shadowed root is under the install root, since
        # the `rename` runs under chroot.
        yum_dnf_from_snapshot, 'SHADOWED_PATHS_ROOT', Path('/shadow'),
    ):
        os.mkdir(root / 'meta')
        os.mkdir(root / 'rpm_test')
        os.makedirs(root / 'shadow/rpm_test')

        to_shadow = root / 'rpm_test/carrot.txt'
        replacement = root / 'rpm_test/shadows_carrot.txt'
        shadowed_original = root / 'shadow/rpm_test/carrot.txt'

        # Our shadowing setup is supposed to have moved the original here.
        with create_ro(shadowed_original, 'w') as outfile:
            outfile.write('yum/dnf overwrites this')
        with self._set_up_shadow(replacement, to_shadow):
            # Sanity-check the shadow wiring before running the update.
            with open(to_shadow) as infile:
                self.assertEqual('shadows carrot', infile.read())
            with open(shadowed_original) as infile:
                self.assertEqual('yum/dnf overwrites this', infile.read())

            yum_dnf_from_snapshot.yum_dnf_from_snapshot(
                yum_dnf=self._YUM_DNF,
                snapshot_dir=_SNAPSHOT_DIR,
                protected_paths=[],
                yum_dnf_args=[
                    f'--installroot={root}',
                    'install', '--assumeyes', 'rpm-test-carrot',
                ],
            )

            # The shadow is still in place
            with open(to_shadow) as infile:
                self.assertEqual('shadows carrot', infile.read())
            # But we updated the shadowed file
            with open(shadowed_original) as infile:
                self.assertEqual('carrot 2 rc0\n', infile.read())
def test_install_file(self):
    """Mode is inferred from the source's exec bit; protected
    destinations are rejected."""
    # Executable source -> installed with 'a+rx'.
    with tempfile.NamedTemporaryFile() as tf:
        os.chmod(tf.name, stat.S_IXUSR)
        exe_item = _install_file_item(
            from_target='t', source={'source': tf.name}, dest='d/c',
        )
        ep = _InstallablePath(Path(tf.name), ProvidesFile(path='d/c'), 'a+rx')
        self.assertEqual((ep, ), exe_item.paths)
        self.assertEqual(tf.name.encode(), exe_item.source)
        self._check_item(exe_item, {ep.provides}, {require_directory('d')})

    # Checks `image.source(path=...)` -- non-executable -> 'a+r'.
    with temp_dir() as td:
        os.mkdir(td / 'b')
        open(td / 'b/q', 'w').close()
        data_item = _install_file_item(
            from_target='t',
            source={'source': td, 'path': '/b/q'},
            dest='d',
        )
        dp = _InstallablePath(td / 'b/q', ProvidesFile(path='d'), 'a+r')
        self.assertEqual((dp, ), data_item.paths)
        self.assertEqual(td / 'b/q', data_item.source)
        self._check_item(data_item, {dp.provides}, {require_directory('/')})

    # NB: We don't need to get coverage for this check on ALL the items
    # because the presence of the ProvidesDoNotAccess items is the real
    # safeguard -- e.g. that's what prevents TarballItem from writing
    # to /meta/ or other protected paths.
    with self.assertRaisesRegex(AssertionError, 'cannot start with meta/'):
        _install_file_item(
            from_target='t', source={'source': 'a/b/c'}, dest='/meta/foo',
        )
def _ensure_private_network():
    '''
    Normally, we run under `systemd-nspawn --private-network`.  We don't
    want to run in environments with network access because in these
    cases it's very possible that `yum` / `dnf` will end up doing
    something non-deterministic by reaching out to the network.
    '''
    # Interface types that imply no external connectivity, per
    # `/usr/include/uapi/linux/if_arp.h`.
    permitted_types = {
        768,  # ARPHRD_TUNNEL
        769,  # ARPHRD_TUNNEL6
        772,  # ARPHRD_LOOPBACK
    }
    sys_net = Path('/sys/class/net')
    for iface in sys_net.listdir():
        with open(sys_net / iface / 'type') as infile:
            iface_type = int(infile.read())
        # Not covered because we don't want to rely on the CI container
        # having a network interface.
        if iface_type not in permitted_types:  # pragma: no cover
            raise RuntimeError(
                'Refusing to run without --private-network, found '
                f'unknown interface {iface} of type {iface_type}.'
            )
def test_write_to_tarball(self, temp_subvols):
    # create a subvol from a demo sendstream, tar it, untar into a new
    # subvol, then compare the two
    demo_sv_name = 'demo_sv'
    demo_sv = temp_subvols.caller_will_create(demo_sv_name)
    with open(Path(__file__).dirname() / 'create_ops.sendstream') as f, \
            demo_sv.receive(f):
        pass

    unpacked_sv = temp_subvols.create('subvol')
    with tempfile.NamedTemporaryFile() as tar_file:
        with demo_sv.write_to_tarball(tar_file):
            pass
        # Unpack as root so ownership and xattrs can be restored.
        demo_sv.run_as_root([
            'tar',
            'xzf',
            tar_file.name,
            '--xattrs',
            '-C',
            unpacked_sv.path(),
        ])

    demo_render = render_demo_subvols(create_ops=demo_sv_name)
    # Tar does not preserve the original's cloned extents of
    # zeros
    demo_render[1]['56KB_nuls'] = ['(File d57344)']
    demo_render[1]['56KB_nuls_clone'] = ['(File d57344)']
    # Tar does not preserve unix domain sockets, as these are usable only
    # for the lifetime of the associated process and should therefore be
    # safe to ignore.
    demo_render[1].pop('unix_sock')

    self.assertEqual(
        demo_render,
        render_sendstream(unpacked_sv.mark_readonly_and_get_sendstream()),
    )
def receive(self, from_file):
    """Context manager: `btrfs receive` a sendstream from `from_file`
    into this subvolume's (empty) wrapper directory, then rename the
    received subvol to this subvolume's own name."""
    # At present, we always have an empty wrapper dir to receive into.
    # If this changes, we could make a tempdir inside `parent_fd`.
    with open_fd(
        os.path.dirname(self.path()), os.O_RDONLY | os.O_DIRECTORY,
    ) as parent_fd:
        wrapper_dir_contents = os.listdir(parent_fd)
        assert wrapper_dir_contents == [], wrapper_dir_contents
        try:
            with self.popen_as_root([
                'btrfs', 'receive',
                # Future: If we get `pass_fds` support, use `/proc/self/fd`
                Path('/proc') / str(os.getpid()) / 'fd' / str(parent_fd),
            ], _subvol_exists=False, stdin=from_file):
                yield
        finally:
            # `btrfs receive` names the new subvol after the stream's
            # origin; rename it (via dir fds, to be race-free) to the
            # name this wrapper expects.
            received_names = os.listdir(parent_fd)
            assert len(received_names) <= 1, received_names
            if received_names:
                os.rename(
                    received_names[0],
                    os.path.basename(self.path()),
                    src_dir_fd=parent_fd,
                    dst_dir_fd=parent_fd,
                )
                # This may be a **partially received** subvol.  If these
                # semantics turn out to be broken for our purposes, we
                # can try to clean up the subvolume on error instead,
                # but at present it seems easier to leak it, and let the
                # GC code delete it later.
                self._exists = True