def cmd_export_overlay(pod_id, output_path, filter_patterns, *, debug=False):
    """Export a pod's overlay upper directory to ``output_path`` via rsync.

    Requires root.  ``output_path`` must not exist yet.
    """
    oses.assert_root_privilege()
    ASSERT.not_predicate(output_path, g1.files.lexists)
    # Exclude pod-generated files.
    # TODO: Right now we hard-code the list, but this is fragile.
    filter_args = [
        '--exclude=' + relpath for relpath in (
            '/etc/machine-id',
            '/var/lib/dbus/machine-id',
            '/etc/hostname',
            '/etc/hosts',
            '/etc/systemd',
            '/etc/.pwd.lock',
            '/etc/mtab',
            # Remove distro unit files.
            '/etc/systemd/system',
            '/lib/systemd/system',
            '/usr/lib/systemd/system',
        )
    ]
    for pair in filter_patterns:
        filter_args.append('--%s=%s' % pair)
    if debug:
        # Log which files are included/excluded due to filter rules.
        filter_args.append('--debug=FILTER2')
    # Hold the active-dir lock only long enough to grab the pod dir lock;
    # the (potentially slow) rsync runs under the pod dir lock alone.
    with locks.acquiring_exclusive(_get_active_path()):
        pod_dir_path = ASSERT.predicate(_get_pod_dir_path(pod_id), Path.is_dir)
        pod_dir_lock = ASSERT.true(locks.try_acquire_exclusive(pod_dir_path))
    try:
        upper_path = _get_upper_path(pod_dir_path)
        bases.rsync_copy(upper_path, output_path, filter_args)
    finally:
        pod_dir_lock.release()
        pod_dir_lock.close()
def generate_unit_file(root_path, pod_name, pod_version, app):
    """Create the systemd unit file and its ``wants`` symlink for ``app``.

    Both target paths must not already exist.
    """
    LOG.info('create unit file: %s', app.name)
    pod_etc_path = ASSERT.predicate(_get_pod_etc_path(root_path), Path.is_dir)
    # Write the unit file content.
    unit_path = ASSERT.not_predicate(
        _get_pod_unit_path(pod_etc_path, app),
        g1.files.lexists,
    )
    unit_path.write_text(_generate_unit_file_content(pod_name, pod_version, app))
    # Enable the unit via a relative symlink in the wants directory.
    wants_path = ASSERT.not_predicate(
        _get_pod_wants_path(pod_etc_path, app),
        g1.files.lexists,
    )
    wants_path.symlink_to(Path('..') / _get_pod_unit_filename(app))
def _umount(path):
    """Unmount ``path``, tolerating whitelisted (benign) umount errors.

    Raises:
        subprocess.CalledProcessError: If ``umount`` fails with stderr that
            does not match ``_UMOUNT_ERROR_WHITELIST``.
    """
    ASSERT.not_predicate(path, Path.is_symlink)
    LOG.info('umount: %s', path)
    try:
        with scripts.doing_capture_stderr():
            scripts.run(['umount', path])
    except subprocess.CalledProcessError as exc:
        # BUG FIX: the second positional argument of a compiled pattern's
        # ``search`` is ``pos`` (a start index), not flags.  The original
        # passed ``re.MULTILINE`` (== 8) here, which silently skipped the
        # first 8 characters of stderr.  If MULTILINE matching is intended,
        # it must be set where ``_UMOUNT_ERROR_WHITELIST`` is compiled
        # (e.g. ``re.compile(pattern, re.MULTILINE)``) — TODO confirm there.
        if _UMOUNT_ERROR_WHITELIST.search(exc.stderr):
            # Benign failure (e.g. already unmounted); log and move on.
            LOG.debug('umount err: %s, %s', path, exc.stderr, exc_info=True)
        else:
            LOG.error('umount err: %s, %s', path, exc.stderr)
            raise
def _move_pod_dir_to_graveyard(dir_path):
    """Move a pod directory into the graveyard directory.

    If a directory with the same name already exists in the graveyard, the
    incoming directory is renamed with a fresh pod id suffix.

    Returns:
        The destination path under the graveyard.
    """
    dst_path = _get_graveyard_path() / dir_path.name
    if g1.files.lexists(dst_path):
        # BUG FIX: ``Path.with_name`` returns a new path object; the
        # original discarded its result, so the rename-on-collision never
        # took effect and the lexists assertion below always failed.
        dst_path = dst_path.with_name(
            '%s_%s' % (dst_path.name, models.generate_pod_id())
        )
        LOG.debug(
            'rename duplicated pod directory under graveyard: %s -> %s',
            dir_path.name,
            dst_path.name,
        )
    ASSERT.not_predicate(dst_path, g1.files.lexists)
    dir_path.rename(dst_path)
    return dst_path
def cmd_prepare_base_rootfs(image_rootfs_path):
    """Bootstrap a minimal Ubuntu base rootfs at ``image_rootfs_path``.

    Requires root.  The target path must not exist yet.
    """
    ASSERT.not_predicate(image_rootfs_path, Path.exists)
    oses.assert_root_privilege()
    # dbus is installed for convenience, sudo for changing service
    # user/group, and tzdata for /etc/localtime.
    command = [
        'debootstrap',
        '--variant=minbase',
        '--components=main',
        '--include=dbus,sudo,systemd,tzdata',
        models.BASE_IMAGE_RELEASE_CODE_NAME,
        image_rootfs_path,
        'http://us.archive.ubuntu.com/ubuntu/',
    ]
    scripts.run(command)
def cmd_add_ref(pod_id, target_path):
    """Add a reference to pod ``pod_id`` at ``target_path``.

    Requires root.  Returns 0 on success.
    """
    oses.assert_root_privilege()
    with locks.acquiring_shared(_get_active_path()):
        pod_dir_path = ASSERT.predicate(_get_pod_dir_path(pod_id), Path.is_dir)
        ASSERT.not_predicate(target_path, g1.files.lexists)
        _add_ref(pod_dir_path, target_path)
    return 0
def build_image(metadata, make_rootfs, output_path):
    """Build a gzipped image tarball at ``output_path``.

    ``make_rootfs`` is a callback that populates the rootfs directory of a
    staging area; the staging area is created next to ``output_path`` and
    removed afterwards.
    """
    ASSERT.not_predicate(output_path, g1.files.lexists)
    with tempfile.TemporaryDirectory(
        dir=output_path.parent,
        prefix=output_path.name + '-',
    ) as temp_dir_name:
        staging_path = Path(temp_dir_name)
        _write_metadata(metadata, staging_path)
        make_rootfs(get_rootfs_path(staging_path))
        _setup_image_dir(staging_path)
        # Archive the staged metadata and rootfs into the output tarball.
        scripts.run([
            'tar',
            '--create',
            '--file',
            output_path,
            '--gzip',
            '--directory',
            staging_path,
            _METADATA,
            _ROOTFS,
        ])
def cmd_install(
    *,
    image_id=None,
    name=None,
    version=None,
    tag=None,
    xar_name,
    exec_relpath,
):
    """Install a xar from an image.

    The image may be selected either directly by ``image_id`` or looked up
    by ``name``/``version``/``tag``.  Requires root.
    """
    oses.assert_root_privilege()
    with _locking_top_dirs():
        resolved_image_id = image_id
        if resolved_image_id is None:
            # Fall back to looking up the image id from its metadata.
            resolved_image_id = ASSERT.not_none(
                images.find_id(name=name, version=version, tag=tag)
            )
        _install_xar_dir(
            _get_xar_dir_path(xar_name),
            resolved_image_id,
            ASSERT.not_predicate(exec_relpath, Path.is_absolute),
        )
def __post_init__(self):
    """Validate field invariants after dataclass initialization."""
    validate_xar_label(self.label)
    validate_xar_version(self.version)
    # exec_relpath and image must be either both set or both unset.
    ASSERT.not_xor(self.exec_relpath is None, self.image is None)
    relpath = self.exec_relpath
    if relpath is not None:
        # The exec path is interpreted relative to the image rootfs.
        ASSERT.not_predicate(Path(relpath), Path.is_absolute)
def install(self, bundle_dir, target_ops_dir_path):
    """Install a pod bundle into this ops directory.

    Steps, in order: write metadata (first, so uninstall can roll back),
    extract volumes, import images, assign tokens, prepare pods via ctr,
    and install systemd units.  Returns True.

    NOTE(review): reformatted from a collapsed single-line source; block
    boundaries (e.g. the tempfile ``with``) reconstructed from semantics —
    confirm against the original layout.
    """
    ASSERT.isinstance(bundle_dir, PodBundleDir)
    log_args = (bundle_dir.label, bundle_dir.version)
    # Make metadata first so that uninstall may roll back properly.
    LOG.debug('pods install: metadata: %s %s', *log_args)
    metadata, groups = self._make_metadata(bundle_dir.deploy_instruction)
    jsons.dump_dataobject(metadata, self.metadata_path)
    bases.set_file_attrs(self.metadata_path)
    # Sanity check of the just-written metadata file.
    ASSERT.equal(self.label, bundle_dir.label)
    ASSERT.equal(self.version, bundle_dir.version)
    ASSERT.equal(self.metadata, metadata)
    LOG.debug('pods install: pod ids: %s %s: %s', *log_args, ', '.join(groups))
    # Extract each bundle volume into its own directory, preserving
    # ownership and permissions.
    LOG.debug('pods install: volumes: %s %s', *log_args)
    bases.make_dir(self.volumes_dir_path)
    for volume, volume_path in bundle_dir.iter_volumes():
        volume_dir_path = self.volumes_dir_path / volume.name
        LOG.debug('pods: extract: %s -> %s', volume_path, volume_dir_path)
        bases.make_dir(ASSERT.not_predicate(volume_dir_path, Path.exists))
        scripts.tar_extract(
            volume_path,
            directory=volume_dir_path,
            extra_args=(
                '--same-owner',
                '--same-permissions',
            ),
        )
    # Import all container images shipped in the bundle.
    LOG.debug('pods install: images: %s %s', *log_args)
    for _, image_path in bundle_dir.iter_images():
        ctr_scripts.ctr_import_image(image_path)
    # Assign tokens per pod id: maps pod_id -> {alias: assigned token}.
    LOG.debug('pods install: tokens: %s %s', *log_args)
    assignments = {}
    with tokens.make_tokens_database().writing() as active_tokens:
        for pod_id in groups:
            assignments[pod_id] = {
                alias: active_tokens.assign(token_name, pod_id, alias)
                for alias, token_name in
                bundle_dir.deploy_instruction.token_names.items()
            }
    envs = ops_envs.load()
    # Prepare each pod via ctr, feeding the generated config through a
    # temporary file, then pin the pod with a ref under refs_dir_path.
    LOG.debug('pods install: prepare pods: %s %s', *log_args)
    bases.make_dir(self.refs_dir_path)
    for pod_id, group in groups.items():
        pod_config = self._make_pod_config(
            bundle_dir.deploy_instruction,
            target_ops_dir_path,
            systemds.make_envs(
                pod_id,
                self.metadata,
                group.envs,
                envs,
                assignments[pod_id],
            ),
        )
        with tempfile.NamedTemporaryFile() as config_tempfile:
            config_path = Path(config_tempfile.name)
            jsons.dump_dataobject(pod_config, config_path)
            ctr_scripts.ctr_prepare_pod(pod_id, config_path)
        ctr_scripts.ctr_add_ref_to_pod(pod_id, self.refs_dir_path / pod_id)
    # Install the systemd units declared in the metadata, then reload.
    LOG.debug('pods install: systemd units: %s %s', *log_args)
    units = {(pod_id, unit.name): unit
             for pod_id, group in groups.items()
             for unit in group.units}
    for config in self.metadata.systemd_unit_configs:
        systemds.install(
            config,
            self.metadata,
            groups[config.pod_id],
            units[config.pod_id, config.name],
            envs,
            assignments[config.pod_id],
        )
    systemds.daemon_reload()
    return True
def cmd_setup_base_rootfs(image_rootfs_path, prune_stash_path):
    """Set up base rootfs.

    Changes from 18.04 to 20.04.
    * /lib is now a symlink to /usr/lib.
    * system.slice has been removed:
      https://github.com/systemd/systemd/commit/d8e5a9338278d6602a0c552f01f298771a384798

    If ``prune_stash_path`` is set, pruned directory contents are moved
    there instead of being deleted.  Requires root.

    NOTE(review): reformatted from a collapsed single-line source; block
    boundaries reconstructed from semantics — confirm against the original
    layout.
    """
    ASSERT.predicate(image_rootfs_path, Path.is_dir)
    oses.assert_root_privilege()
    # Remove unneeded files.
    for dir_relpath in (
        'usr/share/doc',
        'usr/share/info',
        'usr/share/man',
        'var/cache',
        'var/lib/apt',
        'var/lib/dpkg',
    ):
        dir_path = image_rootfs_path / dir_relpath
        if dir_path.is_dir():
            if prune_stash_path:
                # Stash pruned content rather than deleting it outright.
                dst_path = ASSERT.not_predicate(
                    prune_stash_path / dir_relpath, g1.files.lexists
                )
                dst_path.mkdir(mode=0o755, parents=True, exist_ok=True)
                _move_dir_content(dir_path, dst_path)
            else:
                _clear_dir_content(dir_path)
    # Remove certain config files.
    for path in (
        # Remove this so that systemd-nspawn may set the hostname.
        image_rootfs_path / 'etc/hostname',
        # systemd-nspawn uses machine-id to link journal.
        image_rootfs_path / 'etc/machine-id',
        image_rootfs_path / 'var/lib/dbus/machine-id',
        # debootstrap seems to copy this file from the build machine,
        # which is not the host machine that runs this image; so let's
        # replace this with a generic stub.
        image_rootfs_path / 'etc/resolv.conf',
        image_rootfs_path / 'run/systemd/resolve/stub-resolv.conf',
    ):
        LOG.info('remove: %s', path)
        g1.files.remove(path)
    # Replace certain config files.
    for path, content in (
        (image_rootfs_path / 'etc/default/locale', _LOCALE),
        (image_rootfs_path / 'etc/resolv.conf', _RESOLV_CONF),
        (image_rootfs_path / 'etc/systemd/journald.conf', _JOURNALD_CONF),
    ):
        LOG.info('replace: %s', path)
        path.write_text(content)
    # Remove unneeded unit files: everything not listed in _BASE_UNITS is
    # deleted, and every _BASE_UNITS entry must be seen exactly once.
    base_units = set(_BASE_UNITS)
    for unit_dir_path in (
        image_rootfs_path / 'etc/systemd/system',
        image_rootfs_path / 'usr/lib/systemd/system',
    ):
        if not unit_dir_path.exists():
            continue
        LOG.info('clean up unit files in: %s', unit_dir_path)
        for unit_path in unit_dir_path.iterdir():
            if unit_path.name in base_units:
                base_units.remove(unit_path.name)
                continue
            # There should have no duplicated units, right?
            ASSERT.not_in(unit_path.name, _BASE_UNITS)
            LOG.info('remove: %s', unit_path)
            g1.files.remove(unit_path)
    # Every expected base unit must have been found.
    ASSERT.empty(base_units)
    # Create unit files (directories, regular files, or symlinks), all
    # owned by root.
    for unit_dir_path, unit_files in (
        (image_rootfs_path / 'etc/systemd/system', _ETC_UNIT_FILES),
        (image_rootfs_path / 'usr/lib/systemd/system', _LIB_UNIT_FILES),
    ):
        for unit_file in unit_files:
            ASSERT.predicate(unit_dir_path, Path.is_dir)
            path = unit_dir_path / unit_file.relpath
            LOG.info('create: %s', path)
            if unit_file.kind is _UnitFile.Kinds.DIRECTORY:
                path.mkdir(mode=0o755)
            elif unit_file.kind is _UnitFile.Kinds.FILE:
                path.write_text(unit_file.content)
                path.chmod(0o644)
            else:
                ASSERT.is_(unit_file.kind, _UnitFile.Kinds.SYMLINK)
                path.symlink_to(unit_file.content)
            bases.chown_root(path)
    # Create ``pod-exit`` script and exit status directory.
    pod_exit_path = image_rootfs_path / 'usr/sbin/pod-exit'
    LOG.info('create: %s', pod_exit_path)
    pod_exit_path.write_text(_POD_EXIT)
    bases.setup_file(pod_exit_path, 0o755, bases.chown_root)
    bases.make_dir(image_rootfs_path / 'var/lib/pod', 0o755, bases.chown_root)
    bases.make_dir(
        image_rootfs_path / 'var/lib/pod/exit-status', 0o755, bases.chown_root
    )