def generate_sources_list(cfg, release, mirrors, target=None):
    """generate_sources_list
       create a source.list file based on a custom or default template
       by replacing mirrors and release in the template

       :param cfg: apt config dict; 'sources_list' may hold a custom
                   template, 'disable_suites' a list of suites to disable
       :param release: codename of the target release (e.g. 'focal')
       :param mirrors: dict of mirror placeholder names to URLs
       :param target: optional path to the target filesystem root
    """
    default_mirrors = get_default_mirrors(util.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        # FIX: the adjacent string literals previously concatenated to
        # "...modifymirrors..." — the separating space was missing.
        LOG.info(
            "No custom template provided, fall back to modify "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(paths.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    # keep the previous sources.list around for debugging/recovery
    orig = paths.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
def _apt_source_list(self, cfg, expected):
    "_apt_source_list - Test rendering from template (generic)"
    arch = util.get_architecture()
    # would fail inside the unittest context
    bpath = "curtin.commands.apt_config."
    upath = bpath + "util."

    # patch out everything that would touch the host system
    patch_table = (
        (upath + "get_architecture", "mockga", {'return_value': arch}),
        (upath + "write_file", "mockwrite", {}),
        (bpath + "os.rename", "mockrename", {}),
        (upath + "load_file", "mockload_file",
         {'return_value': MOCKED_APT_SRC_LIST}),
        (bpath + "distro.lsb_release", "mock_lsb_release",
         {'return_value': {'codename': 'fakerel'}}),
        (bpath + "apply_preserve_sources_list",
         "mock_apply_preserve_sources_list", {}),
    )
    for path, attr, kwargs in patch_table:
        self.add_patch(path, attr, **kwargs)

    apt_config.handle_apt(cfg, TARGET)

    self.mockga.assert_called_with(TARGET)
    self.mock_apply_preserve_sources_list.assert_called_with(TARGET)
    self.mockwrite.assert_has_calls([
        call(paths.target_path(TARGET, '/etc/apt/sources.list'),
             expected, mode=0o644)])
def _apt_source_list(cfg, expected):
    "_apt_source_list - Test rendering from template (generic)"
    arch = util.get_architecture()
    # would fail inside the unittest context

    # mock everything with host side effects: architecture lookup, file
    # writes, renames, reading the host sources.list, and lsb_release
    with mock.patch.object(util, 'get_architecture',
                           return_value=arch) as mockga, \
            mock.patch.object(util, 'write_file') as mockwrite, \
            mock.patch.object(os, 'rename'), \
            mock.patch.object(util, 'load_file',
                              return_value=MOCKED_APT_SRC_LIST), \
            mock.patch.object(util, 'lsb_release',
                              return_value={'codename': 'fakerel'}):
        apt_config.handle_apt(cfg, TARGET)

    mockga.assert_called_with("/")
    cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'
    cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
    expected_writes = [
        call(util.target_path(TARGET, '/etc/apt/sources.list'),
             expected, mode=0o644),
        call(util.target_path(TARGET, cloudfile), cloudconf, mode=0o644),
    ]
    mockwrite.assert_has_calls(expected_writes)
def _get_default_params():
    """get_default_params
       Get the most basic default mrror and release info to be used in tests
    """
    arch = util.get_architecture()
    return {
        'RELEASE': distro.lsb_release()['codename'],
        'MIRROR': apt_config.get_default_mirrors(arch)["PRIMARY"],
    }
def test_mirror(self):
    """Setting a mirror on the model must surface in the rendered config."""
    from curtin.commands.apt_config import get_mirror
    from curtin.util import get_architecture

    expected = 'http://my-mirror'
    model = SubiquityModel('test')
    model.mirror.set_mirror(expected)
    rendered = model.render('ident')

    actual = get_mirror(rendered["apt"], "primary", get_architecture())
    self.assertEqual(actual, expected)
def get_default_mirrors(arch=None):
    """returns the default mirrors for the target. These depend on the
       architecture, for more see:
       https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
    if arch is None:
        arch = util.get_architecture()
    # primary archive vs ports archive, keyed by architecture membership
    for arches, mirror_set in ((PRIMARY_ARCHES, PRIMARY_ARCH_MIRRORS),
                               (PORTS_ARCHES, PORTS_MIRRORS)):
        if arch in arches:
            # copy so callers can mutate their result safely
            return mirror_set.copy()
    raise ValueError("No default mirror known for arch %s" % arch)
def test_mirror_default(self):
    """test_mirror_default - Test without defining a mirror"""
    arch = util.get_architecture()
    defaults = apt_config.get_default_mirrors(arch)
    info = apt_config.find_apt_mirror_info({}, arch)

    # with an empty config every entry must fall back to the arch default
    for key, want in (('MIRROR', defaults["PRIMARY"]),
                      ('PRIMARY', defaults["PRIMARY"]),
                      ('SECURITY', defaults["SECURITY"])):
        self.assertEqual(info[key], want)
def test_apt_srcl_custom(self):
    """test_apt_srcl_custom - Test rendering a custom source template"""
    cfg = yaml.safe_load(YAML_TEXT_CUSTOM_SL)
    target = self.new_root

    arch = util.get_architecture()
    # would fail inside the unittest context
    with mock.patch.object(util, 'get_architecture', return_value=arch), \
            mock.patch.object(distro, 'lsb_release',
                              return_value={'codename': 'fakerel'}):
        apt_config.handle_apt(cfg, target)

    rendered = util.load_file(
        paths.target_path(target, "/etc/apt/sources.list"))
    self.assertEqual(EXPECTED_CONVERTED_CONTENT, rendered)
def generate_sources_list(cfg, release, mirrors, target=None):
    """generate_sources_list
       create a source.list file based on a custom or default template
       by replacing mirrors and release in the template

       :param cfg: apt config dict; 'sources_list' may hold a custom
                   template, 'disable_suites' a list of suites to disable
       :param release: codename of the target release
       :param mirrors: dict of mirror placeholder names to URLs
       :param target: optional path to the target filesystem root
       :raises IOError: if the cloud-init protection file cannot be written
    """
    default_mirrors = get_default_mirrors(util.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        # FIX: the adjacent string literals previously concatenated to
        # "...modifymirrors..." — the separating space was missing.
        LOG.info(
            "No custom template provided, fall back to modify "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(util.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    # keep the previous sources.list around for debugging/recovery
    orig = util.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)

    # protect the just generated sources.list from cloud-init
    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
    # this has to work with older cloud-init as well, so use old key
    cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
    try:
        util.write_file(util.target_path(target, cloudfile),
                        cloudconf, mode=0o644)
    except IOError:
        LOG.exception("Failed to protect source.list from cloud-init in (%s)",
                      util.target_path(target, cloudfile))
        raise
def get_flash_kernel_pkgs(arch=None, uefi=None):
    """Return the flash-kernel package list for arch, or None.

    Only non-UEFI arm systems get packages; any failure of the helper
    command is treated as "no packages".
    """
    arch = util.get_architecture() if arch is None else arch
    uefi = util.is_uefi_bootable() if uefi is None else uefi

    # UEFI systems and non-arm architectures never need flash-kernel
    if uefi or not arch.startswith('arm'):
        return None

    try:
        fk_packages, _ = util.subp(['list-flash-kernel-packages'],
                                   capture=True)
    except util.ProcessExecutionError:
        # Ignore errors
        return None
    return fk_packages
def test_mirror_arches_sysdefault(self):
    """test_mirror_arches - Test arches falling back to sys default"""
    arch = util.get_architecture()
    defaults = apt_config.get_default_mirrors(arch)
    pmir = defaults["PRIMARY"]
    smir = defaults["SECURITY"]

    # none of these arches can match, forcing the system default
    def entry(archname, uri):
        return {'arches': [archname], 'uri': uri}

    cfg = {"primary": [entry("thisarchdoesntexist_64", "notthis"),
                       entry("thisarchdoesntexist", "notthiseither")],
           "security": [entry("thisarchdoesntexist", "nothat"),
                        entry("thisarchdoesntexist_64", "nothateither")]}

    mirrors = apt_config.find_apt_mirror_info(cfg, arch)
    self.assertEqual(mirrors['MIRROR'], pmir)
    self.assertEqual(mirrors['PRIMARY'], pmir)
    self.assertEqual(mirrors['SECURITY'], smir)
def handle_apt(cfg, target=None):
    """handle_apt
       process the config for apt_config. This can be called from
       curthooks if a global apt config was provided or via the "apt"
       standalone command.

       :param cfg: the 'apt' configuration dict
       :param target: optional path to the target filesystem root
    """
    release = distro.lsb_release(target=target)['codename']
    arch = util.get_architecture(target)
    mirrors = find_apt_mirror_info(cfg, arch)
    LOG.debug("Apt Mirror info: %s", mirrors)

    apply_debconf_selections(cfg, target)

    if not config.value_as_boolean(cfg.get('preserve_sources_list', True)):
        generate_sources_list(cfg, release, mirrors, target)
        apply_preserve_sources_list(target)
        rename_apt_lists(mirrors, target)

    try:
        # FIX: previously used "target + APT_PROXY_FN", which raises
        # TypeError when target is None (the declared default); resolve
        # paths with paths.target_path like the rest of this module.
        apply_apt_proxy_config(cfg,
                               paths.target_path(target, APT_PROXY_FN),
                               paths.target_path(target, APT_CONFIG_FN))
    except (IOError, OSError):
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if 'sources' in cfg:
        # note: this aliases (and mutates) the mirrors dict on purpose,
        # adding RELEASE/MIRROR keys used for template expansion
        params = mirrors
        params['RELEASE'] = release
        params['MIRROR'] = mirrors["MIRROR"]

        matcher = None
        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        if matchcfg:
            matcher = re.compile(matchcfg).search

        add_apt_sources(cfg['sources'], target,
                        template_params=params, aa_repo_match=matcher)
def rename_apt_lists(new_mirrors, target=None):
    """rename_apt_lists - rename apt lists to preserve old cache data

    :param new_mirrors: dict mapping mirror names (e.g. PRIMARY/SECURITY)
                        to the newly configured mirror URLs
    :param target: optional path to the target filesystem root
    """
    default_mirrors = get_default_mirrors(util.get_architecture(target))

    pre = paths.target_path(target, APT_LISTS)
    for (name, omirror) in default_mirrors.items():
        nmirror = new_mirrors.get(name)
        if not nmirror:
            continue
        oprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(omirror)
        nprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(nmirror)
        if oprefix == nprefix:
            continue
        olen = len(oprefix)
        for filename in glob.glob("%s_*" % oprefix):
            newname = "%s%s" % (nprefix, filename[olen:])
            LOG.debug("Renaming apt list %s to %s", filename, newname)
            try:
                os.rename(filename, newname)
            except OSError:
                # since this is a best effort task, warn with but don't fail
                # FIX: Logger.warn is a deprecated alias; use warning()
                LOG.warning("Failed to rename apt list:", exc_info=True)
def find_apt_mirror_info(cfg, arch=None):
    """find_apt_mirror_info
       find an apt_mirror given the cfg provided.
       It can check for separate config of primary and security mirrors
       If only primary is given security is assumed to be equal to primary
       If the generic apt_mirror is given that is defining for both
    """
    if arch is None:
        arch = util.get_architecture()
    LOG.debug("got arch for mirror selection: %s", arch)

    primary = get_mirror(cfg, "primary", arch)
    LOG.debug("got primary mirror: %s", primary)
    security = get_mirror(cfg, "security", arch)
    LOG.debug("got security mirror: %s", security)

    # Note: curtin has no cloud-datasource fallback
    info = update_mirror_info(primary, security, arch)

    # less complex replacements use only MIRROR, derive from primary
    info["MIRROR"] = info["PRIMARY"]

    return info
class TestOldAptAbs(VMBaseClass):
    """TestOldAptAbs - Basic tests for old apt features of curtin"""
    # VM-test knobs: no console interaction, no extra disks/fstab/dname checks
    interactive = False
    extra_disks = []
    fstab_expected = {}
    disk_to_check = []
    # shell snippet run in the installed system to collect the files the
    # test_* methods below inspect (sources.list, debconf state, proxy conf,
    # cloud-init seed/preserve files)
    collect_scripts = VMBaseClass.collect_scripts + [textwrap.dedent("""
        cd OUTPUT_COLLECT_D
        cat /etc/fstab > fstab
        ls /dev/disk/by-dname > ls_dname
        find /etc/network/interfaces.d > find_interfacesd
        grep -A 3 "Name: debconf/priority" /var/cache/debconf/config.dat > debc
        apt-config dump > aptconf
        cp /etc/apt/apt.conf.d/90curtin-aptproxy .
        cp /etc/apt/sources.list .
        cp /etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg .
        cp /etc/cloud/cloud.cfg.d/90_dpkg.cfg .
        """)]
    # pick config + expected mirrors by the architecture running the test;
    # primary arches use archive.ubuntu.com, ports arches ports.ubuntu.com
    arch = util.get_architecture()
    if arch in ['amd64', 'i386']:
        conf_file = "examples/tests/test_old_apt_features.yaml"
        exp_mirror = "http://us.archive.ubuntu.com/ubuntu"
        exp_secmirror = "http://archive.ubuntu.com/ubuntu"
    if arch in ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']:
        conf_file = "examples/tests/test_old_apt_features_ports.yaml"
        exp_mirror = "http://ports.ubuntu.com/ubuntu-ports"
        exp_secmirror = "http://ports.ubuntu.com/ubuntu-ports"

    def test_output_files_exist(self):
        """test_output_files_exist - Check if all output files exist"""
        self.output_files_exist(
            ["debc", "aptconf", "sources.list", "curtin-preserve-sources.cfg",
             "90_dpkg.cfg"])

    def test_preserve_source(self):
        """test_preserve_source - no clobbering sources.list by cloud-init"""
        self.check_file_regex("curtin-preserve-sources.cfg",
                              "apt_preserve_sources_list.*true")

    def test_debconf(self):
        """test_debconf - Check if debconf is in place"""
        self.check_file_strippedline("debc", "Value: low")

    def test_aptconf(self):
        """test_aptconf - Check if apt conf for proxy is in place"""
        # this gets configured by tools/launch and get_apt_proxy in
        # tests/vmtests/__init__.py, so compare with those, if set
        if not self.proxy:
            self.skipTest('Host apt-proxy not set')
        self.output_files_exist(["90curtin-aptproxy"])
        rproxy = r"Acquire::http::Proxy \"" + re.escape(self.proxy) + r"\";"
        self.check_file_regex("aptconf", rproxy)
        self.check_file_regex("90curtin-aptproxy", rproxy)

    def test_mirrors(self):
        """test_mirrors - Check for mirrors placed in source.list"""
        lines = self.load_collect_file('sources.list').splitlines()
        data = sources_to_dict(lines)
        self.assertIn(self.exp_secmirror, data)
        self.assertIn(self.exp_mirror, data)

        components = sorted(["main", "restricted", "universe", "multiverse"])
        self.assertEqual(
            components,
            sorted(data[self.exp_secmirror]['%s-security' % self.release]))
        self.assertEqual(components,
                         sorted(data[self.exp_mirror][self.release]))

    def test_cloudinit_seeded(self):
        content = self.load_collect_file("90_dpkg.cfg")
        # not the greatest test, but we seeded NoCloud as the only datasource
        # in examples/tests/test_old_apt_features.yaml. Just verify that
        # there are no others there.
        self.assertIn("nocloud", content.lower())
        self.assertNotIn("maas", content.lower())
def setup_grub(cfg, target):
    """Install and configure grub in the target filesystem.

    Resolves the devices to install grub onto (from config, storage config
    items tagged 'grub_device', or mounted block devices as a fallback),
    installs UEFI packages when booted via UEFI, then runs the curtin
    'install-grub' helper inside a chroot of target.
    """
    # target is the path to the mounted filesystem

    # FIXME: these methods need moving to curtin.block
    # and using them from there rather than commands.block_meta
    from curtin.commands.block_meta import (extract_storage_ordered_dict,
                                            get_path_to_storage_volume)

    grubcfg = cfg.get('grub', {})

    # copy legacy top level name
    if 'grub_install_devices' in cfg and 'install_devices' not in grubcfg:
        grubcfg['install_devices'] = cfg['grub_install_devices']

    LOG.debug("setup grub on target %s", target)
    # if there is storage config, look for devices tagged with 'grub_device'
    storage_cfg_odict = None
    try:
        storage_cfg_odict = extract_storage_ordered_dict(cfg)
    except ValueError:
        pass

    if storage_cfg_odict:
        storage_grub_devices = []
        for item_id, item in storage_cfg_odict.items():
            if not item.get('grub_device'):
                continue
            LOG.debug("checking: %s", item)
            storage_grub_devices.append(
                get_path_to_storage_volume(item_id, storage_cfg_odict))
        # tagged storage devices take precedence over any configured list
        if len(storage_grub_devices) > 0:
            grubcfg['install_devices'] = storage_grub_devices

    LOG.debug("install_devices: %s", grubcfg.get('install_devices'))
    if 'install_devices' in grubcfg:
        instdevs = grubcfg.get('install_devices')
        if isinstance(instdevs, str):
            instdevs = [instdevs]
        if instdevs is None:
            LOG.debug("grub installation disabled by config")
    else:
        # If there were no install_devices found then we try to do the right
        # thing. That right thing is basically installing on all block
        # devices that are mounted. On powerpc, though it means finding PrEP
        # partitions.
        devs = block.get_devices_for_mp(target)
        blockdevs = set()
        for maybepart in devs:
            try:
                (blockdev, part) = block.get_blockdev_for_partition(maybepart)
                blockdevs.add(blockdev)
            except ValueError:
                # if there is no syspath for this device such as a lvm
                # or raid device, then a ValueError is raised here.
                LOG.debug("failed to find block device for %s", maybepart)

        if platform.machine().startswith("ppc64"):
            # assume we want partitions that are 4100 (PReP). The snippet
            # here just prints the partition number partitions of that type.
            shnip = textwrap.dedent("""
                export LANG=C;
                for d in "$@"; do
                    sgdisk "$d" --print |
                        awk '$6 == prep { print d $1 }' "d=$d" prep=4100
                done
                """)
            try:
                out, err = util.subp(
                    ['sh', '-c', shnip, '--'] + list(blockdevs),
                    capture=True)
                instdevs = str(out).splitlines()
                if not instdevs:
                    LOG.warn("No power grub target partitions found!")
                    instdevs = None
            except util.ProcessExecutionError as e:
                LOG.warn("Failed to find power grub partitions: %s", e)
                instdevs = None
        else:
            instdevs = list(blockdevs)

    # UEFI requires grub-efi-{arch}. If a signed version of that package
    # exists then it will be installed.
    if util.is_uefi_bootable():
        arch = util.get_architecture()
        pkgs = ['grub-efi-%s' % arch]

        # Architecture might support a signed UEFI loader
        uefi_pkg_signed = 'grub-efi-%s-signed' % arch
        if util.has_pkg_available(uefi_pkg_signed):
            pkgs.append(uefi_pkg_signed)

        # AMD64 has shim-signed for SecureBoot support
        if arch == "amd64":
            pkgs.append("shim-signed")

        # Install the UEFI packages needed for the architecture
        util.install_packages(pkgs, target=target)

    env = os.environ.copy()

    # REPLACE_GRUB_LINUX_DEFAULT is consumed by the install-grub helper
    replace_default = grubcfg.get('replace_linux_default', True)
    if str(replace_default).lower() in ("0", "false"):
        env['REPLACE_GRUB_LINUX_DEFAULT'] = "0"
    else:
        env['REPLACE_GRUB_LINUX_DEFAULT'] = "1"

    if instdevs:
        instdevs = [block.get_dev_name_entry(i)[1] for i in instdevs]
    else:
        # "none" tells install-grub to skip installing to a device
        instdevs = ["none"]

    if util.is_uefi_bootable() and grubcfg.get('update_nvram', True):
        uefi_remove_old_loaders(grubcfg, target)

    LOG.debug("installing grub to %s [replace_default=%s]",
              instdevs, replace_default)
    with util.ChrootableTarget(target):
        args = ['install-grub']
        if util.is_uefi_bootable():
            args.append("--uefi")
            if grubcfg.get('update_nvram', True):
                LOG.debug("GRUB UEFI enabling NVRAM updates")
                args.append("--update-nvram")
            else:
                LOG.debug("NOT enabling UEFI nvram updates")
                LOG.debug("Target system may not boot")
        args.append(target)

        # capture stdout and stderr joined.
        join_stdout_err = ['sh', '-c', 'exec "$0" "$@" 2>&1']
        out, _err = util.subp(
            join_stdout_err + args + instdevs, env=env, capture=True)
        LOG.debug("%s\n%s\n", args, out)

    if util.is_uefi_bootable() and grubcfg.get('update_nvram', True):
        uefi_reorder_loaders(grubcfg, target)
# kernel modules curtin needs available on the host; populated below
REQUIRED_KERNEL_MODULES = [
    # kmod name
]

# precise shipped python-oauth; later releases use oauthlib
if lsb_release()['codename'] == "precise":
    REQUIRED_IMPORTS.append(('import oauth.oauth', 'python-oauth', None),)
else:
    REQUIRED_IMPORTS.append(
        ('import oauthlib.oauth1', 'python-oauthlib', 'python3-oauthlib'),)

# zfs is > trusty only
if not lsb_release()['codename'] in ["precise", "trusty"]:
    REQUIRED_EXECUTABLES.append(('zfs', 'zfsutils-linux'))
    REQUIRED_KERNEL_MODULES.append('zfs')

# non-UEFI arm systems boot via flash-kernel
if not is_uefi_bootable() and 'arm' in get_architecture():
    REQUIRED_EXECUTABLES.append(('flash-kernel', 'flash-kernel'))


class MissingDeps(Exception):
    # raised when host dependencies are missing; 'fatal' marks deps that
    # have no installable package (None entries in deps)
    def __init__(self, message, deps):
        self.message = message
        if isinstance(deps, str) or deps is None:
            deps = [deps]
        # keep only real package names; a None entry means unresolvable
        self.deps = [d for d in deps if d is not None]
        self.fatal = None in deps

    def __str__(self):
        # NOTE(review): this method appears truncated in the visible source;
        # only the fatal/empty-deps path is shown here.
        if self.fatal:
            if not len(self.deps):
                return self.message + " Unresolvable."
def __init__(self):
    # deep-copy so instance-level mutations never leak back into the
    # shared module-level DEFAULT template
    self.config = copy.deepcopy(DEFAULT)
    self.architecture = get_architecture()
    # NOTE(review): get_mirror() presumably derives the mirror from
    # self.config / self.architecture, so it must run after both are
    # assigned — confirm before reordering
    self.default_mirror = self.get_mirror()