def test_system_upgrade_redhat_dnf(self, m_which, m_subp):
    """system_upgrade osfamily=redhat calls run_yum_command mode=upgrade"""
    osfamily = distro.DISTROS.redhat
    target = 'mytarget'
    mode = 'upgrade'
    m_which.return_value = '/usr/bin/dnf'
    pkglist = []
    expected_calls = [
        mock.call([
            'dnf', '--assumeyes', '--quiet', mode, '--downloadonly',
            '--setopt=keepcache=1'
        ] + pkglist, env=None, retries=[1] * 10,
            target=paths.target_path(target)),
        mock.call(['dnf', '--assumeyes', '--quiet', mode, '--cacheonly'] +
                  pkglist, env=None, target=paths.target_path(target))
    ]

    # call yum_install through run_yum_command
    distro.run_yum_command(mode, pkglist, target=target)
    m_subp.assert_has_calls(expected_calls)

    # call system_upgrade via osfamily; note that we expect the same calls
    # but to prevent a false positive we clear m_subp's call stack.
    m_subp.reset_mock()
    self.assertFalse(m_subp.called)
    distro.system_upgrade(target=target, osfamily=osfamily)
    m_subp.assert_has_calls(expected_calls)
def write_grub_config(target, grubcfg, grub_conf, new_params):
    replace_default = config.value_as_boolean(
        grubcfg.get('replace_linux_default', True))
    if replace_default:
        replace_grub_cmdline_linux_default(target, new_params)

    probe_os = config.value_as_boolean(
        grubcfg.get('probe_additional_os', False))
    if not probe_os:
        probe_content = [
            ('# Curtin disable grub os prober that might find other '
             'OS installs.'),
            'GRUB_DISABLE_OS_PROBER="true"',
            ''
        ]
        util.write_file(target_path(target, grub_conf),
                        "\n".join(probe_content), omode='a+')

    # write GRUB_TERMINAL from the config unless it is set to 'unmodified'
    grub_terminal = grubcfg.get('terminal', 'console')
    if not isinstance(grub_terminal, str):
        raise ValueError("Unexpected value %s for 'terminal'. "
                         "Value must be a string" % grub_terminal)
    if not grub_terminal.lower() == "unmodified":
        terminal_content = [
            '# Curtin configured GRUB_TERMINAL value',
            'GRUB_TERMINAL="%s"' % grub_terminal
        ]
        util.write_file(target_path(target, grub_conf),
                        "\n".join(terminal_content), omode='a+')
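# The sketch below is editorial, not part of curtin: it shows how the grubcfg
# keys handled above ('replace_linux_default', 'probe_additional_os',
# 'terminal') fit together. The target path, config file path and kernel
# parameter are hypothetical values chosen for illustration.
def _example_write_grub_config(target='/tmp/target'):
    grubcfg = {
        'replace_linux_default': True,   # handled by replace_grub_cmdline_linux_default()
        'probe_additional_os': False,    # appends GRUB_DISABLE_OS_PROBER="true"
        'terminal': 'console',           # appended as GRUB_TERMINAL="console"
    }
    write_grub_config(target, grubcfg, '/etc/default/grub.d/50-curtin.cfg',
                      new_params=['console=ttyS0,115200'])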
def test_system_upgrade_debian(self, m_subp, m_which, m_apt_update, m_env): """system_upgrade osfamily=debian calls run_apt_command mode=upgrade""" osfamily = distro.DISTROS.debian target = 'mytarget' m_env.copy.return_value = {} m_which.return_value = None env = {'DEBIAN_FRONTEND': 'noninteractive'} pkglist = [] apt_base = [ 'apt-get', '--quiet', '--assume-yes', '--option=Dpkg::options::=--force-unsafe-io', '--option=Dpkg::Options::=--force-confold' ] apt_cmd = apt_base + ['dist-upgrade'] + pkglist auto_remove = apt_base + ['autoremove'] expected_calls = [ mock.call(apt_cmd, env=env, target=paths.target_path(target)), mock.call(auto_remove, env=env, target=paths.target_path(target)), ] which_calls = [mock.call('eatmydata', target=target)] apt_update_calls = [ mock.call(target, env=env, comment=' '.join(apt_cmd)) ] distro.system_upgrade(target=target, osfamily=osfamily) m_which.assert_has_calls(which_calls) m_apt_update.assert_has_calls(apt_update_calls) m_subp.assert_has_calls(expected_calls)
def generate_sources_list(cfg, release, mirrors, target=None):
    """ generate_sources_list
        create a sources.list file based on a custom or default template
        by replacing mirrors and release in the template
    """
    default_mirrors = get_default_mirrors(distro.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        LOG.info(
            "No custom template provided, fall back to modifying "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(paths.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    orig = paths.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
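# Editorial sketch, not part of curtin: generate_sources_list() substitutes
# 'RELEASE' plus every key of the mirrors dict into the template, so a custom
# template from the apt config could look like the one below. The mirror key
# names, URLs, release and target path here are illustrative assumptions; the
# 'disable_suites' result is then applied to the rendered text as shown above.
def _example_generate_sources_list(target='/tmp/target'):
    cfg = {
        'sources_list': ("deb $MIRROR $RELEASE main universe\n"
                         "deb $SECURITY $RELEASE-security main universe\n"),
    }
    mirrors = {'MIRROR': 'http://archive.ubuntu.com/ubuntu',
               'SECURITY': 'http://security.ubuntu.com/ubuntu'}
    generate_sources_list(cfg, 'focal', mirrors, target=target)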
def setUp(self): super(TestWriteGrubConfig, self).setUp() self.target = self.tmp_dir() self.grubdefault = "/etc/default/grub" self.grubconf = "/etc/default/grub.d/50-curtin.cfg" self.target_grubdefault = paths.target_path(self.target, self.grubdefault) self.target_grubconf = paths.target_path(self.target, self.grubconf)
def prepare_grub_dir(target, grub_cfg):
    util.ensure_dir(os.path.dirname(target_path(target, grub_cfg)))

    # LP: #1179940. The 50-cloudimg-settings.cfg file is written by the cloud
    # images build and defines/overrides some settings. Disable it.
    ci_cfg = target_path(
        target,
        os.path.join(os.path.dirname(grub_cfg), "50-cloudimg-settings.cfg"))
    if os.path.exists(ci_cfg):
        LOG.debug('grub: moved %s out of the way', ci_cfg)
        shutil.move(ci_cfg, ci_cfg + '.disabled')
def disconnect_target_disks(target_root_path=None):
    target_nodes_path = paths.target_path(target_root_path,
                                          '/etc/iscsi/nodes')
    fails = []
    if os.path.isdir(target_nodes_path):
        for target in os.listdir(target_nodes_path):
            if target not in iscsiadm_sessions():
                LOG.debug('iscsi target %s not active, skipping', target)
                continue
            # conn is "host,port,lun"
            for conn in os.listdir(
                    os.path.sep.join([target_nodes_path, target])):
                host, port, _ = conn.split(',')
                try:
                    util.subp(['sync'])
                    iscsiadm_logout(target, '%s:%s' % (host, port))
                except util.ProcessExecutionError as e:
                    fails.append(target)
                    LOG.warning("Unable to logout of iSCSI target %s: %s",
                                target, e)
    else:
        LOG.warning('Skipping disconnect: failed to find iscsi nodes path: %s',
                    target_nodes_path)
    if fails:
        raise RuntimeError(
            "Unable to logout of iSCSI targets: %s" % ', '.join(fails))
def _apt_source_list(self, cfg, expected): "_apt_source_list - Test rendering from template (generic)" arch = distro.get_architecture() # would fail inside the unittest context bpath = "curtin.commands.apt_config." upath = bpath + "util." dpath = bpath + 'distro.' self.add_patch(dpath + "get_architecture", "mockga", return_value=arch) self.add_patch(upath + "write_file", "mockwrite") self.add_patch(bpath + "os.rename", "mockrename") self.add_patch(upath + "load_file", "mockload_file", return_value=MOCKED_APT_SRC_LIST) self.add_patch(bpath + "distro.lsb_release", "mock_lsb_release", return_value={'codename': 'fakerel'}) self.add_patch(bpath + "apply_preserve_sources_list", "mock_apply_preserve_sources_list") apt_config.handle_apt(cfg, TARGET) self.mockga.assert_called_with(TARGET) self.mock_apply_preserve_sources_list.assert_called_with(TARGET) calls = [ call(paths.target_path(TARGET, '/etc/apt/sources.list'), expected, mode=0o644) ] self.mockwrite.assert_has_calls(calls)
def apply_preserve_sources_list(target): # protect the just generated sources.list from cloud-init cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg" target_ver = distro.get_package_version('cloud-init', target=target) if not target_ver: LOG.info( "Attempt to read cloud-init version from target returned " "'%s', not writing preserve_sources_list config.", target_ver) return cfg = {'apt': {'preserve_sources_list': True}} if target_ver['major'] < 1: # anything cloud-init 0.X.X will get the old config key. cfg = {'apt_preserve_sources_list': True} try: util.write_file(paths.target_path(target, cloudfile), config.dump_config(cfg), mode=0o644) LOG.debug("Set preserve_sources_list to True in %s with: %s", cloudfile, cfg) except IOError: LOG.exception( "Failed to protect /etc/apt/sources.list from cloud-init in '%s'", cloudfile) raise
def in_target_main(args):
    if args.target is not None:
        target = args.target
    else:
        state = util.load_command_environment()
        target = state['target']

    if target is None:
        sys.stderr.write("Unable to find target. "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    daemons = args.allow_daemons
    if paths.target_path(target) == "/":
        sys.stderr.write("WARN: Target is /, daemons are allowed.\n")
        daemons = True
    cmd = args.command_args
    with util.ChrootableTarget(target, allow_daemons=daemons) as chroot:
        exit = 0
        if not args.interactive:
            try:
                chroot.subp(cmd, capture=args.capture)
            except util.ProcessExecutionError as e:
                exit = e.exit_code
        else:
            if chroot.target != "/":
                cmd = ["chroot", chroot.target] + args.command_args

            # In python 3.4 pty.spawn started returning a value.
            # There, it is the status from os.waitpid. From testing (py3.6)
            # that seems to be exit_code * 256.
            ret = pty.spawn(cmd)  # pylint: disable=E1111
            if ret is not None:
                exit = int(ret / 256)

    sys.exit(exit)
def _maybe_remove_legacy_eth0(target,
                              path="etc/network/interfaces.d/eth0.cfg"):
    """Ubuntu cloud images previously included an 'eth0.cfg' that had
       hard-coded content. That file would interfere with the rendered
       configuration if it was present.

       If the file does not exist, do nothing.
       If the file exists:
         - with known content, remove it and warn
         - with unknown content, leave it and warn
    """
    cfg = paths.target_path(target, path=path)
    if not os.path.exists(cfg):
        LOG.warning('Failed to find legacy network conf file %s', cfg)
        return

    bmsg = "Dynamic networking config may not apply."
    try:
        contents = util.load_file(cfg)
        known_contents = ["auto eth0", "iface eth0 inet dhcp"]
        lines = [
            f.strip() for f in contents.splitlines() if not f.startswith("#")
        ]
        if lines == known_contents:
            util.del_file(cfg)
            msg = "removed %s with known contents" % cfg
        else:
            msg = (bmsg + " '%s' exists with user configured content." % cfg)
    except Exception:
        msg = bmsg + " %s exists, but could not be read." % cfg
        LOG.exception(msg)
        raise

    LOG.warning(msg)
def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( paths.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: os.unlink(dpkg_cfg)
def test_skip_rename_resolvconf_gone(self, m_rename): self.m_shutil.copy.side_effect = self.mycopy self.m_shutil.rmtree.side_effect = self.mydel with util.ChrootableTarget(self.target): tp = paths.target_path(self.target, path='/etc/resolv.conf') target_conf = util.load_file(tp) self.assertEqual(self.host_content, target_conf) self.assertEqual(0, m_rename.call_count)
def setUp(self): super(TestGetArchitecture, self).setUp() self.target = paths.target_path('mytarget') self.add_patch('curtin.util.subp', 'm_subp') self.add_patch('curtin.distro.get_osfamily', 'm_get_osfamily') self.add_patch('curtin.distro.dpkg_get_architecture', 'm_dpkg_get_arch') self.add_patch('curtin.distro.rpm_get_architecture', 'm_rpm_get_arch') self.m_get_osfamily.return_value = distro.DISTROS.debian
def test_chrootable_target_renames_and_copies_resolvconf_if_symlink(self): target_rconf = os.path.join(self.target, 'etc/resolv.conf') os.symlink('../run/foobar/wark.conf', target_rconf) self.m_shutil.copy.side_effect = self.mycopy self.m_shutil.rmtree.side_effect = self.mydel with util.ChrootableTarget(self.target): target_conf = util.load_file( paths.target_path(self.target, path='/etc/resolv.conf')) self.assertEqual(self.host_content, target_conf)
def _subp_wrap_popen(self, cmd, kwargs, stdout=b'', stderr=b'',
                     returncodes=None):
    # mocks the subprocess.Popen as expected from subp
    # checks that subp returned the output of 'communicate' and
    # returns the (args, kwargs) that Popen() was called with.
    # returncodes is a list to cover, one for each expected call

    if returncodes is None:
        returncodes = [0]

    capture = kwargs.get('capture')

    mreturncodes = mock.PropertyMock(side_effect=iter(returncodes))
    with mock.patch("curtin.util.subprocess.Popen") as m_popen:
        sp = mock.Mock()
        m_popen.return_value = sp
        if capture:
            sp.communicate.return_value = (stdout, stderr)
        else:
            sp.communicate.return_value = (None, None)
        type(sp).returncode = mreturncodes
        ret = util.subp(cmd, **kwargs)

        # popen may be called once or > 1 for retries, but must be called.
        self.assertTrue(m_popen.called)
        # communicate() needs to have been called.
        self.assertTrue(sp.communicate.called)

        if capture:
            # capture response is decoded if decode is not False
            decode = kwargs.get('decode', "replace")
            if decode is False:
                # with decode=False, subp returns the raw bytes unchanged
                self.assertEqual((stdout, stderr), ret)
            else:
                self.assertEqual((stdout.decode(errors=decode),
                                  stderr.decode(errors=decode)), ret)
        else:
            # if capture is false, then return is None, None
            self.assertEqual((None, None), ret)

        # if target is not provided or is /, chroot should not be used
        calls = m_popen.call_args_list
        popen_args, popen_kwargs = calls[-1]
        target = paths.target_path(kwargs.get('target', None))
        unshcmd = self.mock_get_unshare_pid_args.return_value
        if target == "/":
            self.assertEqual(unshcmd + list(cmd), popen_args[0])
        else:
            self.assertEqual(unshcmd + ['chroot', target] + list(cmd),
                             popen_args[0])
        return calls
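# Editorial sketch of how a test method might drive the helper above; the
# command, target path and output bytes are hypothetical. The helper itself
# performs the assertions, including that the Popen command is wrapped with
# 'chroot <target>' when a non-/ target is passed.
def test_subp_chroots_into_target(self):
    self._subp_wrap_popen(['ls', '/'],
                          {'capture': True, 'target': '/mytarget'},
                          stdout=b'bin\n', stderr=b'')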
def test_chrootable_target_renames_and_copies_resolvconf(self): content = "target_resolvconf" util.write_file(os.path.join(self.target, 'etc/resolv.conf'), content) self.m_shutil.copy.side_effect = self.mycopy self.m_shutil.rmtree.side_effect = self.mydel with util.ChrootableTarget(self.target): target_conf = util.load_file( paths.target_path(self.target, path='/etc/resolv.conf')) self.assertEqual(self.host_content, target_conf)
def test_apply_ipv6_mtu_hook(self, mock_write): target = 'mytarget' prehookfn = 'if-pre-up.d/mtuipv6' posthookfn = 'if-up.d/mtuipv6' mode = 0o755 apply_net._patch_ifupdown_ipv6_mtu_hook(target, prehookfn=prehookfn, posthookfn=posthookfn) precfg = paths.target_path(target, path=prehookfn) postcfg = paths.target_path(target, path=posthookfn) precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK hook_calls = [ call(precfg, precontents, mode=mode), call(postcfg, postcontents, mode=mode), ] mock_write.assert_has_calls(hook_calls)
def get_grub_install_command(uefi, distroinfo, target): grub_install_cmd = 'grub-install' if distroinfo.family == distro.DISTROS.debian: # prefer grub-multi-install if present if uefi and os.path.exists(target_path(target, GRUB_MULTI_INSTALL)): grub_install_cmd = GRUB_MULTI_INSTALL elif distroinfo.family == distro.DISTROS.redhat: grub_install_cmd = 'grub2-install' LOG.debug('Using grub install command: %s', grub_install_cmd) return grub_install_cmd
def test_disable_ipv6_priv_extentions_notfound(self, mock_ospath, mock_load): target = 'mytarget' path = 'foo.conf' mock_ospath.exists.return_value = False apply_net._disable_ipv6_privacy_extensions(target, path=path) # source file not found cfg = paths.target_path(target, path) mock_ospath.exists.assert_called_with(cfg) self.assertEqual(0, mock_load.call_count)
def find_efi_loader(target, bootid): efi_path = '/boot/efi/EFI' possible_loaders = [ os.path.join(efi_path, bootid, 'shimx64.efi'), os.path.join(efi_path, 'BOOT', 'BOOTX64.EFI'), os.path.join(efi_path, bootid, 'grubx64.efi'), ] for loader in possible_loaders: tloader = target_path(target, path=loader) if os.path.exists(tloader): LOG.debug('find_efi_loader: found %s', loader) return loader return None
def test_dnf_install(self, m_which, m_subp): pkglist = ['foobar', 'wark'] target = 'mytarget' mode = 'install' m_which.return_value = '/usr/bin/dnf' expected_calls = [ mock.call([ 'dnf', '--assumeyes', '--quiet', 'install', '--downloadonly', '--setopt=keepcache=1' ] + pkglist, env=None, retries=[1] * 10, target=paths.target_path(target)), mock.call( ['dnf', '--assumeyes', '--quiet', 'install', '--cacheonly'] + pkglist, env=None, target=paths.target_path(target)) ] # call yum_install directly self.assertFalse(m_subp.called) distro.yum_install(mode, pkglist, target=target) m_subp.assert_has_calls(expected_calls) # call yum_install through run_yum_command; expect the same calls # so clear m_subp's call stack. m_subp.reset_mock() self.assertFalse(m_subp.called) distro.run_yum_command('install', pkglist, target=target) m_subp.assert_has_calls(expected_calls) # call yum_install through install_packages; expect the same calls # so clear m_subp's call stack. m_subp.reset_mock() self.assertFalse(m_subp.called) osfamily = distro.DISTROS.redhat distro.install_packages(pkglist, osfamily=osfamily, target=target) m_subp.assert_has_calls(expected_calls)
def test_remove_legacy_eth0_notfound(self, mock_ospath, mock_load, mock_del): target = 'mytarget' path = 'eth0.conf' mock_ospath.exists.return_value = False apply_net._maybe_remove_legacy_eth0(target, path) # source file not found cfg = paths.target_path(target, path) mock_ospath.exists.assert_called_with(cfg) self.assertEqual(0, mock_load.call_count) self.assertEqual(0, mock_del.call_count)
def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del): target = 'mytarget' path = 'eth0.cfg' cfg = paths.target_path(target, path) legacy_eth0_contents = ( 'auto eth0\n' 'iface eth0 inet dhcp') mock_ospath.exists.return_value = True mock_load.side_effect = [legacy_eth0_contents] apply_net._maybe_remove_legacy_eth0(target, path) mock_del.assert_called_with(cfg)
def add_apt_key_raw(filename, key, target=None): """ actual adding of a key as defined in key argument to the system """ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in str(key): target_keyfile_ext = '.asc' omode = 'w' key = key.rstrip() else: target_keyfile_ext = '.gpg' omode = 'wb' target_keyfile = paths.target_path( target, "/etc/apt/trusted.gpg.d/" + filename + target_keyfile_ext) util.write_file(target_keyfile, key, mode=0o644, omode=omode) LOG.debug("Adding key to '%s':\n'%s'", target_keyfile, key)
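# Editorial sketch, not part of curtin: add_apt_key_raw() writes ASCII-armored
# keys as '<filename>.asc' and any other key material as binary
# '<filename>.gpg' under /etc/apt/trusted.gpg.d in the target. The filename,
# target path and key body below are truncated placeholders, not real values.
def _example_add_apt_key_raw(target='/tmp/target'):
    armored = ("-----BEGIN PGP PUBLIC KEY BLOCK-----\n"
               "...\n"
               "-----END PGP PUBLIC KEY BLOCK-----\n")
    # written to /etc/apt/trusted.gpg.d/myrepo.asc with omode='w'
    add_apt_key_raw('myrepo', armored, target=target)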
def test_apt_srcl_custom(self): """test_apt_srcl_custom - Test rendering a custom source template""" cfg = yaml.safe_load(YAML_TEXT_CUSTOM_SL) target = self.new_root arch = distro.get_architecture() # would fail inside the unittest context with mock.patch.object(distro, 'get_architecture', return_value=arch): with mock.patch.object(distro, 'lsb_release', return_value={'codename': 'fakerel'}): apt_config.handle_apt(cfg, target) self.assertEqual( EXPECTED_CONVERTED_CONTENT, util.load_file(paths.target_path(target, "/etc/apt/sources.list")))
def get_fstype(target, source): target_source = paths.target_path(target, source) try: out, _ = util.subp(['findmnt', '--noheading', '--target', target_source, '-o', 'FSTYPE'], capture=True) except util.ProcessExecutionError as exc: LOG.warning('Failed to query %s fstype, findmnt returned error: %s', target_source, exc) return None if out: """ $ findmnt --noheading --target /btrfs -o FSTYPE btrfs """ return out.splitlines()[-1] return None
def test_disable_ipv6_priv_extentions(self, mock_write, mock_ospath, mock_load, mock_del): target = 'mytarget' path = 'etc/sysctl.d/10-ipv6-privacy.conf' ipv6_priv_contents = ( 'net.ipv6.conf.all.use_tempaddr = 2\n' 'net.ipv6.conf.default.use_tempaddr = 2') expected_ipv6_priv_contents = '\n'.join( ["# IPv6 Privacy Extensions (RFC 4941)", "# Disabled by curtin", "# net.ipv6.conf.all.use_tempaddr = 2", "# net.ipv6.conf.default.use_tempaddr = 2"]) mock_ospath.exists.return_value = True mock_load.side_effect = [ipv6_priv_contents] apply_net._disable_ipv6_privacy_extensions(target) cfg = paths.target_path(target, path=path) mock_write.assert_called_with(cfg, expected_ipv6_priv_contents)
def _patch_ifupdown_ipv6_mtu_hook(target, prehookfn="etc/network/if-pre-up.d/mtuipv6", posthookfn="etc/network/if-up.d/mtuipv6"): contents = { 'prehook': IFUPDOWN_IPV6_MTU_PRE_HOOK, 'posthook': IFUPDOWN_IPV6_MTU_POST_HOOK, } hookfn = { 'prehook': prehookfn, 'posthook': posthookfn, } for hook in ['prehook', 'posthook']: fn = hookfn[hook] cfg = paths.target_path(target, path=fn) LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg) util.write_file(cfg, contents[hook], mode=0o755)
def _disable_ipv6_privacy_extensions(target,
                                     path="etc/sysctl.d/10-ipv6-privacy.conf"):
    """Ubuntu server images set a preference to use IPv6 privacy extensions
       by default; this races with the cloud-image desire to disable them.
       Resolve this by allowing the cloud-image setting to win.
    """
    LOG.debug('Attempting to remove ipv6 privacy extensions')
    cfg = paths.target_path(target, path=path)
    if not os.path.exists(cfg):
        LOG.warning('Failed to find ipv6 privacy conf file %s', cfg)
        return

    bmsg = "Disabling IPv6 privacy extensions config may not apply."
    try:
        contents = util.load_file(cfg)
        known_contents = [
            "net.ipv6.conf.all.use_tempaddr = 2",
            "net.ipv6.conf.default.use_tempaddr = 2"
        ]
        lines = [
            f.strip() for f in contents.splitlines() if not f.startswith("#")
        ]
        if lines == known_contents:
            LOG.info('Removing ipv6 privacy extension config file: %s', cfg)
            util.del_file(cfg)
            msg = "removed %s with known contents" % cfg
            curtin_contents = '\n'.join([
                "# IPv6 Privacy Extensions (RFC 4941)",
                "# Disabled by curtin",
                "# net.ipv6.conf.all.use_tempaddr = 2",
                "# net.ipv6.conf.default.use_tempaddr = 2"
            ])
            util.write_file(cfg, curtin_contents)
        else:
            LOG.debug('skipping removal of %s, expected content not found',
                      cfg)
            LOG.debug("Found content in file %s:\n%s", cfg, lines)
            LOG.debug("Expected contents in file %s:\n%s",
                      cfg, known_contents)
            msg = (bmsg + " '%s' exists with user configured content." % cfg)
    except Exception as e:
        msg = bmsg + " %s exists, but could not be read. %s" % (cfg, e)
        LOG.exception(msg)
        raise