    def _apt_source_list(self, cfg, expected):
        "_apt_source_list - Test rendering from template (generic)"

        arch = util.get_architecture()
        # would fail inside the unittest context
        with mock.patch.object(util, 'get_architecture',
                               return_value=arch) as mockga:
            with mock.patch.object(util, 'write_file') as mockwrite:
                # keep it side effect free and avoid permission errors
                with mock.patch.object(os, 'rename'):
                    # make test independent to executing system
                    with mock.patch.object(util,
                                           'load_file',
                                           return_value=MOCKED_APT_SRC_LIST):
                        with mock.patch.object(
                                util,
                                'lsb_release',
                                return_value={'codename': 'fakerel'}):
                            apt_config.handle_apt(cfg, TARGET)

        mockga.assert_called_with("/")

        cloudfile = '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg'
        cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
        calls = [
            call(util.target_path(TARGET, '/etc/apt/sources.list'),
                 expected,
                 mode=0o644),
            call(util.target_path(TARGET, cloudfile), cloudconf, mode=0o644)
        ]
        mockwrite.assert_has_calls(calls)
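Every snippet on this page goes through curtin's util.target_path(target, path) helper to re-root a path under the target mount point. The stand-in below is not curtin's implementation; it is a minimal sketch that reproduces the behaviour the target_path tests further down (Examples #23, #27 and #29) assert on.

import os


def target_path_sketch(target=None, path=None):
    # illustrative stand-in for curtin.util.target_path (not the real code)
    if target in (None, ""):
        target = "/"
    target = os.path.abspath(target)
    if not path:
        return target
    # strip leading slashes so os.path.join re-roots the path under target
    while len(path) and path[0] == "/":
        path = path[1:]
    return os.path.join(target, path)


# matches the assertions made in the target_path tests below
assert target_path_sketch(None, "/my/path") == "/my/path"
assert target_path_sketch("/my/target/", "/my/path") == "/my/target/my/path"
assert target_path_sketch("/target/", "//my/path/") == "/target/my/path/"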
Example #2
def disconnect_target_disks(target_root_path=None):
    target_nodes_path = util.target_path(target_root_path, '/etc/iscsi/nodes')
    fails = []
    if os.path.isdir(target_nodes_path):
        for target in os.listdir(target_nodes_path):
            if target not in iscsiadm_sessions():
                LOG.debug('iscsi target %s not active, skipping', target)
                continue
            # conn is "host,port,lun"
            for conn in os.listdir(
                    os.path.sep.join([target_nodes_path, target])):
                host, port, _ = conn.split(',')
                try:
                    util.subp(['sync'])
                    iscsiadm_logout(target, '%s:%s' % (host, port))
                except util.ProcessExecutionError as e:
                    fails.append(target)
                    LOG.warn("Unable to logout of iSCSI target %s: %s", target,
                             e)
    else:
        LOG.warning('Skipping disconnect: failed to find iscsi nodes path: %s',
                    target_nodes_path)
    if fails:
        raise RuntimeError("Unable to logout of iSCSI targets: %s" %
                           ', '.join(fails))
Example #3
def in_target_main(args):
    if args.target is not None:
        target = args.target
    else:
        state = util.load_command_environment()
        target = state['target']

    if target is None:
        sys.stderr.write("Unable to find target.  "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    daemons = args.allow_daemons
    if util.target_path(args.target) == "/":
        sys.stderr.write("WARN: Target is /, daemons are allowed.\n")
        daemons = True
    cmd = args.command_args
    with util.ChrootableTarget(target, allow_daemons=daemons) as chroot:
        exit = 0
        if not args.interactive:
            try:
                chroot.subp(cmd, capture=args.capture)
            except util.ProcessExecutionError as e:
                exit = e.exit_code
        else:
            if chroot.target != "/":
                cmd = ["chroot", chroot.target] + args.command_args

            # in python 3.4 pty.spawn started returning a value.
            # There, it is the status from os.waitpid.  From testing (py3.6)
            # that seems to be exit_code * 256.
            ret = pty.spawn(cmd)  # pylint: disable=E1111
            if ret is not None:
                exit = int(ret / 256)
        sys.exit(exit)
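The interactive branch above converts the value returned by pty.spawn() (the raw os.waitpid status on Python 3.4+) into an exit code by dividing by 256. A small illustration of that encoding on POSIX systems, using a made-up exit code:

import os

# for a process that exits normally with code N, the waitpid status is N << 8
status = 3 << 8
assert status // 256 == 3            # the arithmetic used in in_target_main
assert os.WEXITSTATUS(status) == 3   # stdlib helper extracting the same value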
Example #4
def _maybe_remove_legacy_eth0(target,
                              path="etc/network/interfaces.d/eth0.cfg"):
    """Ubuntu cloud images previously included a 'eth0.cfg' that had
       hard coded content.  That file would interfere with the rendered
       configuration if it was present.

       If the file does not exist, do nothing.
       If the file exists:
         - with known content, remove it and warn
         - with unknown content, leave it and warn
    """

    cfg = util.target_path(target, path=path)
    if not os.path.exists(cfg):
        LOG.warn('Failed to find legacy network conf file %s', cfg)
        return

    bmsg = "Dynamic networking config may not apply."
    try:
        contents = util.load_file(cfg)
        known_contents = ["auto eth0", "iface eth0 inet dhcp"]
        lines = [
            f.strip() for f in contents.splitlines() if not f.startswith("#")
        ]
        if lines == known_contents:
            util.del_file(cfg)
            msg = "removed %s with known contents" % cfg
        else:
            msg = (bmsg + " '%s' exists with user configured content." % cfg)
    except Exception:
        msg = bmsg + " %s exists, but could not be read." % cfg
        LOG.exception(msg)
        raise

    LOG.warn(msg)
Example #5
def generate_sources_list(cfg, release, mirrors, target=None):
    """ generate_sources_list
        create a sources.list file based on a custom or default template
        by replacing mirrors and release in the template
    """
    default_mirrors = get_default_mirrors(util.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        LOG.info(
            "No custom template provided, fall back to modifying "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(util.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    orig = util.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)

    # protect the just generated sources.list from cloud-init
    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
    # this has to work with older cloud-init as well, so use old key
    cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
    try:
        util.write_file(util.target_path(target, cloudfile),
                        cloudconf,
                        mode=0o644)
    except IOError:
        LOG.exception("Failed to protect source.list from cloud-init in (%s)",
                      util.target_path(target, cloudfile))
        raise
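util.render_string() fills $RELEASE/$MIRROR-style placeholders with the params dict built above. The sketch below only illustrates that kind of substitution with Python's string.Template; whether curtin uses exactly this mechanism is an assumption here, and the template and mirror values are made up.

from string import Template

tmpl = "deb $MIRROR $RELEASE main universe\n"          # made-up template
params = {'RELEASE': 'fakerel',
          'MIRROR': 'http://archive.ubuntu.com/ubuntu/'}
print(Template(tmpl).safe_substitute(params))
# deb http://archive.ubuntu.com/ubuntu/ fakerel main universe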
Example #6
def copy_dname_rules(rules_d, target):
    if not rules_d:
        LOG.warn("no udev rules directory to copy")
        return
    target_rules_dir = util.target_path(target, "etc/udev/rules.d")
    for rule in os.listdir(rules_d):
        target_file = os.path.join(target_rules_dir, rule)
        shutil.copy(os.path.join(rules_d, rule), target_file)
Example #7
def clean_cloud_init(target):
    """clean out any local cloud-init config"""
    flist = glob.glob(util.target_path(target,
                                       "/etc/cloud/cloud.cfg.d/*dpkg*"))

    LOG.debug("cleaning cloud-init config from: %s", flist)
    for dpkg_cfg in flist:
        os.unlink(dpkg_cfg)
Example #8
    def _subp_wrap_popen(self,
                         cmd,
                         kwargs,
                         stdout=b'',
                         stderr=b'',
                         returncodes=None):
        # mocks the subprocess.Popen as expected from subp
        # checks that subp returned the output of 'communicate' and
        # returns the (args, kwargs) that Popen() was called with.
        # returncodes is a list to cover, one for each expected call

        if returncodes is None:
            returncodes = [0]

        capture = kwargs.get('capture')

        mreturncodes = mock.PropertyMock(side_effect=iter(returncodes))
        with mock.patch("curtin.util.subprocess.Popen") as m_popen:
            sp = mock.Mock()
            m_popen.return_value = sp
            if capture:
                sp.communicate.return_value = (stdout, stderr)
            else:
                sp.communicate.return_value = (None, None)
            type(sp).returncode = mreturncodes
            ret = util.subp(cmd, **kwargs)

        # popen may be called once or > 1 for retries, but must be called.
        self.assertTrue(m_popen.called)
        # communicate() needs to have been called.
        self.assertTrue(sp.communicate.called)

        if capture:
            # capture response is decoded if decode is not False
            decode = kwargs.get('decode', "replace")
            if decode is False:
                self.assertEqual((stdout, stderr), ret)
            else:
                self.assertEqual((stdout.decode(errors=decode),
                                  stderr.decode(errors=decode)), ret)
        else:
            # if capture is false, then return is None, None
            self.assertEqual((None, None), ret)

        # if target is not provided or is /, chroot should not be used
        calls = m_popen.call_args_list
        popen_args, popen_kwargs = calls[-1]
        target = util.target_path(kwargs.get('target', None))
        unshcmd = self.mock_get_unshare_pid_args.return_value
        if target == "/":
            self.assertEqual(unshcmd + list(cmd), popen_args[0])
        else:
            self.assertEqual(unshcmd + ['chroot', target] + list(cmd),
                             popen_args[0])
        return calls
Example #9
    def test_apply_ipv6_mtu_hook(self, mock_write):
        target = 'mytarget'
        prehookfn = 'if-pre-up.d/mtuipv6'
        posthookfn = 'if-up.d/mtuipv6'
        mode = 0o755

        apply_net._patch_ifupdown_ipv6_mtu_hook(target,
                                                prehookfn=prehookfn,
                                                posthookfn=posthookfn)

        precfg = util.target_path(target, path=prehookfn)
        postcfg = util.target_path(target, path=posthookfn)
        precontents = apply_net.IFUPDOWN_IPV6_MTU_PRE_HOOK
        postcontents = apply_net.IFUPDOWN_IPV6_MTU_POST_HOOK

        hook_calls = [
            call(precfg, precontents, mode=mode),
            call(postcfg, postcontents, mode=mode),
        ]
        mock_write.assert_has_calls(hook_calls)
Example #10
    def test_apt_srcl_custom(self):
        """test_apt_srcl_custom - Test rendering a custom source template"""
        cfg = yaml.safe_load(YAML_TEXT_CUSTOM_SL)
        target = self.new_root

        arch = util.get_architecture()
        # would fail inside the unittest context
        with mock.patch.object(util, 'get_architecture', return_value=arch):
            with mock.patch.object(util,
                                   'lsb_release',
                                   return_value={'codename': 'fakerel'}):
                apt_config.handle_apt(cfg, target)

        self.assertEqual(
            EXPECTED_CONVERTED_CONTENT,
            util.load_file(util.target_path(target, "/etc/apt/sources.list")))
        cloudfile = util.target_path(
            target, '/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg')
        self.assertEqual({'apt_preserve_sources_list': True},
                         yaml.load(util.load_file(cloudfile)))
Example #11
    def test_disable_ipv6_priv_extentions_notfound(self, mock_ospath,
                                                   mock_load):
        target = 'mytarget'
        path = 'foo.conf'
        mock_ospath.exists.return_value = False

        apply_net._disable_ipv6_privacy_extensions(target, path=path)

        # source file not found
        cfg = util.target_path(target, path)
        mock_ospath.exists.assert_called_with(cfg)
        self.assertEqual(0, mock_load.call_count)
Example #12
    def test_remove_legacy_eth0_notfound(self, mock_ospath, mock_load,
                                         mock_del):
        target = 'mytarget'
        path = 'eth0.conf'
        mock_ospath.exists.return_value = False

        apply_net._maybe_remove_legacy_eth0(target, path)

        # source file not found
        cfg = util.target_path(target, path)
        mock_ospath.exists.assert_called_with(cfg)
        self.assertEqual(0, mock_load.call_count)
        self.assertEqual(0, mock_del.call_count)
Example #13
def inject_pollinate_user_agent_config(ua_cfg, target):
    """Write out user-agent config dictionary to pollinate's
    user-agent file (/etc/pollinate/add-user-agent) in target.
    """
    if not isinstance(ua_cfg, dict):
        raise ValueError('ua_cfg is not a dictionary: %s' % ua_cfg)

    pollinate_cfg = util.target_path(target, '/etc/pollinate/add-user-agent')
    comment = "# written by curtin"
    content = "\n".join([
        "%s/%s %s" % (ua_key, ua_val, comment)
        for ua_key, ua_val in ua_cfg.items()
    ]) + "\n"
    util.write_file(pollinate_cfg, content=content)
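For reference, the content written by the function above is one "key/value # written by curtin" line per entry; a quick illustration with made-up values:

ua_cfg = {'curtin': '18.1', 'maas': '2.4.0'}   # sample values, made up
comment = "# written by curtin"
content = "\n".join(
    "%s/%s %s" % (k, v, comment) for k, v in ua_cfg.items()) + "\n"
print(content)
# curtin/18.1 # written by curtin
# maas/2.4.0 # written by curtin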
Example #14
    def test_remove_legacy_eth0(self, mock_ospath, mock_load, mock_del):
        target = 'mytarget'
        path = 'eth0.cfg'
        cfg = util.target_path(target, path)
        legacy_eth0_contents = (
            'auto eth0\n'
            'iface eth0 inet dhcp')

        mock_ospath.exists.return_value = True
        mock_load.side_effect = [legacy_eth0_contents]

        apply_net._maybe_remove_legacy_eth0(target, path)

        mock_del.assert_called_with(cfg)
Example #15
    def _check_mount_fstab_subp(self, fdata, expected, target=None):
        # expected currently is like: mount <device> <mp>
        # and thus mp will always be target + fdata.path
        if target is None:
            target = self.tmp_dir()

        expected = [
            a if a != "_T_MP" else util.target_path(target, fdata.path)
            for a in expected
        ]
        with patch("curtin.util.subp") as m_subp:
            block_meta.mount_fstab_data(fdata, target=target)

        self.assertEqual(call(expected, capture=True), m_subp.call_args)
        self.assertTrue(os.path.isdir(self.tmp_path(fdata.path, target)))
Example #16
def copy_install_log(logfile, target, log_target_path):
    """Copy curtin install log file to target system"""
    basemsg = 'Cannot copy curtin install log "%s" to target.' % logfile
    if not logfile:
        LOG.warn(basemsg)
        return
    if not os.path.isfile(logfile):
        LOG.warn(basemsg + "  file does not exist.")
        return

    LOG.debug('Copying curtin install log from %s to target/%s',
              logfile, log_target_path)
    util.write_file(
        filename=util.target_path(target, log_target_path),
        content=util.load_file(logfile, decode=False),
        mode=0o400, omode="wb")
Example #17
def _patch_ifupdown_ipv6_mtu_hook(target,
                                  prehookfn="etc/network/if-pre-up.d/mtuipv6",
                                  posthookfn="etc/network/if-up.d/mtuipv6"):

    contents = {
        'prehook': IFUPDOWN_IPV6_MTU_PRE_HOOK,
        'posthook': IFUPDOWN_IPV6_MTU_POST_HOOK,
    }

    hookfn = {
        'prehook': prehookfn,
        'posthook': posthookfn,
    }

    for hook in ['prehook', 'posthook']:
        fn = hookfn[hook]
        cfg = util.target_path(target, path=fn)
        LOG.info('Injecting fix for ipv6 mtu settings: %s', cfg)
        util.write_file(cfg, contents[hook], mode=0o755)
Example #18
    def test_disable_ipv6_priv_extentions(self, mock_write, mock_ospath,
                                          mock_load, mock_del):
        target = 'mytarget'
        path = 'etc/sysctl.d/10-ipv6-privacy.conf'
        ipv6_priv_contents = (
            'net.ipv6.conf.all.use_tempaddr = 2\n'
            'net.ipv6.conf.default.use_tempaddr = 2')
        expected_ipv6_priv_contents = '\n'.join(
            ["# IPv6 Privacy Extensions (RFC 4941)",
             "# Disabled by curtin",
             "# net.ipv6.conf.all.use_tempaddr = 2",
             "# net.ipv6.conf.default.use_tempaddr = 2"])
        mock_ospath.exists.return_value = True
        mock_load.side_effect = [ipv6_priv_contents]

        apply_net._disable_ipv6_privacy_extensions(target)

        cfg = util.target_path(target, path=path)
        mock_write.assert_called_with(cfg, expected_ipv6_priv_contents)
Example #19
def _disable_ipv6_privacy_extensions(target,
                                     path="etc/sysctl.d/10-ipv6-privacy.conf"):
    """Ubuntu server image sets a preference to use IPv6 privacy extensions
       by default; this races with the cloud-image desire to disable them.
       Resolve this by allowing the cloud-image setting to win. """

    LOG.debug('Attempting to remove ipv6 privacy extensions')
    cfg = util.target_path(target, path=path)
    if not os.path.exists(cfg):
        LOG.warn('Failed to find ipv6 privacy conf file %s', cfg)
        return

    bmsg = "Disabling IPv6 privacy extensions config may not apply."
    try:
        contents = util.load_file(cfg)
        known_contents = [
            "net.ipv6.conf.all.use_tempaddr = 2",
            "net.ipv6.conf.default.use_tempaddr = 2"
        ]
        lines = [
            f.strip() for f in contents.splitlines() if not f.startswith("#")
        ]
        if lines == known_contents:
            LOG.info('Removing ipv6 privacy extension config file: %s', cfg)
            util.del_file(cfg)
            msg = "removed %s with known contents" % cfg
            curtin_contents = '\n'.join([
                "# IPv6 Privacy Extensions (RFC 4941)", "# Disabled by curtin",
                "# net.ipv6.conf.all.use_tempaddr = 2",
                "# net.ipv6.conf.default.use_tempaddr = 2"
            ])
            util.write_file(cfg, curtin_contents)
        else:
            LOG.debug('skipping removal of %s, expected content not found',
                      cfg)
            LOG.debug("Found content in file %s:\n%s", cfg, lines)
            LOG.debug("Expected contents in file %s:\n%s", cfg, known_contents)
            msg = (bmsg + " '%s' exists with user configured content." % cfg)
    except Exception as e:
        msg = bmsg + " %s exists, but could not be read. %s" % (cfg, e)
        LOG.exception(msg)
        raise
Example #20
def rename_apt_lists(new_mirrors, target=None):
    """rename_apt_lists - rename apt lists to preserve old cache data"""
    default_mirrors = get_default_mirrors(util.get_architecture(target))

    pre = util.target_path(target, APT_LISTS)
    for (name, omirror) in default_mirrors.items():
        nmirror = new_mirrors.get(name)
        if not nmirror:
            continue

        oprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(omirror)
        nprefix = pre + os.path.sep + mirrorurl_to_apt_fileprefix(nmirror)
        if oprefix == nprefix:
            continue
        olen = len(oprefix)
        for filename in glob.glob("%s_*" % oprefix):
            newname = "%s%s" % (nprefix, filename[olen:])
            LOG.debug("Renaming apt list %s to %s", filename, newname)
            try:
                os.rename(filename, newname)
            except OSError:
                # since this is a best-effort task, warn but don't fail
                LOG.warn("Failed to rename apt list:", exc_info=True)
Example #21
def target_is_ubuntu_core(target):
    """Check if Ubuntu-Core specific directory is present at target"""
    if target:
        return os.path.exists(
            util.target_path(target, 'system-data/var/lib/snapd'))
    return False
Example #22
def add_apt_sources(srcdict,
                    target=None,
                    template_params=None,
                    aa_repo_match=None):
    """
    add entries in /etc/apt/sources.list.d for each abbreviated
    sources.list entry in 'srcdict'.  When rendering template, also
    include the values in dictionary searchList
    """
    if template_params is None:
        template_params = {}

    if aa_repo_match is None:
        raise ValueError('did not get a valid repo matcher')

    if not isinstance(srcdict, dict):
        raise TypeError('unknown apt format: %s' % (srcdict))

    for filename in srcdict:
        ent = srcdict[filename]
        if 'filename' not in ent:
            ent['filename'] = filename

        add_apt_key(ent, target)

        if 'source' not in ent:
            continue
        source = ent['source']
        if source == 'proposed':
            source = APT_SOURCES_PROPOSED
        source = util.render_string(source, template_params)

        if not ent['filename'].startswith("/"):
            ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
                                           ent['filename'])
        if not ent['filename'].endswith(".list"):
            ent['filename'] += ".list"

        if aa_repo_match(source):
            with util.ChrootableTarget(target,
                                       sys_resolvconf=True) as in_chroot:
                try:
                    in_chroot.subp(["add-apt-repository", source],
                                   retries=(1, 2, 5, 10))
                except util.ProcessExecutionError:
                    LOG.exception("add-apt-repository failed.")
                    raise
            continue

        sourcefn = util.target_path(target, ent['filename'])
        try:
            contents = "%s\n" % (source)
            util.write_file(sourcefn, contents, omode="a")
        except IOError as detail:
            LOG.exception("failed write to file %s: %s", sourcefn, detail)
            raise

    util.apt_update(target=target,
                    force=True,
                    comment="apt-source changed config")

    return
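A hypothetical srcdict of the shape the function above consumes, with made-up file names, mirror and PPA; the commented-out call shows how the template params and repo matcher would be supplied:

# illustrative only; names and URIs are made up
srcdict = {
    'my-repo.list': {
        'source': 'deb $MIRROR $RELEASE multiverse',
    },
    'my-ppa': {
        # matched by aa_repo_match and handed to add-apt-repository
        'source': 'ppa:some-team/some-ppa',
    },
}
# add_apt_sources(srcdict, target='/tmp/target',
#                 template_params={'RELEASE': 'fakerel',
#                                  'MIRROR': 'http://archive.ubuntu.com/ubuntu/'},
#                 aa_repo_match=lambda source: source.startswith('ppa:'))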
Example #23
    def test_no_target_with_path(self):
        self.assertEqual("/my/path", util.target_path(None, "/my/path"))
Example #24
def target_is_rhel(target):
    """Check if RHEL specific file is present at target"""
    if target:
        return os.path.exists(util.target_path(target, 'etc/redhat-release'))

    return False
Example #25
def curthooks(args):
    state = util.load_command_environment()

    if args.target is not None:
        target = args.target
    else:
        target = state['target']

    if target is None:
        sys.stderr.write("Unable to find target.  "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    cfg = config.load_command_config(args, state)
    stack_prefix = state.get('report_stack_prefix', '')

    # if curtin-hooks hook exists in target we can defer to the in-target hooks
    if util.run_hook_if_exists(target, 'curtin-hooks'):
        # For vmtests to force execute centos_apply_network_config, uncomment
        # the value in examples/tests/centos_defaults.yaml
        if cfg.get('_ammend_centos_curthooks'):
            if cfg.get('cloudconfig'):
                handle_cloudconfig(cfg['cloudconfig'],
                                   base_dir=util.target_path(
                                       target, 'etc/cloud/cloud.cfg.d'))

            if target_is_centos(target) or target_is_rhel(target):
                LOG.info('Detected RHEL/CentOS image, running extra hooks')
                with events.ReportEventStack(
                        name=stack_prefix,
                        reporting_enabled=True,
                        level="INFO",
                        description="Configuring CentOS for first boot"):
                    centos_apply_network_config(cfg.get('network', {}), target)
        sys.exit(0)

    if target_is_ubuntu_core(target):
        LOG.info('Detected Ubuntu-Core image, running hooks')
        with events.ReportEventStack(
                name=stack_prefix,
                reporting_enabled=True,
                level="INFO",
                description="Configuring Ubuntu-Core for first boot"):
            ubuntu_core_curthooks(cfg, target)
        sys.exit(0)

    with events.ReportEventStack(
            name=stack_prefix + '/writing-config',
            reporting_enabled=True,
            level="INFO",
            description="configuring apt configuring apt"):
        do_apt_config(cfg, target)
        disable_overlayroot(cfg, target)

    # LP: #1742560 prevent zfs-dkms from being installed (Xenial)
    if util.lsb_release(target=target)['codename'] == 'xenial':
        util.apt_update(target=target)
        with util.ChrootableTarget(target) as in_chroot:
            in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms'])

    # packages may be needed prior to installing kernel
    with events.ReportEventStack(name=stack_prefix +
                                 '/installing-missing-packages',
                                 reporting_enabled=True,
                                 level="INFO",
                                 description="installing missing packages"):
        install_missing_packages(cfg, target)

    # If a /etc/iscsi/nodes/... file was created by block_meta then it
    # needs to be copied onto the target system
    nodes_location = os.path.join(os.path.split(state['fstab'])[0], "nodes")
    if os.path.exists(nodes_location):
        copy_iscsi_conf(nodes_location, target)
        # do we need to reconfigure open-iscsi?

    # If an mdadm.conf file was created by block_meta then it needs to be
    # copied onto the target system
    mdadm_location = os.path.join(
        os.path.split(state['fstab'])[0], "mdadm.conf")
    if os.path.exists(mdadm_location):
        copy_mdadm_conf(mdadm_location, target)
        # as per https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/964052
        # reconfigure mdadm
        util.subp(['dpkg-reconfigure', '--frontend=noninteractive', 'mdadm'],
                  data=None,
                  target=target)

    with events.ReportEventStack(name=stack_prefix + '/installing-kernel',
                                 reporting_enabled=True,
                                 level="INFO",
                                 description="installing kernel"):
        setup_zipl(cfg, target)
        install_kernel(cfg, target)
        run_zipl(cfg, target)
        restore_dist_interfaces(cfg, target)

    with events.ReportEventStack(name=stack_prefix + '/setting-up-swap',
                                 reporting_enabled=True,
                                 level="INFO",
                                 description="setting up swap"):
        add_swap(cfg, target, state.get('fstab'))

    with events.ReportEventStack(name=stack_prefix +
                                 '/apply-networking-config',
                                 reporting_enabled=True,
                                 level="INFO",
                                 description="apply networking config"):
        apply_networking(target, state)

    with events.ReportEventStack(name=stack_prefix + '/writing-etc-fstab',
                                 reporting_enabled=True,
                                 level="INFO",
                                 description="writing etc/fstab"):
        copy_fstab(state.get('fstab'), target)

    with events.ReportEventStack(name=stack_prefix + '/configuring-multipath',
                                 reporting_enabled=True,
                                 level="INFO",
                                 description="configuring multipath"):
        detect_and_handle_multipath(cfg, target)

    with events.ReportEventStack(
            name=stack_prefix + '/system-upgrade',
            reporting_enabled=True,
            level="INFO",
            description="updating packages on target system"):
        system_upgrade(cfg, target)

    with events.ReportEventStack(
            name=stack_prefix + '/pollinate-user-agent',
            reporting_enabled=True,
            level="INFO",
            description="configuring pollinate user-agent on target system"):
        handle_pollinate_user_agent(cfg, target)

    # If a crypttab file was created by block_meta then it needs to be copied
    # onto the target system, and update_initramfs() needs to be run, so that
    # the cryptsetup hooks are properly configured on the installed system and
    # it will be able to open encrypted volumes at boot.
    crypttab_location = os.path.join(
        os.path.split(state['fstab'])[0], "crypttab")
    if os.path.exists(crypttab_location):
        copy_crypttab(crypttab_location, target)
        update_initramfs(target)

    # If udev dname rules were created, copy them to target
    udev_rules_d = os.path.join(state['scratch'], "rules.d")
    if os.path.isdir(udev_rules_d):
        copy_dname_rules(udev_rules_d, target)

    # As a rule, ARMv7 systems don't use grub. This may change some
    # day, but for now, assume no. They do require the initramfs
    # to be updated, and this also triggers boot loader setup via
    # flash-kernel.
    machine = platform.machine()
    if (machine.startswith('armv7') or machine.startswith('s390x')
            or machine.startswith('aarch64') and not util.is_uefi_bootable()):
        update_initramfs(target)
    else:
        setup_grub(cfg, target)

    sys.exit(0)
Example #26
def target_is_centos(target):
    """Check if CentOS specific file is present at target"""
    if target:
        return os.path.exists(util.target_path(target, 'etc/centos-release'))

    return False
Example #27
    def test_trailing_slash(self):
        self.assertEqual("/my/target/my/path",
                         util.target_path("/my/target/", "/my/path"))
Example #28
def centos_apply_network_config(netcfg, target=None):
    """ CentOS images execute built-in curthooks which only supports
        simple networking configuration.  This hook enables advanced
        network configuration via config passthrough to the target.
    """
    def cloud_init_repo(version):
        if not version:
            raise ValueError('Missing required version parameter')

        return CLOUD_INIT_YUM_REPO_TEMPLATE % version

    if netcfg:
        LOG.info('Removing embedded network configuration (if present)')
        ifcfgs = glob.glob(
            util.target_path(target, 'etc/sysconfig/network-scripts') +
            '/ifcfg-*')
        # remove ifcfg-* (except ifcfg-lo)
        for ifcfg in ifcfgs:
            if os.path.basename(ifcfg) != "ifcfg-lo":
                util.del_file(ifcfg)

        LOG.info(
            'Checking cloud-init in target [%s] for network '
            'configuration passthrough support.', target)
        passthrough = net.netconfig_passthrough_available(target)
        LOG.debug('passthrough available via in-target: %s', passthrough)

        # if in-target cloud-init is not updated, upgrade via cloud-init repo
        if not passthrough:
            cloud_init_yum_repo = (util.target_path(
                target, 'etc/yum.repos.d/curtin-cloud-init.repo'))
            # Inject cloud-init daily yum repo
            util.write_file(cloud_init_yum_repo,
                            content=cloud_init_repo(rpm_get_dist_id(target)))

            # we separate the installation of repository packages (epel,
            # cloud-init-el-release) as we need a new invocation of yum
            # to read the newly installed repo files.
            YUM_CMD = ['yum', '-y', '--noplugins', 'install']
            retries = [1] * 30
            with util.ChrootableTarget(target) as in_chroot:
                # ensure up-to-date ca-certificates to handle https mirror
                # connections
                in_chroot.subp(YUM_CMD + ['ca-certificates'],
                               capture=True,
                               log_captured=True,
                               retries=retries)
                in_chroot.subp(YUM_CMD + ['epel-release'],
                               capture=True,
                               log_captured=True,
                               retries=retries)
                in_chroot.subp(YUM_CMD + ['cloud-init-el-release'],
                               log_captured=True,
                               capture=True,
                               retries=retries)
                in_chroot.subp(YUM_CMD + ['cloud-init'],
                               capture=True,
                               log_captured=True,
                               retries=retries)

            # remove cloud-init el-stable bootstrap repo config as the
            # cloud-init-el-release package points to the correct repo
            util.del_file(cloud_init_yum_repo)

            # install bridge-utils if needed
            with util.ChrootableTarget(target) as in_chroot:
                try:
                    in_chroot.subp(['rpm', '-q', 'bridge-utils'],
                                   capture=False,
                                   rcs=[0])
                except util.ProcessExecutionError:
                    LOG.debug('Image missing bridge-utils package, installing')
                    in_chroot.subp(YUM_CMD + ['bridge-utils'],
                                   capture=True,
                                   log_captured=True,
                                   retries=retries)

    LOG.info('Passing network configuration through to target')
    net.render_netconfig_passthrough(target, netconfig={'network': netcfg})
Example #29
    def test_bunch_of_slashes_in_path(self):
        self.assertEqual("/target/my/path/",
                         util.target_path("/target/", "//my/path/"))
        self.assertEqual("/target/my/path/",
                         util.target_path("/target/", "///my/path/"))
Example #30
    def test_trusty_source_lists(self, m_get_arch, m_lsb_release):
        """Support mirror equivalency with and without trailing /.

        Trusty official images do not have a trailing slash on
            http://archive.ubuntu.com/ubuntu ."""

        orig_primary = apt_config.PRIMARY_ARCH_MIRRORS['PRIMARY']
        orig_security = apt_config.PRIMARY_ARCH_MIRRORS['SECURITY']
        msg = "Test is invalid. %s mirror does not end in a /."
        self.assertEqual(orig_primary[-1], "/", msg % "primary")
        self.assertEqual(orig_security[-1], "/", msg % "security")
        orig_primary = orig_primary[:-1]
        orig_security = orig_security[:-1]

        m_lsb_release.return_value = {
            'codename': 'trusty',
            'description': 'Ubuntu 14.04.5 LTS',
            'id': 'Ubuntu',
            'release': '14.04'
        }

        target = self.new_root
        my_primary = 'http://fixed-primary.ubuntu.com/ubuntu'
        my_security = 'http://fixed-security.ubuntu.com/ubuntu'
        cfg = {
            'preserve_sources_list': False,
            'primary': [{
                'arches': ['amd64'],
                'uri': my_primary
            }],
            'security': [{
                'arches': ['amd64'],
                'uri': my_security
            }]
        }

        # this is taken from a trusty image /etc/apt/sources.list
        tmpl = textwrap.dedent("""\
            deb {mirror} {release} {comps}
            deb {mirror} {release}-updates {comps}
            deb {mirror} {release}-backports {comps}
            deb {security} {release}-security {comps}
            # not modified
            deb http://my.example.com/updates testing main
            """)

        release = 'trusty'
        comps = 'main universe multiverse restricted'
        easl = util.target_path(target, 'etc/apt/sources.list')

        orig_content = tmpl.format(mirror=orig_primary,
                                   security=orig_security,
                                   release=release,
                                   comps=comps)
        orig_content_slash = tmpl.format(mirror=orig_primary + "/",
                                         security=orig_security + "/",
                                         release=release,
                                         comps=comps)
        expected = tmpl.format(mirror=my_primary,
                               security=my_security,
                               release=release,
                               comps=comps)

        # Avoid useless test. Make sure the strings don't start out equal.
        self.assertNotEqual(expected, orig_content)

        util.write_file(easl, orig_content)
        apt_config.handle_apt(cfg, target)
        self.assertEqual(expected, util.load_file(easl))

        util.write_file(easl, orig_content_slash)
        apt_config.handle_apt(cfg, target)
        self.assertEqual(expected, util.load_file(easl))