def test_upstart_frequency_single(self):
        # files should be written out when frequency is per-instance
        new_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, new_root)
        freq = PER_INSTANCE

        self.patchOS(new_root)
        self.patchUtils(new_root)
        paths = helpers.Paths({
            'upstart_dir': "/etc/upstart",
        })

        upstart_job.SUITABLE_UPSTART = True
        util.ensure_dir("/run")
        util.ensure_dir("/etc/upstart")

        with mock.patch.object(util, 'subp') as mockobj:
            h = upstart_job.UpstartJobPartHandler(paths)
            h.handle_part('', handlers.CONTENT_START,
                          None, None, None)
            h.handle_part('blah', 'text/upstart-job',
                          'test.conf', 'blah', freq)
            h.handle_part('', handlers.CONTENT_END,
                          None, None, None)

            self.assertEqual(len(os.listdir('/etc/upstart')), 1)

        mockobj.assert_called_once_with(
            ['initctl', 'reload-configuration'], capture=False)
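The pattern in this example, patching util.subp with mock.patch.object and asserting the call after the context manager exits, works with the standard library alone. A minimal, self-contained sketch (Runner and reload_config are illustrative stand-ins, not cloud-init names):

import unittest.mock as mock

class Runner:
    @staticmethod
    def subp(cmd, capture=True):
        raise RuntimeError("must not run for real in tests")

def reload_config(runner):
    # mirrors the handler's post-write 'initctl reload-configuration' call
    runner.subp(["initctl", "reload-configuration"], capture=False)

with mock.patch.object(Runner, "subp") as mockobj:
    reload_config(Runner)

# the mock object stays valid after the with-block exits, exactly as above
mockobj.assert_called_once_with(["initctl", "reload-configuration"], capture=False)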
Example #2
 def setUp(self):
     super(TestMain, self).setUp()
     self.new_root = self.tmp_dir()
     self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
     os.makedirs(self.cloud_dir)
     self.replicateTestRoot('simple_ubuntu', self.new_root)
     self.cfg = {
         'datasource_list': ['None'],
         'runcmd': ['ls /etc'],  # test ALL_DISTROS
         'system_info': {'paths': {'cloud_dir': self.cloud_dir,
                                   'run_dir': self.new_root}},
         'write_files': [
             {
                 'path': '/etc/blah.ini',
                 'content': 'blah',
                 'permissions': 0o755,
             },
         ],
         'cloud_init_modules': ['write-files', 'runcmd'],
     }
     cloud_cfg = yaml_dumps(self.cfg)
     ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
     self.cloud_cfg_file = os.path.join(
         self.new_root, 'etc', 'cloud', 'cloud.cfg')
     write_file(self.cloud_cfg_file, cloud_cfg)
     self.patchOS(self.new_root)
     self.patchUtils(self.new_root)
     self.stderr = StringIO()
     self.patchStdoutAndStderr(stderr=self.stderr)
def handle(_name, cfg, cloud, log, _args):
    """
    Basically turn a top level 'landscape' entry with a 'client' dict
    and render it to ConfigObj format under '[client]' section in
    /etc/landscape/client.conf
    """

    ls_cloudcfg = cfg.get("landscape", {})

    if not isinstance(ls_cloudcfg, dict):
        raise RuntimeError(
            "'landscape' key existed in config, but not a dictionary type,"
            " is a %s instead" % type_utils.obj_name(ls_cloudcfg))
    if not ls_cloudcfg:
        return

    cloud.distro.install_packages(('landscape-client',))

    merge_data = [
        LSC_BUILTIN_CFG,
        LSC_CLIENT_CFG_FILE,
        ls_cloudcfg,
    ]
    merged = merge_together(merge_data)
    contents = StringIO()
    merged.write(contents)

    util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
    util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
    log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)

    util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
    util.subp(["service", "landscape-client", "restart"])
    def setUp(self):
        super(TestSimpleRun, self).setUp()
        self.new_root = self.tmp_dir()
        self.replicateTestRoot('simple_ubuntu', self.new_root)

        # Seed cloud.cfg file for our tests
        self.cfg = {
            'datasource_list': ['None'],
            'runcmd': ['ls /etc'],  # test ALL_DISTROS
            'spacewalk': {},  # test non-ubuntu distros module definition
            'system_info': {'paths': {'run_dir': self.new_root}},
            'write_files': [
                {
                    'path': '/etc/blah.ini',
                    'content': 'blah',
                    'permissions': 0o755,
                },
            ],
            'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'],
        }
        cloud_cfg = util.yaml_dumps(self.cfg)
        util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
        util.write_file(os.path.join(self.new_root, 'etc',
                                     'cloud', 'cloud.cfg'), cloud_cfg)
        self.patchOS(self.new_root)
        self.patchUtils(self.new_root)
    def test_none_ds_forces_run_via_unverified_modules(self):
        """run_section forced skipped modules by using unverified_modules."""

        # re-write cloud.cfg with unverified_modules override
        cfg = copy.deepcopy(self.cfg)
        cfg['unverified_modules'] = ['spacewalk']  # Would have skipped
        cloud_cfg = util.yaml_dumps(cfg)
        util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
        util.write_file(os.path.join(self.new_root, 'etc',
                                     'cloud', 'cloud.cfg'), cloud_cfg)

        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.instancify()
        initer.update()
        initer.cloudify().run('consume_data', initer.consume_data,
                              args=[PER_INSTANCE], freq=PER_INSTANCE)

        mods = stages.Modules(initer)
        (which_ran, failures) = mods.run_section('cloud_init_modules')
        self.assertTrue(len(failures) == 0)
        self.assertIn('spacewalk', which_ran)
        self.assertIn(
            "running unverified_modules: 'spacewalk'",
            self.logs.getvalue())
    def test_none_ds_run_with_no_config_modules(self):
        """run_section will report no modules run when none are configured."""

        # re-write cloud.cfg with cloud_init_modules set to None
        cfg = copy.deepcopy(self.cfg)
        # Represent empty configuration in /etc/cloud/cloud.cfg
        cfg['cloud_init_modules'] = None
        cloud_cfg = util.yaml_dumps(cfg)
        util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
        util.write_file(os.path.join(self.new_root, 'etc',
                                     'cloud', 'cloud.cfg'), cloud_cfg)

        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.instancify()
        initer.update()
        initer.cloudify().run('consume_data', initer.consume_data,
                              args=[PER_INSTANCE], freq=PER_INSTANCE)

        mods = stages.Modules(initer)
        (which_ran, failures) = mods.run_section('cloud_init_modules')
        self.assertTrue(len(failures) == 0)
        self.assertEqual([], which_ran)
    def test_upstart_frequency_single(self):
        # files should be written out when frequency is per-instance
        new_root = self.makeDir()
        freq = PER_INSTANCE

        self.patchOS(new_root)
        self.patchUtils(new_root)
        paths = helpers.Paths({
            'upstart_dir': "/etc/upstart",
        })

        upstart_job.SUITABLE_UPSTART = True
        util.ensure_dir("/run")
        util.ensure_dir("/etc/upstart")

        mock_subp = self.mocker.replace(util.subp, passthrough=False)
        mock_subp(["initctl", "reload-configuration"], capture=False)
        self.mocker.replay()

        h = upstart_job.UpstartJobPartHandler(paths)
        h.handle_part('', handlers.CONTENT_START,
                      None, None, None)
        h.handle_part('blah', 'text/upstart-job',
                      'test.conf', 'blah', freq)
        h.handle_part('', handlers.CONTENT_END,
                      None, None, None)

        self.assertEqual(1, len(os.listdir('/etc/upstart')))
 def setUp(self):
     super(TestJinjaTemplatePartHandler, self).setUp()
     self.tmp = self.tmp_dir()
     self.run_dir = os.path.join(self.tmp, 'run_dir')
     util.ensure_dir(self.run_dir)
     self.paths = helpers.Paths({
         'cloud_dir': self.tmp, 'run_dir': self.run_dir})
def write_files(datadir, files, dirmode=None):

    def _redact_password(cnt, fname):
        """Azure provides the UserPassword in plain text. So we redact it"""
        try:
            root = ET.fromstring(cnt)
            for elem in root.iter():
                if ('UserPassword' in elem.tag and
                   elem.text != DEF_PASSWD_REDACTION):
                    elem.text = DEF_PASSWD_REDACTION
            return ET.tostring(root)
        except Exception:
            LOG.critical("failed to redact userpassword in {}".format(fname))
            return cnt

    if not datadir:
        return
    if not files:
        files = {}
    util.ensure_dir(datadir, dirmode)
    for (name, content) in files.items():
        fname = os.path.join(datadir, name)
        if 'ovf-env.xml' in name:
            content = _redact_password(content, fname)
        util.write_file(filename=fname, content=content, mode=0o600)
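The redaction helper is plain xml.etree; isolated and runnable, with an invented XML fragment standing in for a real ovf-env.xml:

import xml.etree.ElementTree as ET

DEF_PASSWD_REDACTION = 'REDACTED'
cnt = '<Env><UserPassword>s3cret</UserPassword></Env>'

root = ET.fromstring(cnt)
for elem in root.iter():
    if 'UserPassword' in elem.tag and elem.text != DEF_PASSWD_REDACTION:
        elem.text = DEF_PASSWD_REDACTION
print(ET.tostring(root))  # b'<Env><UserPassword>REDACTED</UserPassword></Env>'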
Example #10
    def test_none_ds(self):
        new_root = self.makeDir()
        self.replicateTestRoot("simple_ubuntu", new_root)
        cfg = {"datasource_list": ["None"], "cloud_init_modules": ["write-files"]}
        ud = self.readResource("user_data.1.txt")
        cloud_cfg = util.yaml_dumps(cfg)
        util.ensure_dir(os.path.join(new_root, "etc", "cloud"))
        util.write_file(os.path.join(new_root, "etc", "cloud", "cloud.cfg"), cloud_cfg)
        self._patchIn(new_root)

        # Now start verifying what's created
        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.datasource.userdata_raw = ud
        _iid = initer.instancify()
        initer.update()
        initer.cloudify().run("consume_data", initer.consume_data, args=[PER_INSTANCE], freq=PER_INSTANCE)
        mirrors = initer.distro.get_option("package_mirrors")
        self.assertEqual(1, len(mirrors))
        mirror = mirrors[0]
        self.assertEqual(mirror["arches"], ["i386", "amd64", "blah"])
        mods = stages.Modules(initer)
        (which_ran, failures) = mods.run_section("cloud_init_modules")
        self.assertTrue(len(failures) == 0)
        self.assertTrue(os.path.exists("/etc/blah.ini"))
        self.assertIn("write-files", which_ran)
        contents = util.load_file("/etc/blah.ini")
        self.assertEqual(contents, "blah")
Example #11
    def test_collect_logs_includes_optional_userdata(self, m_getuid):
        """collect-logs include userdata when --include-userdata is set."""
        m_getuid.return_value = 0
        log1 = self.tmp_path('cloud-init.log', self.new_root)
        write_file(log1, 'cloud-init-log')
        log2 = self.tmp_path('cloud-init-output.log', self.new_root)
        write_file(log2, 'cloud-init-output-log')
        userdata = self.tmp_path('user-data.txt', self.new_root)
        write_file(userdata, 'user-data')
        ensure_dir(self.run_dir)
        write_file(self.tmp_path('results.json', self.run_dir), 'results')
        write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
                   'sensitive')
        output_tarfile = self.tmp_path('logs.tgz')

        date = datetime.utcnow().date().strftime('%Y-%m-%d')
        date_logdir = 'cloud-init-logs-{0}'.format(date)

        version_out = '/usr/bin/cloud-init 18.2fake\n'
        expected_subp = {
            ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
                '0.7fake',
            ('cloud-init', '--version'): version_out,
            ('dmesg',): 'dmesg-out\n',
            ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
            ('tar', 'czvf', output_tarfile, date_logdir): ''
        }

        def fake_subp(cmd):
            cmd_tuple = tuple(cmd)
            if cmd_tuple not in expected_subp:
                raise AssertionError(
                    'Unexpected command provided to subp: {0}'.format(cmd))
            if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
                subp(cmd)  # Pass through tar cmd so we can check output
            return expected_subp[cmd_tuple], ''

        fake_stderr = mock.MagicMock()

        wrap_and_call(
            'cloudinit.cmd.devel.logs',
            {'subp': {'side_effect': fake_subp},
             'sys.stderr': {'new': fake_stderr},
             'CLOUDINIT_LOGS': {'new': [log1, log2]},
             'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
             'USER_DATA_FILE': {'new': userdata}},
            logs.collect_logs, output_tarfile, include_userdata=True)
        # unpack the tarfile and check file contents
        subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
        out_logdir = self.tmp_path(date_logdir, self.new_root)
        self.assertEqual(
            'user-data',
            load_file(os.path.join(out_logdir, 'user-data.txt')))
        self.assertEqual(
            'sensitive',
            load_file(os.path.join(out_logdir, 'run', 'cloud-init',
                                   INSTANCE_JSON_SENSITIVE_FILE)))
        fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
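fake_subp above is a table-driven stub: known commands map to canned stdout, and anything unexpected fails the test. The same idea in isolation (the command table here is invented):

from unittest import mock

expected = {
    ('dmesg',): 'dmesg-out\n',
    ('cloud-init', '--version'): '/usr/bin/cloud-init 18.2fake\n',
}

def fake_subp(cmd):
    cmd_tuple = tuple(cmd)
    if cmd_tuple not in expected:
        raise AssertionError('Unexpected command provided: {0}'.format(cmd))
    return expected[cmd_tuple], ''  # (stdout, stderr) pair, matching subp

subp = mock.MagicMock(side_effect=fake_subp)
out, _err = subp(['dmesg'])
assert out == 'dmesg-out\n'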
def write_files(datadir, files, dirmode=None):
    if not datadir:
        return
    if not files:
        files = {}
    util.ensure_dir(datadir, dirmode)
    for (name, content) in files.items():
        util.write_file(filename=os.path.join(datadir, name),
                        content=content, mode=0o600)
Example #13
 def replicateTestRoot(self, example_root, target_root):
     real_root = self.resourceLocation()
     real_root = os.path.join(real_root, 'roots', example_root)
     for (dir_path, _dirnames, filenames) in os.walk(real_root):
         real_path = dir_path
         make_path = rebase_path(real_path[len(real_root):], target_root)
         util.ensure_dir(make_path)
         for f in filenames:
             real_path = util.abs_join(real_path, f)
             make_path = util.abs_join(make_path, f)
             shutil.copy(real_path, make_path)
def write_boot_content(content, content_f, link=None, shebang=False,
                       mode=0o400):
    """
    Write the content to content_f. Under the following rules:
        1. If no content, remove the file
        2. Write the content
        3. If executable and no file magic, add it
        4. If there is a link, create it

    @param content: what to write
    @param content_f: the file name
    @param link: if defined, location to create a symlink to
    @param shebang: if no file magic, set shebang
    @param mode: file mode

    Because of the way that cloud-init executes scripts (no shell),
    a script will fail to execute if it does not have a shebang line
    ("magic bit") set. If shebang=True, the script is checked for one
    and, per the SmartOS default of assuming bash, "#!/bin/bash" is
    prepended when the file is plain text.
    """

    if not content and os.path.exists(content_f):
        os.unlink(content_f)
    if link and os.path.islink(link):
        os.unlink(link)
    if not content:
        return

    util.write_file(content_f, content, mode=mode)

    if shebang and not content.startswith("#!"):
        try:
            cmd = ["file", "--brief", "--mime-type", content_f]
            (f_type, _err) = util.subp(cmd)
            LOG.debug("script %s mime type is %s", content_f, f_type)
            if f_type.strip() == "text/plain":
                new_content = "\n".join(["#!/bin/bash", content])
                util.write_file(content_f, new_content, mode=mode)
                LOG.debug("added shebang to file %s", content_f)

        except Exception:
            util.logexc(LOG, "Failed to identify script type for %s",
                        content_f)

    if link:
        try:
            if os.path.islink(link):
                os.unlink(link)
            if content and os.path.exists(content_f):
                util.ensure_dir(os.path.dirname(link))
                os.symlink(content_f, link)
        except IOError as e:
            util.logexc(LOG, "failed establishing content link: %s", e)
Example #15
    def render_network_state(self, network_state, templates=None, target=None):
        fpeni = util.target_path(target, self.eni_path)
        util.ensure_dir(os.path.dirname(fpeni))
        header = self.eni_header if self.eni_header else ""
        util.write_file(fpeni, header + self._render_interfaces(network_state))

        if self.netrules_path:
            netrules = util.target_path(target, self.netrules_path)
            util.ensure_dir(os.path.dirname(netrules))
            util.write_file(netrules,
                            self._render_persistent_net(network_state))
Example #16
def collect_logs(tarfile, include_userdata, verbosity=0):
    """Collect all cloud-init logs and tar them up into the provided tarfile.

    @param tarfile: The path of the tar-gzipped file to create.
    @param include_userdata: Boolean, true means include user-data.
    @param verbosity: Integer verbosity level; higher values print more detail.
    """
    if include_userdata and os.getuid() != 0:
        sys.stderr.write(
            "To include userdata, root user is required."
            " Try sudo cloud-init collect-logs\n")
        return 1
    tarfile = os.path.abspath(tarfile)
    date = datetime.utcnow().date().strftime('%Y-%m-%d')
    log_dir = 'cloud-init-logs-{0}'.format(date)
    with tempdir(dir='/tmp') as tmp_dir:
        log_dir = os.path.join(tmp_dir, log_dir)
        version = _write_command_output_to_file(
            ['cloud-init', '--version'],
            os.path.join(log_dir, 'version'),
            "cloud-init --version", verbosity)
        dpkg_ver = _write_command_output_to_file(
            ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
            os.path.join(log_dir, 'dpkg-version'),
            "dpkg version", verbosity)
        if not version:
            version = dpkg_ver if dpkg_ver else "not-available"
        _debug("collected cloud-init version: %s\n" % version, 1, verbosity)
        _write_command_output_to_file(
            ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
            "dmesg output", verbosity)
        _write_command_output_to_file(
            ['journalctl', '--boot=0', '-o', 'short-precise'],
            os.path.join(log_dir, 'journal.txt'),
            "systemd journal of current boot", verbosity)

        for log in CLOUDINIT_LOGS:
            _collect_file(log, log_dir, verbosity)
        if include_userdata:
            _collect_file(USER_DATA_FILE, log_dir, verbosity)
        run_dir = os.path.join(log_dir, 'run')
        ensure_dir(run_dir)
        if os.path.exists(CLOUDINIT_RUN_DIR):
            shutil.copytree(CLOUDINIT_RUN_DIR,
                            os.path.join(run_dir, 'cloud-init'),
                            ignore=_copytree_ignore_sensitive_files)
            _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
        else:
            _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
                   verbosity)
        with chdir(tmp_dir):
            subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
    sys.stderr.write("Wrote %s\n" % tarfile)
    return 0
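The resulting tarball keeps everything under a dated cloud-init-logs-YYYY-MM-DD directory at the archive root. That layout is reproducible with the stdlib alone; a sketch (collect_into_tarball is a hypothetical helper, not cloud-init API):

import os
import shutil
import tarfile
import tempfile
from datetime import datetime

def collect_into_tarball(log_files, tarpath):
    date = datetime.utcnow().date().strftime('%Y-%m-%d')
    log_dir = 'cloud-init-logs-{0}'.format(date)
    with tempfile.TemporaryDirectory() as tmp_dir:
        staging = os.path.join(tmp_dir, log_dir)
        os.makedirs(staging)
        for log in log_files:
            if os.path.exists(log):
                shutil.copy(log, staging)  # like _collect_file
        with tarfile.open(tarpath, 'w:gz') as tar:
            tar.add(staging, arcname=log_dir)  # dated dir at archive root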
Example #17
def populate_dir(path, files):
    if not os.path.exists(path):
        os.makedirs(path)
    for (name, content) in files.items():
        p = os.path.join(path, name)
        util.ensure_dir(os.path.dirname(p))
        with open(p, "wb") as fp:
            if isinstance(content, six.binary_type):
                fp.write(content)
            else:
                fp.write(content.encode('utf-8'))
def handle(name, cfg, cloud, log, _args):
    # If there isn't a salt key in the configuration don't do anything
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    s_cfg = cfg['salt_minion']
    const = SaltConstants(cfg=s_cfg)

    # Start by installing the salt package ...
    cloud.distro.install_packages(const.pkg_name)

    # Ensure we can configure files at the right dir
    util.ensure_dir(const.conf_dir)

    # ... and then update the salt configuration
    if 'conf' in s_cfg:
        # Add all sections from the conf object to minion config file
        minion_config = os.path.join(const.conf_dir, 'minion')
        minion_data = util.yaml_dumps(s_cfg.get('conf'))
        util.write_file(minion_config, minion_data)

    if 'grains' in s_cfg:
        # add grains to /etc/salt/grains
        grains_config = os.path.join(const.conf_dir, 'grains')
        grains_data = util.yaml_dumps(s_cfg.get('grains'))
        util.write_file(grains_config, grains_data)

    # ... copy the key pair if specified
    if 'public_key' in s_cfg and 'private_key' in s_cfg:
        pki_dir_default = os.path.join(const.conf_dir, "pki/minion")
        if not os.path.isdir(pki_dir_default):
            pki_dir_default = os.path.join(const.conf_dir, "pki")

        pki_dir = s_cfg.get('pki_dir', pki_dir_default)
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            pub_name = os.path.join(pki_dir, 'minion.pub')
            pem_name = os.path.join(pki_dir, 'minion.pem')
            util.write_file(pub_name, s_cfg['public_key'])
            util.write_file(pem_name, s_cfg['private_key'])

    # we need to have the salt minion service enabled in rc in order to be
    # able to start the service. this only applies to FreeBSD servers.
    if cloud.distro.osfamily == 'freebsd':
        cloud.distro.updatercconf('salt_minion_enable', 'YES')

    # restart salt-minion. 'service' will start even if not started. if it
    # was started, it needs to be restarted for config change.
    util.subp(['service', const.srv_name, 'restart'], capture=False)
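Pieced together from the keys this handler reads, a cloud-config 'salt_minion' entry would look roughly like the following (every value is invented):

salt_minion_example = {
    'salt_minion': {
        'conf': {'master': 'salt.example.com'},    # -> <conf_dir>/minion
        'grains': {'role': 'web'},                 # -> <conf_dir>/grains
        'public_key': '-----BEGIN PUBLIC KEY-----\n...',
        'private_key': '-----BEGIN PRIVATE KEY-----\n...',
        'pki_dir': '/etc/salt/pki/minion',         # optional override
    },
}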
Example #19
    def render_network_state(self, target, network_state):
        fpeni = os.path.join(target, self.eni_path)
        util.ensure_dir(os.path.dirname(fpeni))
        util.write_file(fpeni, self._render_interfaces(network_state))

        if self.netrules_path:
            netrules = os.path.join(target, self.netrules_path)
            util.ensure_dir(os.path.dirname(netrules))
            util.write_file(netrules,
                            self._render_persistent_net(network_state))

        if self.links_path_prefix:
            self._render_systemd_links(target, network_state,
                                       links_prefix=self.links_path_prefix)
Example #20
    def test_remove_artifacts_returns_one_on_errors(self):
        """remove_artifacts returns non-zero on failure and prints an error."""
        ensure_dir(self.artifact_dir)
        ensure_dir(os.path.join(self.artifact_dir, 'dir1'))

        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
            retcode = wrap_and_call(
                'cloudinit.cmd.clean',
                {'del_dir': {'side_effect': OSError('oops')},
                 'Init': {'side_effect': self.init_class}},
                clean.remove_artifacts, remove_logs=False)
        self.assertEqual(1, retcode)
        self.assertEqual(
            'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir,
            m_stderr.getvalue())
Example #21
 def test_handle_args_defaults_instance_data(self):
     """When no instance_data argument, default to configured run_dir."""
     user_data = self.tmp_path('user-data', dir=self.tmp)
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     args = self.args(
         user_data=user_data, instance_data=None, debug=False)
     with mock.patch('sys.stderr', new_callable=StringIO):
         self.assertEqual(1, render.handle_args('anyname', args))
     json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
     self.assertIn(
         'Missing instance-data.json file: %s' % json_file,
         self.logs.getvalue())
Example #22
    def test_remove_artifacts_removes_unlinks_symlinks(self):
        """remove_artifacts cleans artifacts dir unlinking any symlinks."""
        dir1 = os.path.join(self.artifact_dir, 'dir1')
        ensure_dir(dir1)
        symlink = os.path.join(self.artifact_dir, 'mylink')
        sym_link(dir1, symlink)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertEqual(0, retcode)
        for path in (dir1, symlink):
            self.assertFalse(
                os.path.exists(path),
                'Unexpected {0} dir'.format(path))
    def test_none_ds(self):
        new_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, new_root)
        self.replicateTestRoot('simple_ubuntu', new_root)
        cfg = {
            'datasource_list': ['None'],
            'write_files': [
                {
                    'path': '/etc/blah.ini',
                    'content': 'blah',
                    'permissions': 0o755,
                },
            ],
            'cloud_init_modules': ['write-files'],
        }
        cloud_cfg = util.yaml_dumps(cfg)
        util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
        util.write_file(os.path.join(new_root, 'etc',
                                     'cloud', 'cloud.cfg'), cloud_cfg)
        self._patchIn(new_root)

        # Now start verifying what's created
        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        self.assertTrue(os.path.exists("/var/lib/cloud"))
        for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
            self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))

        initer.fetch()
        iid = initer.instancify()
        self.assertEqual(iid, 'iid-datasource-none')
        initer.update()
        self.assertTrue(os.path.islink("var/lib/cloud/instance"))

        initer.cloudify().run('consume_data',
                              initer.consume_data,
                              args=[PER_INSTANCE],
                              freq=PER_INSTANCE)

        mods = stages.Modules(initer)
        (which_ran, failures) = mods.run_section('cloud_init_modules')
        self.assertTrue(len(failures) == 0)
        self.assertTrue(os.path.exists('/etc/blah.ini'))
        self.assertIn('write-files', which_ran)
        contents = util.load_file('/etc/blah.ini')
        self.assertEqual(contents, 'blah')
Example #24
def render_network_state(target, network_state, eni="etc/network/interfaces",
                         links_prefix=LINKS_FNAME_PREFIX,
                         netrules='etc/udev/rules.d/70-persistent-net.rules'):

    fpeni = os.path.sep.join((target, eni,))
    util.ensure_dir(os.path.dirname(fpeni))
    with open(fpeni, 'w+') as f:
        f.write(render_interfaces(network_state))

    if netrules:
        netrules = os.path.sep.join((target, netrules,))
        util.ensure_dir(os.path.dirname(netrules))
        with open(netrules, 'w+') as f:
            f.write(render_persistent_net(network_state))

    if links_prefix:
        render_systemd_links(target, network_state, links_prefix)
Example #25
 def test_handle_args_root_uses_sensitive_instance_data(self):
     """When root user, and no instance-data arg, use sensitive.json."""
     user_data = self.tmp_path('user-data', dir=self.tmp)
     write_file(user_data, '##template: jinja\nrendering: {{ my_var }}')
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
     write_file(json_sensitive, '{"my-var": "jinja worked"}')
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     args = self.args(
         user_data=user_data, instance_data=None, debug=False)
     with mock.patch('sys.stderr', new_callable=StringIO):
         with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
             with mock.patch('os.getuid') as m_getuid:
                 m_getuid.return_value = 0
                 self.assertEqual(0, render.handle_args('anyname', args))
     self.assertIn('rendering: jinja worked', m_stdout.getvalue())
Example #26
 def test_handle_args_defaults_instance_data(self):
     """When no instance_data argument, default to configured run_dir."""
     args = self.args(
         debug=False, dump_all=True, format=None, instance_data=None,
         list_keys=False, user_data=None, vendor_data=None, varname=None)
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
         self.assertEqual(1, query.handle_args('anyname', args))
     json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
     self.assertIn(
         'ERROR: Missing instance-data file: %s' % json_file,
         self.logs.getvalue())
     self.assertIn(
         'ERROR: Missing instance-data file: %s' % json_file,
         m_stderr.getvalue())
Example #27
 def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
     # Ensure the dir is included and that
     # it actually exists as a directory
     sudoers_contents = ""
     base_exists = False
     if os.path.exists(sudo_base):
         sudoers_contents = util.load_file(sudo_base)
         base_exists = True
     found_include = False
     for line in sudoers_contents.splitlines():
         line = line.strip()
         include_match = re.search(r"^#includedir\s+(.*)$", line)
         if not include_match:
             continue
         included_dir = include_match.group(1).strip()
         if not included_dir:
             continue
         included_dir = os.path.abspath(included_dir)
         if included_dir == path:
             found_include = True
             break
     if not found_include:
         try:
             if not base_exists:
                 lines = [
                     ("# See sudoers(5) for more information" ' on "#include" directives:'),
                     "",
                     util.make_header(base="added"),
                     "#includedir %s" % (path),
                     "",
                 ]
                 sudoers_contents = "\n".join(lines)
                 util.write_file(sudo_base, sudoers_contents, 0o440)
             else:
                 lines = ["", util.make_header(base="added"), "#includedir %s" % (path), ""]
                 sudoers_contents = "\n".join(lines)
                 util.append_file(sudo_base, sudoers_contents)
             LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
         except IOError as e:
             util.logexc(LOG, "Failed to write %s", sudo_base)
             raise e
     util.ensure_dir(path, 0o750)
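The includedir scan above is a line-by-line regex match; isolated and runnable (the sample sudoers text is made up):

import os
import re

sudoers_contents = "Defaults env_reset\n#includedir /etc/sudoers.d\n"
path = "/etc/sudoers.d"

found_include = False
for line in sudoers_contents.splitlines():
    include_match = re.search(r"^#includedir\s+(.*)$", line.strip())
    if not include_match:
        continue
    if os.path.abspath(include_match.group(1).strip()) == path:
        found_include = True
        break
print(found_include)  # True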
Example #28
    def render_network_state(self, network_state, templates=None, target=None):
        # check network state for version
        # if v2, then extract network_state.config
        # else render_v2_from_state
        fpnplan = os.path.join(util.target_path(target), self.netplan_path)

        util.ensure_dir(os.path.dirname(fpnplan))
        header = self.netplan_header if self.netplan_header else ""

        # render from state
        content = self._render_content(network_state)

        if not header.endswith("\n"):
            header += "\n"
        util.write_file(fpnplan, header + content)

        if self.clean_default:
            _clean_default(target=target)
        self._netplan_generate(run=self._postcmds)
        self._net_setup_link(run=self._postcmds)
Example #29
def setup_user_keys(keys, username, options=None):
    # Make sure the users .ssh dir is setup accordingly
    (ssh_dir, pwent) = users_ssh_info(username)
    if not os.path.isdir(ssh_dir):
        util.ensure_dir(ssh_dir, mode=0o700)
        util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)

    # Turn the 'update' keys given into actual entries
    parser = AuthKeyLineParser()
    key_entries = []
    for k in keys:
        key_entries.append(parser.parse(str(k), options=options))

    # Extract the old and make the new
    (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
    with util.SeLinuxGuard(ssh_dir, recursive=True):
        content = update_authorized_keys(auth_key_entries, key_entries)
        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
        util.write_file(auth_key_fn, content, mode=0o600)
        util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
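Stripped of cloud-init's helpers, the permission handling above reduces to stdlib calls; a sketch (write_authorized_keys is hypothetical, and chown is omitted since it needs root):

import os

def write_authorized_keys(home, content):
    ssh_dir = os.path.join(home, '.ssh')
    os.makedirs(ssh_dir, mode=0o700, exist_ok=True)   # ensure_dir(..., 0o700)
    auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
    fd = os.open(auth_key_fn, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w') as fp:                    # write_file(..., 0o600)
        fp.write(content)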
Example #30
    def test_remove_artifacts_returns_one_on_errors(self):
        """remove_artifacts returns non-zero on failure and prints an error."""
        ensure_dir(self.artifact_dir)
        ensure_dir(os.path.join(self.artifact_dir, 'dir1'))

        with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
            retcode = wrap_and_call('cloudinit.cmd.clean', {
                'del_dir': {
                    'side_effect': OSError('oops')
                },
                'Init': {
                    'side_effect': self.init_class
                }
            },
                                    clean.remove_artifacts,
                                    remove_logs=False)
        self.assertEqual(1, retcode)
        self.assertEqual(
            'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir,
            m_stderr.getvalue())
Example #31
 def test_handle_args_root_fallsback_to_instance_data(self):
     """When no instance_data argument, root falls back to redacted json."""
     args = self.args(
         debug=False, dump_all=True, format=None, instance_data=None,
         list_keys=False, user_data=None, vendor_data=None, varname=None)
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
         with mock.patch('os.getuid') as m_getuid:
             m_getuid.return_value = 0
             self.assertEqual(1, query.handle_args('anyname', args))
     json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
     sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
     self.assertIn(
         'WARNING: Missing root-readable %s. Using redacted %s instead.' % (
             sensitive_file, json_file),
         m_stderr.getvalue())
Example #32
    def test_remove_artifacts_returns_one_on_errors(self):
        """remove_artifacts returns non-zero on failure and prints an error."""
        ensure_dir(self.artifact_dir)
        ensure_dir(os.path.join(self.artifact_dir, "dir1"))

        with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
            retcode = wrap_and_call(
                "cloudinit.cmd.clean",
                {
                    "del_dir": {"side_effect": OSError("oops")},
                    "Init": {"side_effect": self.init_class},
                },
                clean.remove_artifacts,
                remove_logs=False,
            )
        self.assertEqual(1, retcode)
        self.assertEqual(
            "Error:\nCould not remove %s/dir1: oops\n" % self.artifact_dir,
            m_stderr.getvalue(),
        )
Example #33
    def render_network_state(self, network_state, target):
        # check network state for version
        # if v2, then extract network_state.config
        # else render_v2_from_state
        fpnplan = os.path.join(util.target_path(target), self.netplan_path)

        util.ensure_dir(os.path.dirname(fpnplan))
        header = self.netplan_header if self.netplan_header else ""

        # render from state
        content = self._render_content(network_state)

        if not header.endswith("\n"):
            header += "\n"
        util.write_file(fpnplan, header + content)

        if self.clean_default:
            _clean_default(target=target)
        self._netplan_generate(run=self._postcmds)
        self._net_setup_link(run=self._postcmds)
Example #34
def setup_user_keys(keys, username, options=None):
    # Make sure the users .ssh dir is setup accordingly
    (ssh_dir, pwent) = users_ssh_info(username)
    if not os.path.isdir(ssh_dir):
        util.ensure_dir(ssh_dir, mode=0o700)
        util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)

    # Turn the 'update' keys given into actual entries
    parser = AuthKeyLineParser()
    key_entries = []
    for k in keys:
        key_entries.append(parser.parse(str(k), options=options))

    # Extract the old and make the new
    (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
    with util.SeLinuxGuard(ssh_dir, recursive=True):
        content = update_authorized_keys(auth_key_entries, key_entries)
        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
        util.write_file(auth_key_fn, content, mode=0o600)
        util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
Example #35
def handle(name, cfg, cloud, log, _args):
    # If there isn't a salt key in the configuration don't do anything
    if 'salt_minion' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'salt_minion' key in configuration"), name)
        return

    salt_cfg = cfg['salt_minion']

    # Start by installing the salt package ...
    cloud.distro.install_packages(('salt-minion',))

    # Ensure we can configure files at the right dir
    config_dir = salt_cfg.get("config_dir", '/etc/salt')
    util.ensure_dir(config_dir)

    # ... and then update the salt configuration
    if 'conf' in salt_cfg:
        # Add all sections from the conf object to /etc/salt/minion
        minion_config = os.path.join(config_dir, 'minion')
        minion_data = util.yaml_dumps(salt_cfg.get('conf'))
        util.write_file(minion_config, minion_data)

    # ... copy the key pair if specified
    if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
        if os.path.isdir("/etc/salt/pki/minion"):
            pki_dir_default = "/etc/salt/pki/minion"
        else:
            pki_dir_default = "/etc/salt/pki"

        pki_dir = salt_cfg.get('pki_dir', pki_dir_default)
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            pub_name = os.path.join(pki_dir, 'minion.pub')
            pem_name = os.path.join(pki_dir, 'minion.pem')
            util.write_file(pub_name, salt_cfg['public_key'])
            util.write_file(pem_name, salt_cfg['private_key'])

    # restart salt-minion.  'service' will start even if not started.  if it
    # was started, it needs to be restarted for config change.
    util.subp(['service', 'salt-minion', 'restart'], capture=False)
Example #36
    def render_network_state(self,
                             network_state: NetworkState,
                             templates=None,
                             target=None):
        if not templates:
            templates = self.templates
        file_mode = 0o644
        base_sysconf_dir = subp.target_path(target, self.sysconf_dir)
        for path, data in self._render_sysconfig(base_sysconf_dir,
                                                 network_state,
                                                 self.flavor,
                                                 templates=templates).items():
            util.write_file(path, data, file_mode)
        if self.dns_path:
            dns_path = subp.target_path(target, self.dns_path)
            resolv_content = self._render_dns(network_state,
                                              existing_dns_path=dns_path)
            if resolv_content:
                util.write_file(dns_path, resolv_content, file_mode)
        if self.networkmanager_conf_path:
            nm_conf_path = subp.target_path(target,
                                            self.networkmanager_conf_path)
            nm_conf_content = self._render_networkmanager_conf(
                network_state, templates)
            if nm_conf_content:
                util.write_file(nm_conf_path, nm_conf_content, file_mode)
        if self.netrules_path:
            netrules_content = self._render_persistent_net(network_state)
            netrules_path = subp.target_path(target, self.netrules_path)
            util.write_file(netrules_path, netrules_content, file_mode)

        sysconfig_path = subp.target_path(target, templates.get("control"))
        # Distros configuring /etc/sysconfig/network as a file e.g. Centos
        if sysconfig_path.endswith("network"):
            util.ensure_dir(os.path.dirname(sysconfig_path))
            netcfg = [_make_header(), "NETWORKING=yes"]
            if network_state.use_ipv6:
                netcfg.append("NETWORKING_IPV6=yes")
                netcfg.append("IPV6_AUTOCONF=no")
            util.write_file(sysconfig_path, "\n".join(netcfg) + "\n",
                            file_mode)
Example #37
def setup_swapfile(fname, size=None, maxsize=None):
    """
    fname: full path string of filename to setup
    size: the size in bytes to create; set to "auto" for the recommended size
    maxsize: the maximum size in bytes
    """
    tdir = os.path.dirname(fname)
    if str(size).lower() == "auto":
        try:
            memsize = util.read_meminfo()['total']
        except IOError:
            LOG.debug("Not creating swap: failed to read meminfo")
            return

        util.ensure_dir(tdir)
        size = suggested_swapsize(fsys=tdir, maxsize=maxsize, memsize=memsize)

    if not size:
        LOG.debug("Not creating swap: suggested size was 0")
        return

    mbsize = str(int(size / (2**20)))
    msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
    try:
        util.ensure_dir(tdir)
        util.log_time(LOG.debug,
                      msg,
                      func=util.subp,
                      args=[[
                          'sh', '-c',
                          ('rm -f "$1" && umask 0066 && '
                           '{ fallocate -l "${2}M" "$1" || '
                           'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
                           'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
                          'setup_swap', fname, mbsize
                      ]])

    except Exception as e:
        raise IOError("Failed %s: %s" % (msg, e))

    return fname
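The size math and the shell pipeline above are easy to sanity-check in isolation; this sketch only builds the command list (actually running it needs root and a real filesystem):

def build_swap_cmd(fname, size_bytes):
    mbsize = str(int(size_bytes / (2 ** 20)))  # bytes -> whole megabytes
    # with 'sh -c', 'setup_swap' becomes $0, fname $1 and mbsize $2
    return ['sh', '-c',
            ('rm -f "$1" && umask 0066 && '
             '{ fallocate -l "${2}M" "$1" || '
             'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
             'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
            'setup_swap', fname, mbsize]

print(build_swap_cmd('/swap.img', 512 * 2 ** 20))  # 512MB -> count=512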
Example #38
    def test_remove_artifacts_removes_artifacts_removes_seed(self):
        """remove_artifacts removes seed dir when remove_seed is True."""
        dirs = [
            self.artifact_dir,
            os.path.join(self.artifact_dir, 'seed'),
            os.path.join(self.artifact_dir, 'dir1'),
            os.path.join(self.artifact_dir, 'dir2')]
        for _dir in dirs:
            ensure_dir(_dir)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False, remove_seed=True)
        self.assertEqual(0, retcode)
        self.assertTrue(
            os.path.exists(self.artifact_dir), 'Missing artifact dir')
        for deleted_dir in dirs[1:]:
            self.assertFalse(
                os.path.exists(deleted_dir),
                'Unexpected {0} dir'.format(deleted_dir))
Example #39
 def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
     # Ensure the dir is included and that
     # it actually exists as a directory
     sudoers_contents = ''
     base_exists = False
     if os.path.exists(sudo_base):
         sudoers_contents = util.load_file(sudo_base)
         base_exists = True
     found_include = False
     for line in sudoers_contents.splitlines():
         line = line.strip()
         include_match = re.search(r"^[#|@]includedir\s+(.*)$", line)
         if not include_match:
             continue
         included_dir = include_match.group(1).strip()
         if not included_dir:
             continue
         included_dir = os.path.abspath(included_dir)
         if included_dir == path:
             found_include = True
             break
     if not found_include:
         try:
             if not base_exists:
                 lines = [('# See sudoers(5) for more information'
                           ' on "#include" directives:'), '',
                          util.make_header(base="added"),
                          "#includedir %s" % (path), '']
                 sudoers_contents = "\n".join(lines)
                 util.write_file(sudo_base, sudoers_contents, 0o440)
             else:
                 lines = ['', util.make_header(base="added"),
                          "#includedir %s" % (path), '']
                 sudoers_contents = "\n".join(lines)
                 util.append_file(sudo_base, sudoers_contents)
             LOG.debug("Added '#includedir %s' to %s", path, sudo_base)
         except IOError as e:
             util.logexc(LOG, "Failed to write %s", sudo_base)
             raise e
     util.ensure_dir(path, 0o750)
Example #40
 def test_handle_args_root_fallback_from_sensitive_instance_data(self):
     """When root user defaults to sensitive.json."""
     user_data = self.tmp_path('user-data', dir=self.tmp)
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.devel.render.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     args = self.args(
         user_data=user_data, instance_data=None, debug=False)
     with mock.patch('sys.stderr', new_callable=StringIO):
         with mock.patch('os.getuid') as m_getuid:
             m_getuid.return_value = 0
             self.assertEqual(1, render.handle_args('anyname', args))
     json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
     json_sensitive = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
     self.assertIn(
         'WARNING: Missing root-readable %s. Using redacted %s' % (
             json_sensitive, json_file), self.logs.getvalue())
     self.assertIn(
         'ERROR: Missing instance-data.json file: %s' % json_file,
         self.logs.getvalue())
Example #41
    def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
        """Create a cloud with tmp working directory paths.

        @param distro: Name of the distro to attach to the cloud.
        @param sys_cfg: Optional system configuration dict for the distro.
        @param metadata: Optional metadata to set on the datasource.

        @return: The built cloud instance.
        """
        self.new_root = self.tmp_dir()
        if not sys_cfg:
            sys_cfg = {}
        tmp_paths = {}
        for var in ['templates_dir', 'run_dir', 'cloud_dir']:
            tmp_paths[var] = self.tmp_path(var, dir=self.new_root)
            util.ensure_dir(tmp_paths[var])
        self.paths = ch.Paths(tmp_paths)
        cls = distros.fetch(distro)
        mydist = cls(distro, sys_cfg, self.paths)
        myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, self.paths)
        if metadata:
            myds.metadata.update(metadata)
        return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
Example #42
    def test_none_ds(self):
        new_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, new_root)
        self.replicateTestRoot('simple_ubuntu', new_root)
        cfg = {
            'datasource_list': ['None'],
            'cloud_init_modules': ['write-files'],
            'system_info': {'paths': {'run_dir': new_root}}
        }
        ud = helpers.readResource('user_data.1.txt')
        cloud_cfg = safeyaml.dumps(cfg)
        util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
        util.write_file(os.path.join(new_root, 'etc',
                                     'cloud', 'cloud.cfg'), cloud_cfg)
        self._patchIn(new_root)

        # Now start verifying what's created
        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.datasource.userdata_raw = ud
        initer.instancify()
        initer.update()
        initer.cloudify().run('consume_data',
                              initer.consume_data,
                              args=[PER_INSTANCE],
                              freq=PER_INSTANCE)
        mirrors = initer.distro.get_option('package_mirrors')
        self.assertEqual(1, len(mirrors))
        mirror = mirrors[0]
        self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah'])
        mods = stages.Modules(initer)
        (which_ran, failures) = mods.run_section('cloud_init_modules')
        self.assertTrue(len(failures) == 0)
        self.assertTrue(os.path.exists('/etc/blah.ini'))
        self.assertIn('write-files', which_ran)
        contents = util.load_file('/etc/blah.ini')
        self.assertEqual(contents, 'blah')
Example #43
    def render_network_state(self, network_state, templates=None, target=None):
        if not templates:
            templates = self.templates
        file_mode = 0o644
        base_sysconf_dir = util.target_path(target, self.sysconf_dir)
        for path, data in self._render_sysconfig(base_sysconf_dir,
                                                 network_state,
                                                 templates=templates).items():
            util.write_file(path, data, file_mode)
        if self.dns_path:
            dns_path = util.target_path(target, self.dns_path)
            resolv_content = self._render_dns(network_state,
                                              existing_dns_path=dns_path)
            if resolv_content:
                util.write_file(dns_path, resolv_content, file_mode)
        if self.networkmanager_conf_path:
            nm_conf_path = util.target_path(target,
                                            self.networkmanager_conf_path)
            nm_conf_content = self._render_networkmanager_conf(
                network_state, templates)
            if nm_conf_content:
                util.write_file(nm_conf_path, nm_conf_content, file_mode)
        if self.netrules_path:
            netrules_content = self._render_persistent_net(network_state)
            netrules_path = util.target_path(target, self.netrules_path)
            util.write_file(netrules_path, netrules_content, file_mode)
        if available_nm(target=target):
            enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))

        sysconfig_path = util.target_path(target, templates.get('control'))
        # Distros configuring /etc/sysconfig/network as a file e.g. Centos
        if sysconfig_path.endswith('network'):
            util.ensure_dir(os.path.dirname(sysconfig_path))
            netcfg = [_make_header(), 'NETWORKING=yes']
            if network_state.use_ipv6:
                netcfg.append('NETWORKING_IPV6=yes')
                netcfg.append('IPV6_AUTOCONF=no')
            util.write_file(sysconfig_path, "\n".join(netcfg) + "\n",
                            file_mode)
Example #44
 def test_handle_args_defaults_instance_data(self):
     """When no instance_data argument, default to configured run_dir."""
     args = self.args(debug=False,
                      dump_all=True,
                      format=None,
                      instance_data=None,
                      list_keys=False,
                      user_data=None,
                      vendor_data=None,
                      varname=None)
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
         self.assertEqual(1, query.handle_args('anyname', args))
     json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
     self.assertIn('ERROR: Missing instance-data file: %s' % json_file,
                   self.logs.getvalue())
     self.assertIn('ERROR: Missing instance-data file: %s' % json_file,
                   m_stderr.getvalue())
Example #45
    def test_remove_artifacts_removes_artifacts_skipping_seed(self):
        """remove_artifacts cleans artifacts dir with exception of seed dir."""
        dirs = [
            self.artifact_dir,
            os.path.join(self.artifact_dir, 'seed'),
            os.path.join(self.artifact_dir, 'dir1'),
            os.path.join(self.artifact_dir, 'dir2')]
        for _dir in dirs:
            ensure_dir(_dir)

        retcode = wrap_and_call(
            'cloudinit.cmd.clean',
            {'Init': {'side_effect': self.init_class}},
            clean.remove_artifacts, remove_logs=False)
        self.assertEqual(0, retcode)
        for expected_dir in dirs[:2]:
            self.assertTrue(
                os.path.exists(expected_dir),
                'Missing {0} dir'.format(expected_dir))
        for deleted_dir in dirs[2:]:
            self.assertFalse(
                os.path.exists(deleted_dir),
                'Unexpected {0} dir'.format(deleted_dir))
Example #46
def write_files(datadir, files, dirmode=None):
    def _redact_password(cnt, fname):
        """Azure provides the UserPassword in plain text. So we redact it"""
        try:
            root = ET.fromstring(cnt)
            for elem in root.iter():
                if ('UserPassword' in elem.tag
                        and elem.text != DEF_PASSWD_REDACTION):
                    elem.text = DEF_PASSWD_REDACTION
            return ET.tostring(root)
        except Exception:
            LOG.critical("failed to redact userpassword in %s", fname)
            return cnt

    if not datadir:
        return
    if not files:
        files = {}
    util.ensure_dir(datadir, dirmode)
    for (name, content) in files.items():
        fname = os.path.join(datadir, name)
        if 'ovf-env.xml' in name:
            content = _redact_password(content, fname)
        util.write_file(filename=fname, content=content, mode=0o600)
Example #47
 def test_handle_args_root_uses_instance_sensitive_data(self):
     """When no instance_data argument, root uses semsitive json."""
     user_data = self.tmp_path('user-data', dir=self.tmp)
     vendor_data = self.tmp_path('vendor-data', dir=self.tmp)
     write_file(user_data, 'ud')
     write_file(vendor_data, 'vd')
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
     write_file(sensitive_file, '{"my-var": "it worked"}')
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     args = self.args(
         debug=False, dump_all=True, format=None, instance_data=None,
         list_keys=False, user_data=user_data, vendor_data=vendor_data,
         varname=None)
     with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
         with mock.patch('os.getuid') as m_getuid:
             m_getuid.return_value = 0
             self.assertEqual(0, query.handle_args('anyname', args))
     self.assertEqual(
         '{\n "my_var": "it worked",\n "userdata": "ud",\n '
         '"vendordata": "vd"\n}\n', m_stdout.getvalue())
Example #48
 def test_handle_args_root_fallsback_to_instance_data(self):
     """When no instance_data argument, root falls back to redacted json."""
     args = self.args(debug=False,
                      dump_all=True,
                      format=None,
                      instance_data=None,
                      list_keys=False,
                      user_data=None,
                      vendor_data=None,
                      varname=None)
     run_dir = self.tmp_path('run_dir', dir=self.tmp)
     ensure_dir(run_dir)
     paths = Paths({'run_dir': run_dir})
     self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
     self.m_paths.return_value = paths
     with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
         with mock.patch('os.getuid') as m_getuid:
             m_getuid.return_value = 0
             self.assertEqual(1, query.handle_args('anyname', args))
     json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
     sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
     self.assertIn(
         'WARNING: Missing root-readable %s. Using redacted %s instead.' %
         (sensitive_file, json_file), m_stderr.getvalue())
    def test_none_ds(self):
        new_root = self.makeDir()
        self.replicateTestRoot('simple_ubuntu', new_root)
        cfg = {
            'datasource_list': ['None'],
            'write_files': [
                {
                    'path': '/etc/blah.ini',
                    'content': 'blah',
                    'permissions': 0o755,
                },
            ],
            'cloud_init_modules': ['write-files'],
        }
        cloud_cfg = util.yaml_dumps(cfg)
        util.ensure_dir(os.path.join(new_root, 'etc', 'cloud'))
        util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'),
                        cloud_cfg)
        self._patchIn(new_root)

        # Now start verifying whats created
        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        self.assertTrue(os.path.exists("/var/lib/cloud"))
        for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']:
            self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d)))

        initer.fetch()
        iid = initer.instancify()
        self.assertEquals(iid, 'iid-datasource-none')
Exemple #50
0
 def _create_sysfs_parent_directory(self):
     util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
Exemple #51
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in CONFIG_KEY_TO_FILE:
                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    subp.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generated a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg, 'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        lang_c = os.environ.copy()
        lang_c['LANG'] = 'C'
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = subp.subp(cmd, capture=True, env=lang_c)
                    sys.stdout.write(util.decode_binary(out))
                except subp.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    if (e.exit_code == 1
                            and err.startswith("unknown key")):
                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                    else:
                        util.logexc(
                            log, "Failed generating key type %s to "
                            "file %s", keytype, keyfile)

    if "ssh_publish_hostkeys" in cfg:
        host_key_blacklist = util.get_cfg_option_list(
            cfg["ssh_publish_hostkeys"], "blacklist",
            HOST_KEY_PUBLISH_BLACKLIST)
        publish_hostkeys = util.get_cfg_option_bool(
            cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
    else:
        host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
        publish_hostkeys = PUBLISH_HOST_KEYS

    if publish_hostkeys:
        hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
        try:
            cloud.datasource.publish_host_keys(hostkeys)
        except Exception:
            util.logexc(log, "Publishing host keys failed!")

    try:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    ssh_util.DISABLE_USER_OPTS)

        keys = []
        if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
            keys = cloud.get_public_ssh_keys() or []
        else:
            log.debug('Skipping import of public SSH keys per '
                      'config setting: allow_public_ssh_keys=False')

        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying SSH credentials failed!")
Exemple #52
0
def handle(name, cfg, cloud, log, _args):
    """Handler method activated by cloud-init."""

    # If there isn't a chef key in the configuration don't do anything
    if 'chef' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'chef' key in configuration"), name)
        return
    chef_cfg = cfg['chef']

    # Ensure the chef directories we use exist
    chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
    if not chef_dirs:
        chef_dirs = list(CHEF_DIRS)
    for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
        util.ensure_dir(d)

    vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
    vcert = chef_cfg.get('validation_cert')
    # special value 'system' means do not overwrite the file
    # but still render the template to contain 'validation_key'
    if vcert:
        if vcert != "system":
            util.write_file(vkey_path, vcert)
        elif not os.path.isfile(vkey_path):
            log.warning(
                "chef validation_cert provided as 'system', but "
                "validation_key path '%s' does not exist.", vkey_path)

    # Create the chef config from template
    template_fn = cloud.get_template_filename('chef_client.rb')
    if template_fn:
        iid = str(cloud.datasource.get_instance_id())
        params = get_template_params(iid, chef_cfg, log)
        # Do a best effort attempt to ensure that the template values that
        # are associated with paths have their parent directory created
        # before they are used by the chef-client itself.
        param_paths = set()
        for (k, v) in params.items():
            if k in CHEF_RB_TPL_PATH_KEYS and v:
                param_paths.add(os.path.dirname(v))
        util.ensure_dirs(param_paths)
        templater.render_to_file(template_fn, CHEF_RB_PATH, params)
    else:
        log.warn("No template found, not rendering to %s", CHEF_RB_PATH)

    # Set the firstboot json
    fb_filename = util.get_cfg_option_str(chef_cfg,
                                          'firstboot_path',
                                          default=CHEF_FB_PATH)
    if not fb_filename:
        log.info("First boot path empty, not writing first boot json file")
    else:
        initial_json = {}
        if 'run_list' in chef_cfg:
            initial_json['run_list'] = chef_cfg['run_list']
        if 'initial_attributes' in chef_cfg:
            initial_attributes = chef_cfg['initial_attributes']
            for k in list(initial_attributes.keys()):
                initial_json[k] = initial_attributes[k]
        util.write_file(fb_filename, json.dumps(initial_json))

    # Try to install chef, if it's not already installed...
    force_install = util.get_cfg_option_bool(chef_cfg,
                                             'force_install',
                                             default=False)
    if not is_installed() or force_install:
        run = install_chef(cloud, chef_cfg, log)
    elif is_installed():
        run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
    else:
        run = False
    if run:
        run_chef(chef_cfg, log)
        post_run_chef(chef_cfg, log)
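
For reference, a minimal cloud-config dict that would drive the chef handler above; every value shown (URL, validator name, run_list) is an illustrative placeholder, not a default:

cfg = {
    'chef': {
        'server_url': 'https://chef.example.com:4000',
        'validation_name': 'myorg-validator',
        'validation_cert': 'system',   # keep any pre-seeded validation key
        'run_list': ['recipe[apache2]', 'role[web]'],
        'initial_attributes': {'apache': {'prefork': {'maxclients': 100}}},
        'exec': False,                 # do not run chef-client at boot
    },
}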
Exemple #53
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            if key in KEY_2_FILE:
                tgt_fn = KEY_2_FILE[key][0]
                tgt_perms = KEY_2_FILE[key][1]
                util.write_file(tgt_fn, val, tgt_perms)

        for (priv, pub) in PRIV_2_PUB.items():
            if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                continue
            pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
            cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    util.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(log, "Failed generating a key for %s from %s",
                            pair[0], pair[1])
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg,
                                           'ssh_genkeytypes',
                                           GENERATE_KEY_NAMES)
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            util.ensure_dir(os.path.dirname(keyfile))
            if not os.path.exists(keyfile):
                cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
                try:
                    # TODO(harlowja): Is this guard needed?
                    with util.SeLinuxGuard("/etc/ssh", recursive=True):
                        util.subp(cmd, capture=False)
                except Exception:
                    util.logexc(log, "Failed generating key type %s to "
                                "file %s", keytype, keyfile)

    try:
        (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ds.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    DISABLE_ROOT_OPTS)

        keys = cloud.get_public_ssh_keys() or []
        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying ssh credentials failed!")
Exemple #54
0
def handle(name, cfg, cloud, log, _args):
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'puppet' configuration found"), name)
        return

    puppet_cfg = cfg['puppet']
    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
    collection = util.get_cfg_option_str(puppet_cfg, 'collection', None)
    install_type = util.get_cfg_option_str(puppet_cfg, 'install_type',
                                           'packages')
    cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True)
    run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False)
    aio_install_url = util.get_cfg_option_str(puppet_cfg,
                                              'aio_install_url',
                                              default=AIO_INSTALL_URL)

    # AIO and distro packages use different paths
    if install_type == 'aio':
        puppet_user = 'root'
        puppet_bin = '/opt/puppetlabs/bin/puppet'
        puppet_package = 'puppet-agent'
    else:  # default to 'packages'
        puppet_user = 'puppet'
        puppet_bin = 'puppet'
        puppet_package = 'puppet'

    package_name = util.get_cfg_option_str(puppet_cfg, 'package_name',
                                           puppet_package)
    if not install and version:
        log.warning(("Puppet install set to false but version supplied,"
                     " doing nothing."))
    elif install:
        log.debug(("Attempting to install puppet %s from %s"),
                  version if version else 'latest', install_type)

        if install_type == "packages":
            cloud.distro.install_packages((package_name, version))
        elif install_type == "aio":
            install_puppet_aio(aio_install_url, version, collection, cleanup)
        else:
            log.warning("Unknown puppet install type '%s'", install_type)
            run = False

    conf_file = util.get_cfg_option_str(puppet_cfg, 'conf_file',
                                        get_config_value(puppet_bin, 'config'))
    ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir',
                                      get_config_value(puppet_bin, 'ssldir'))
    csr_attributes_path = util.get_cfg_option_str(
        puppet_cfg, 'csr_attributes_path',
        get_config_value(puppet_bin, 'csr_attributes'))

    p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)

    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(p_constants.conf_path)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = '\n'.join(cleaned_lines)
        puppet_config.read_file(StringIO(cleaned_contents),
                                source=p_constants.conf_path)
        for (cfg_name, cfg) in puppet_cfg['conf'].items():
            # Cert configuration is a special case
            # Dump the puppetserver ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(p_constants.ssl_dir, 0o771)
                util.chownbyname(p_constants.ssl_dir, puppet_user, 'root')
                util.ensure_dir(p_constants.ssl_cert_dir)

                util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root')
                util.write_file(p_constants.ssl_cert_path, cfg)
                util.chownbyname(p_constants.ssl_cert_path, puppet_user,
                                 'root')
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg.items():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
            # We got all our config as wanted we'll rename
            # the previous puppet.conf and create our new one
            util.rename(p_constants.conf_path,
                        "%s.old" % (p_constants.conf_path))
            util.write_file(p_constants.conf_path, puppet_config.stringify())

    if 'csr_attributes' in puppet_cfg:
        util.write_file(
            p_constants.csr_attributes_path,
            yaml.dump(puppet_cfg['csr_attributes'], default_flow_style=False))

    # Set it up so it autostarts
    _autostart_puppet(log)

    # Run the agent if needed
    if run:
        log.debug('Running puppet-agent')
        cmd = [puppet_bin, 'agent']
        if 'exec_args' in puppet_cfg:
            cmd_args = puppet_cfg['exec_args']
            if isinstance(cmd_args, (list, tuple)):
                cmd.extend(cmd_args)
            elif isinstance(cmd_args, str):
                cmd.extend(cmd_args.split())
            else:
                log.warning(
                    "Unknown type %s provided for puppet"
                    " 'exec_args' expected list, tuple,"
                    " or string", type(cmd_args))
                cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
        else:
            cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
        subp.subp(cmd, capture=False)

    # Start puppetd
    subp.subp(['service', 'puppet', 'start'], capture=False)
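
A sketch of the 'puppet' cloud-config this handler consumes; all values are illustrative placeholders:

cfg = {
    'puppet': {
        'install': True,
        'install_type': 'aio',        # use the puppetlabs AIO installer
        'collection': 'puppet7',
        'exec': True,                 # run `puppet agent` once configured
        'exec_args': ['--test'],
        'conf': {
            'agent': {
                'server': 'puppetserver.example.com',
                'certname': '%i.%f',  # expanded to '<instance-id>.<fqdn>'
            },
            'ca_cert': '-----BEGIN CERTIFICATE-----\n...',
        },
    },
}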
Exemple #55
0
 def setUp(self):
     super(TestHostname, self).setUp()
     self.tmp = tempfile.mkdtemp()
     util.ensure_dir(os.path.join(self.tmp, "data"))
     self.addCleanup(shutil.rmtree, self.tmp)
Exemple #56
0
def handle(name, cfg, _cloud, log, args):
    if len(args) != 0:
        resize_root = args[0]
    else:
        resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)

    if not util.translate_bool(resize_root, addons=[NOBLOCK]):
        log.debug("Skipping module named %s, resizing disabled", name)
        return

    # TODO(harlowja) is the directory ok to be used??
    resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
    util.ensure_dir(resize_root_d)

    # TODO(harlowja): allow what is to be resized to be configurable??
    resize_what = "/"
    result = util.get_mount_info(resize_what, log)
    if not result:
        log.warn("Could not determine filesystem type of %s", resize_what)
        return

    (devpth, fs_type, mount_point) = result

    info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
    log.debug("resize_info: %s" % info)

    container = util.is_container()

    # Ensure the path is a block device.
    if (devpth == "/dev/root" and not os.path.exists(devpth)
            and not container):
        devpth = util.rootdev_from_cmdline(util.get_cmdline())
        if devpth is None:
            log.warn("Unable to find device '/dev/root'")
            return
        log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)

    try:
        statret = os.stat(devpth)
    except OSError as exc:
        if container and exc.errno == errno.ENOENT:
            log.debug(
                "Device '%s' did not exist in container. "
                "cannot resize: %s", devpth, info)
        elif exc.errno == errno.ENOENT:
            log.warn("Device '%s' did not exist. cannot resize: %s", devpth,
                     info)
        else:
            raise exc
        return

    if not os.access(devpth, os.W_OK):
        if container:
            log.debug("'%s' not writable in container. cannot resize: %s",
                      devpth, info)
        else:
            log.warn("'%s' not writable. cannot resize: %s", devpth, info)
        return

    if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
        if container:
            log.debug("device '%s' not a block device in container."
                      " cannot resize: %s" % (devpth, info))
        else:
            log.warn("device '%s' not a block device. cannot resize: %s" %
                     (devpth, info))
        return

    resizer = None
    if can_skip_resize(fs_type, resize_what, devpth):
        log.debug("Skip resize filesystem type %s for %s", fs_type,
                  resize_what)
        return

    fstype_lc = fs_type.lower()
    for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
        if fstype_lc.startswith(pfix):
            resizer = root_cmd
            break

    if not resizer:
        log.warn("Not resizing unknown filesystem type %s for %s", fs_type,
                 resize_what)
        return

    resize_cmd = resizer(resize_what, devpth)
    log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
              ' '.join(resize_cmd))

    if resize_root == NOBLOCK:
        # Fork to a child that will run
        # the resize command
        util.fork_cb(util.log_time,
                     logfunc=log.debug,
                     msg="backgrounded Resizing",
                     func=do_resize,
                     args=(resize_cmd, log))
    else:
        util.log_time(logfunc=log.debug,
                      msg="Resizing",
                      func=do_resize,
                      args=(resize_cmd, log))

    action = 'Resized'
    if resize_root == NOBLOCK:
        action = 'Resizing (via forking)'
    log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
              resize_root)
Exemple #57
0
def handle(_name, cfg, cloud, log, _args):
    # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
    def_mnt_opts = "defaults,nobootwait"
    uses_systemd = cloud.distro.uses_systemd()
    if uses_systemd:
        def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service"

    defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
    defvals = cfg.get("mount_default_fields", defvals)

    # these are our default set of mounts
    defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
               ["swap", "none", "swap", "sw", "0", "0"]]

    cfgmnt = []
    if "mounts" in cfg:
        cfgmnt = cfg["mounts"]

    LOG.debug("mounts configuration is %s", cfgmnt)

    fstab_lines = []
    fstab_devs = {}
    fstab_removed = []

    for line in util.load_file(FSTAB_PATH).splitlines():
        if MNT_COMMENT in line:
            fstab_removed.append(line)
            continue

        try:
            toks = WS.split(line)
        except Exception:
            continue
        fstab_devs[toks[0]] = line
        fstab_lines.append(line)

    for i in range(len(cfgmnt)):
        # skip something that wasn't a list
        if not isinstance(cfgmnt[i], list):
            log.warning("Mount option %s not a list, got a %s instead",
                        (i + 1), type_utils.obj_name(cfgmnt[i]))
            continue

        start = str(cfgmnt[i][0])
        sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
        if sanitized != start:
            log.debug("changed %s => %s" % (start, sanitized))

        if sanitized is None:
            log.debug("Ignoring nonexistent named mount %s", start)
            continue
        elif sanitized in fstab_devs:
            log.info("Device %s already defined in fstab: %s", sanitized,
                     fstab_devs[sanitized])
            continue

        cfgmnt[i][0] = sanitized

        # in case the user did not quote a field (likely fs-freq, fs_passno)
        # but do not convert None to 'None' (LP: #898365)
        for j in range(len(cfgmnt[i])):
            if cfgmnt[i][j] is None:
                continue
            else:
                cfgmnt[i][j] = str(cfgmnt[i][j])

    for i in range(len(cfgmnt)):
        # fill in values with defaults from defvals above
        for j in range(len(defvals)):
            if len(cfgmnt[i]) <= j:
                cfgmnt[i].append(defvals[j])
            elif cfgmnt[i][j] is None:
                cfgmnt[i][j] = defvals[j]

        # if the second entry in the list is 'None' this
        # clears all previous entries of that same 'fs_spec'
        # (fs_spec is the first field in /etc/fstab, ie, that device)
        if cfgmnt[i][1] is None:
            for j in range(i):
                if cfgmnt[j][0] == cfgmnt[i][0]:
                    cfgmnt[j][1] = None

    # for each of the "default" mounts, add them only if no other
    # entry has the same device name
    for defmnt in defmnts:
        start = defmnt[0]
        sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
        if sanitized != start:
            log.debug("changed default device %s => %s" % (start, sanitized))

        if sanitized is None:
            log.debug("Ignoring nonexistent default named mount %s", start)
            continue
        elif sanitized in fstab_devs:
            log.debug("Device %s already defined in fstab: %s", sanitized,
                      fstab_devs[sanitized])
            continue

        defmnt[0] = sanitized

        cfgmnt_has = False
        for cfgm in cfgmnt:
            if cfgm[0] == defmnt[0]:
                cfgmnt_has = True
                break

        if cfgmnt_has:
            log.debug(("Not including %s, already"
                       " previously included"), start)
            continue
        cfgmnt.append(defmnt)

    # now, each entry in the cfgmnt list has all fstab values
    # if the second field is None (not the string, the value) we skip it
    actlist = []
    for x in cfgmnt:
        if x[1] is None:
            log.debug("Skipping nonexistent device named %s", x[0])
        else:
            actlist.append(x)

    swapret = handle_swapcfg(cfg.get('swap', {}))
    if swapret:
        actlist.append([swapret, "none", "swap", "sw", "0", "0"])

    if len(actlist) == 0:
        log.debug("No modifications to fstab needed")
        return

    cc_lines = []
    needswap = False
    need_mount_all = False
    dirs = []
    for line in actlist:
        # write 'comment' into the fs_mntops entry, claiming this line
        line[3] = "%s,%s" % (line[3], MNT_COMMENT)
        if line[2] == "swap":
            needswap = True
        if line[1].startswith("/"):
            dirs.append(line[1])
        cc_lines.append('\t'.join(line))

    mount_points = [
        v['mountpoint'] for k, v in util.mounts().items() if 'mountpoint' in v
    ]
    for d in dirs:
        try:
            util.ensure_dir(d)
        except Exception:
            util.logexc(log, "Failed to make '%s' config-mount", d)
        # dirs is list of directories on which a volume should be mounted.
        # If any of them does not already show up in the list of current
        # mount points, we will definitely need to do mount -a.
        if not need_mount_all and d not in mount_points:
            need_mount_all = True

    sadds = [WS.sub(" ", n) for n in cc_lines]
    sdrops = [WS.sub(" ", n) for n in fstab_removed]

    sops = (["- " + drop for drop in sdrops if drop not in sadds] +
            ["+ " + add for add in sadds if add not in sdrops])

    fstab_lines.extend(cc_lines)
    contents = "%s\n" % ('\n'.join(fstab_lines))
    util.write_file(FSTAB_PATH, contents)

    activate_cmds = []
    if needswap:
        activate_cmds.append(["swapon", "-a"])

    if len(sops) == 0:
        log.debug("No changes to /etc/fstab made.")
    else:
        log.debug("Changes to fstab: %s", sops)
        need_mount_all = True

    if need_mount_all:
        activate_cmds.append(["mount", "-a"])
        if uses_systemd:
            activate_cmds.append(["systemctl", "daemon-reload"])

    fmt = "Activating swap and mounts with: %s"
    for cmd in activate_cmds:
        fmt = "Activate mounts: %s:" + ' '.join(cmd)
        try:
            util.subp(cmd)
            log.debug(fmt, "PASS")
        except util.ProcessExecutionError:
            log.warning(fmt, "FAIL")
            util.logexc(log, fmt, "FAIL")
Exemple #58
0
def handle(name, cfg, cloud, log, _args):
    # If there isn't a puppet key in the configuration don't do anything
    if 'puppet' not in cfg:
        log.debug(("Skipping module named %s,"
                   " no 'puppet' configuration found"), name)
        return

    puppet_cfg = cfg['puppet']

    # Start by installing the puppet package if necessary...
    install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
    version = util.get_cfg_option_str(puppet_cfg, 'version', None)
    if not install and version:
        log.warn(("Puppet install set false but version supplied,"
                  " doing nothing."))
    elif install:
        log.debug(("Attempting to install puppet %s,"),
                  version if version else 'latest')
        cloud.distro.install_packages(('puppet', version))

    # ... and then update the puppet configuration
    if 'conf' in puppet_cfg:
        # Add all sections from the conf object to puppet.conf
        contents = util.load_file(PUPPET_CONF_PATH)
        # Create object for reading puppet.conf values
        puppet_config = helpers.DefaultingConfigParser()
        # Read puppet.conf values from original file in order to be able to
        # mix the rest up. First clean them up
        # (TODO(harlowja) is this really needed??)
        cleaned_lines = [i.lstrip() for i in contents.splitlines()]
        cleaned_contents = '\n'.join(cleaned_lines)
        puppet_config.readfp(StringIO(cleaned_contents),
                             filename=PUPPET_CONF_PATH)
        for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
            # Cert configuration is a special case
            # Dump the puppet master ca certificate in the correct place
            if cfg_name == 'ca_cert':
                # Puppet ssl sub-directory isn't created yet
                # Create it with the proper permissions and ownership
                util.ensure_dir(PUPPET_SSL_DIR, 0o771)
                util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
                util.ensure_dir(PUPPET_SSL_CERT_DIR)
                util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
                util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
                util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
            else:
                # Iterate through the config items, we'll use ConfigParser.set
                # to overwrite or create new items as needed
                for (o, v) in cfg.iteritems():
                    if o == 'certname':
                        # Expand %f as the fqdn
                        # TODO(harlowja) should this use the cloud fqdn??
                        v = v.replace("%f", socket.getfqdn())
                        # Expand %i as the instance id
                        v = v.replace("%i", cloud.get_instance_id())
                        # certname needs to be downcased
                        v = v.lower()
                    puppet_config.set(cfg_name, o, v)
            # We got all our config as wanted we'll rename
            # the previous puppet.conf and create our new one
            util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
            util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())

    # Set it up so it autostarts
    _autostart_puppet(log)

    # Start puppetd
    util.subp(['service', 'puppet', 'start'], capture=False)
Exemple #59
0
    def test_collect_logs_creates_tarfile(self, m_getuid):
        """collect-logs creates a tarfile with all related cloud-init info."""
        m_getuid.return_value = 100
        log1 = self.tmp_path('cloud-init.log', self.new_root)
        write_file(log1, 'cloud-init-log')
        log2 = self.tmp_path('cloud-init-output.log', self.new_root)
        write_file(log2, 'cloud-init-output-log')
        ensure_dir(self.run_dir)
        write_file(self.tmp_path('results.json', self.run_dir), 'results')
        write_file(self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
                   'sensitive')
        output_tarfile = self.tmp_path('logs.tgz')

        date = datetime.utcnow().date().strftime('%Y-%m-%d')
        date_logdir = 'cloud-init-logs-{0}'.format(date)

        version_out = '/usr/bin/cloud-init 18.2fake\n'
        expected_subp = {
            ('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
            '0.7fake\n',
            ('cloud-init', '--version'): version_out,
            ('dmesg', ): 'dmesg-out\n',
            ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
            ('tar', 'czvf', output_tarfile, date_logdir): ''
        }

        def fake_subp(cmd):
            cmd_tuple = tuple(cmd)
            if cmd_tuple not in expected_subp:
                raise AssertionError(
                    'Unexpected command provided to subp: {0}'.format(cmd))
            if cmd == ['tar', 'czvf', output_tarfile, date_logdir]:
                subp(cmd)  # Pass through tar cmd so we can check output
            return expected_subp[cmd_tuple], ''

        fake_stderr = mock.MagicMock()

        wrap_and_call('cloudinit.cmd.devel.logs', {
            'subp': {
                'side_effect': fake_subp
            },
            'sys.stderr': {
                'new': fake_stderr
            },
            'CLOUDINIT_LOGS': {
                'new': [log1, log2]
            },
            'CLOUDINIT_RUN_DIR': {
                'new': self.run_dir
            }
        },
                      logs.collect_logs,
                      output_tarfile,
                      include_userdata=False)
        # unpack the tarfile and check file contents
        subp(['tar', 'zxvf', output_tarfile, '-C', self.new_root])
        out_logdir = self.tmp_path(date_logdir, self.new_root)
        self.assertFalse(
            os.path.exists(
                os.path.join(out_logdir, 'run', 'cloud-init',
                             INSTANCE_JSON_SENSITIVE_FILE)),
            'Unexpected file found: %s' % INSTANCE_JSON_SENSITIVE_FILE)
        self.assertEqual('0.7fake\n',
                         load_file(os.path.join(out_logdir, 'dpkg-version')))
        self.assertEqual(version_out,
                         load_file(os.path.join(out_logdir, 'version')))
        self.assertEqual('cloud-init-log',
                         load_file(os.path.join(out_logdir, 'cloud-init.log')))
        self.assertEqual(
            'cloud-init-output-log',
            load_file(os.path.join(out_logdir, 'cloud-init-output.log')))
        self.assertEqual('dmesg-out\n',
                         load_file(os.path.join(out_logdir, 'dmesg.txt')))
        self.assertEqual('journal-out\n',
                         load_file(os.path.join(out_logdir, 'journal.txt')))
        self.assertEqual(
            'results',
            load_file(
                os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
        fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
Exemple #60
0
def handle(_name, cfg, cloud, log, _args):

    # remove the static keys from the pristine image
    if cfg.get("ssh_deletekeys", True):
        key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
        for f in glob.glob(key_pth):
            try:
                util.del_file(f)
            except Exception:
                util.logexc(log, "Failed deleting key file %s", f)

    if "ssh_keys" in cfg:
        # if there are keys and/or certificates in cloud-config, use them
        for (key, val) in cfg["ssh_keys"].items():
            # skip entry if unrecognized
            if key not in CONFIG_KEY_TO_FILE:
                continue
            tgt_fn = CONFIG_KEY_TO_FILE[key][0]
            tgt_perms = CONFIG_KEY_TO_FILE[key][1]
            util.write_file(tgt_fn, val, tgt_perms)
            # set server to present the most recently identified certificate
            if "_certificate" in key:
                cert_config = {"HostCertificate": tgt_fn}
                ssh_util.update_ssh_config(cert_config)

        for (priv, pub) in PRIV_TO_PUB.items():
            if pub in cfg["ssh_keys"] or priv not in cfg["ssh_keys"]:
                continue
            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
            cmd = ["sh", "-xc", KEY_GEN_TPL % pair]
            try:
                # TODO(harlowja): Is this guard needed?
                with util.SeLinuxGuard("/etc/ssh", recursive=True):
                    subp.subp(cmd, capture=False)
                log.debug("Generated a key for %s from %s", pair[0], pair[1])
            except Exception:
                util.logexc(
                    log,
                    "Failed generated a key for %s from %s",
                    pair[0],
                    pair[1],
                )
    else:
        # if not, generate them
        genkeys = util.get_cfg_option_list(cfg, "ssh_genkeytypes",
                                           GENERATE_KEY_NAMES)
        lang_c = os.environ.copy()
        lang_c["LANG"] = "C"
        for keytype in genkeys:
            keyfile = KEY_FILE_TPL % (keytype)
            if os.path.exists(keyfile):
                continue
            util.ensure_dir(os.path.dirname(keyfile))
            cmd = ["ssh-keygen", "-t", keytype, "-N", "", "-f", keyfile]

            # TODO(harlowja): Is this guard needed?
            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                try:
                    out, err = subp.subp(cmd, capture=True, env=lang_c)
                    if not util.get_cfg_option_bool(cfg, "ssh_quiet_keygen",
                                                    False):
                        sys.stdout.write(util.decode_binary(out))

                    gid = util.get_group_id("ssh_keys")
                    if gid != -1:
                        # perform same "sanitize permissions" as sshd-keygen
                        os.chown(keyfile, -1, gid)
                        os.chmod(keyfile, 0o640)
                        os.chmod(keyfile + ".pub", 0o644)
                except subp.ProcessExecutionError as e:
                    err = util.decode_binary(e.stderr).lower()
                    if e.exit_code == 1 and err.startswith(
                            "unknown key"):
                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                    else:
                        util.logexc(
                            log,
                            "Failed generating key type %s to file %s",
                            keytype,
                            keyfile,
                        )

    if "ssh_publish_hostkeys" in cfg:
        host_key_blacklist = util.get_cfg_option_list(
            cfg["ssh_publish_hostkeys"],
            "blacklist",
            HOST_KEY_PUBLISH_BLACKLIST,
        )
        publish_hostkeys = util.get_cfg_option_bool(
            cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
    else:
        host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
        publish_hostkeys = PUBLISH_HOST_KEYS

    if publish_hostkeys:
        hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
        try:
            cloud.datasource.publish_host_keys(hostkeys)
        except Exception:
            util.logexc(log, "Publishing host keys failed!")

    try:
        (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
        (user, _user_config) = ug_util.extract_default(users)
        disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
        disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
                                                    ssh_util.DISABLE_USER_OPTS)

        keys = []
        if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
            keys = cloud.get_public_ssh_keys() or []
        else:
            log.debug("Skipping import of publish SSH keys per "
                      "config setting: allow_public_ssh_keys=False")

        if "ssh_authorized_keys" in cfg:
            cfgkeys = cfg["ssh_authorized_keys"]
            keys.extend(cfgkeys)

        apply_credentials(keys, user, disable_root, disable_root_opts)
    except Exception:
        util.logexc(log, "Applying SSH credentials failed!")