Beispiel #1
0
def handle(name, cfg, cloud, log, _args):
    """Install, configure and (re)start the salt minion service.

    Driven by the optional 'salt_minion' section of the cloud config.
    """
    # Nothing to do unless the user supplied a salt_minion section.
    if "salt_minion" not in cfg:
        log.debug(
            "Skipping module named %s, no 'salt_minion' key in configuration",
            name,
        )
        return

    salt_cfg = cfg["salt_minion"]
    constants = SaltConstants(cfg=salt_cfg)

    # Install the salt package first ...
    cloud.distro.install_packages(constants.pkg_name)

    # ... and make sure the configuration directory exists.
    util.ensure_dir(constants.conf_dir)

    # Lay down the minion configuration, when one was given.
    if "conf" in salt_cfg:
        util.write_file(
            os.path.join(constants.conf_dir, "minion"),
            safeyaml.dumps(salt_cfg.get("conf")),
        )

    # Static grains go into their own file under the same config dir.
    if "grains" in salt_cfg:
        util.write_file(
            os.path.join(constants.conf_dir, "grains"),
            safeyaml.dumps(salt_cfg.get("grains")),
        )

    # Install the minion key pair when both halves were supplied.
    if "public_key" in salt_cfg and "private_key" in salt_cfg:
        default_pki = os.path.join(constants.conf_dir, "pki/minion")
        if not os.path.isdir(default_pki):
            default_pki = os.path.join(constants.conf_dir, "pki")

        pki_dir = salt_cfg.get("pki_dir", default_pki)
        # Keys are secrets: create everything under a restrictive umask.
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            util.write_file(
                os.path.join(pki_dir, "minion.pub"), salt_cfg["public_key"]
            )
            util.write_file(
                os.path.join(pki_dir, "minion.pem"), salt_cfg["private_key"]
            )

    # FreeBSD requires the service to be enabled in rc.conf before the
    # 'service' command can start it.
    if cloud.distro.osfamily == "freebsd":
        rhel_util.update_sysconfig_file(
            "/etc/rc.conf", {"salt_minion_enable": "YES"}
        )

    # 'service ... restart' starts the service if stopped and restarts it
    # otherwise, picking up any configuration written above.
    subp.subp(["service", constants.srv_name, "restart"], capture=False)
def handle(name, cfg, cloud, log, _args):
    """Install and configure a salt minion when 'salt_minion' is configured."""
    if 'salt_minion' not in cfg:
        # No salt_minion section -> this module has nothing to do.
        log.debug("Skipping module named %s,"
                  " no 'salt_minion' key in configuration", name)
        return

    minion_cfg = cfg['salt_minion']
    consts = SaltConstants(cfg=minion_cfg)

    # Package installation comes first ...
    cloud.distro.install_packages(consts.pkg_name)

    # ... then the configuration directory must exist.
    util.ensure_dir(consts.conf_dir)

    # Write the minion configuration file from the 'conf' mapping.
    if 'conf' in minion_cfg:
        util.write_file(os.path.join(consts.conf_dir, 'minion'),
                        safeyaml.dumps(minion_cfg.get('conf')))

    # Write static grains to <conf_dir>/grains.
    if 'grains' in minion_cfg:
        util.write_file(os.path.join(consts.conf_dir, 'grains'),
                        safeyaml.dumps(minion_cfg.get('grains')))

    # Install the key pair only when both keys were provided.
    if 'public_key' in minion_cfg and 'private_key' in minion_cfg:
        pki_default = os.path.join(consts.conf_dir, "pki/minion")
        if not os.path.isdir(pki_default):
            pki_default = os.path.join(consts.conf_dir, "pki")

        pki_dir = minion_cfg.get('pki_dir', pki_default)
        # Private key material: restrict permissions while writing.
        with util.umask(0o77):
            util.ensure_dir(pki_dir)
            util.write_file(os.path.join(pki_dir, 'minion.pub'),
                            minion_cfg['public_key'])
            util.write_file(os.path.join(pki_dir, 'minion.pem'),
                            minion_cfg['private_key'])

    # FreeBSD needs the service enabled in rc before it can be started.
    if cloud.distro.osfamily == 'freebsd':
        cloud.distro.updatercconf('salt_minion_enable', 'YES')

    # 'service ... restart' starts the service if stopped and restarts it
    # otherwise so the configuration written above takes effect.
    util.subp(['service', consts.srv_name, 'restart'], capture=False)
Beispiel #3
0
    def test_cloud_config_archive(self):
        """consume_data merges a cloud-config-archive into 'cloud_config'."""
        non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
        parts = [
            {'content': '#cloud-config\npassword: gocubs\n'},
            {'content': '#cloud-config\nlocale: chicago\n'},
            {'content': non_decodable},
        ]
        message = b'#cloud-config-archive\n' + safeyaml.dumps(parts).encode()

        self.reRoot()
        init = stages.Init()
        init.datasource = FakeDataSource(message)

        captured = {}

        def fsstore(filename, content, mode=0o0644, omode="wb"):
            # Record writes in a dict instead of touching the filesystem.
            captured[filename] = content

        # Consuming the user-data provided should write the 'cloud_config'
        # file which will have our yaml in it.
        with mock.patch('cloudinit.util.write_file') as write_file:
            write_file.side_effect = fsstore
            init.fetch()
            init.consume_data()

        cfg = util.load_yaml(captured[init.paths.get_ipath("cloud_config")])
        self.assertEqual(cfg.get('password'), 'gocubs')
        self.assertEqual(cfg.get('locale'), 'chicago')
Beispiel #4
0
    def setUp(self):
        """Seed a fake root with a cloud.cfg exercising several modules."""
        super(TestSimpleRun, self).setUp()
        self.new_root = self.tmp_dir()
        self.replicateTestRoot("simple_ubuntu", self.new_root)

        # Seed cloud.cfg file for our tests
        self.cfg = {
            "datasource_list": ["None"],
            "runcmd": ["ls /etc"],  # test ALL_DISTROS
            "spacewalk": {},  # test non-ubuntu distros module definition
            "system_info": {"paths": {"run_dir": self.new_root}},
            "write_files": [
                {
                    "path": "/etc/blah.ini",
                    "content": "blah",
                    "permissions": 0o755,
                },
            ],
            "cloud_init_modules": ["write-files", "spacewalk", "runcmd"],
        }
        etc_cloud = os.path.join(self.new_root, "etc", "cloud")
        util.ensure_dir(etc_cloud)
        util.write_file(os.path.join(etc_cloud, "cloud.cfg"),
                        safeyaml.dumps(self.cfg))
        self.patchOS(self.new_root)
        self.patchUtils(self.new_root)
Beispiel #5
0
    def test_none_ds_forces_run_via_unverified_modules(self):
        """run_section forced skipped modules by using unverified_modules."""

        # Rewrite cloud.cfg from a fresh copy with unverified_modules set.
        cfg = copy.deepcopy(self.cfg)
        cfg["unverified_modules"] = ["spacewalk"]  # Would have skipped
        etc_cloud = os.path.join(self.new_root, "etc", "cloud")
        util.ensure_dir(etc_cloud)
        util.write_file(os.path.join(etc_cloud, "cloud.cfg"),
                        safeyaml.dumps(cfg))

        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.instancify()
        initer.update()
        initer.cloudify().run(
            "consume_data",
            initer.consume_data,
            args=[PER_INSTANCE],
            freq=PER_INSTANCE,
        )

        # spacewalk must run (and be logged) despite not matching the distro.
        which_ran, failures = stages.Modules(initer).run_section(
            "cloud_init_modules")
        self.assertTrue(len(failures) == 0)
        self.assertIn("spacewalk", which_ran)
        self.assertIn("running unverified_modules: 'spacewalk'",
                      self.logs.getvalue())
Beispiel #6
0
    def test_none_ds_run_with_no_config_modules(self):
        """run_section will report no modules run when none are configured."""

        cfg = copy.deepcopy(self.cfg)
        # An explicit None represents an empty module list in cloud.cfg.
        cfg["cloud_init_modules"] = None
        etc_cloud = os.path.join(self.new_root, "etc", "cloud")
        util.ensure_dir(etc_cloud)
        util.write_file(os.path.join(etc_cloud, "cloud.cfg"),
                        safeyaml.dumps(cfg))

        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.instancify()
        initer.update()
        initer.cloudify().run(
            "consume_data",
            initer.consume_data,
            args=[PER_INSTANCE],
            freq=PER_INSTANCE,
        )

        # With no configured modules, nothing runs and nothing fails.
        which_ran, failures = stages.Modules(initer).run_section(
            "cloud_init_modules")
        self.assertTrue(len(failures) == 0)
        self.assertEqual([], which_ran)
Beispiel #7
0
    def setUp(self):
        """Seed a fake root and cloud.cfg covering several config modules."""
        super(TestSimpleRun, self).setUp()
        self.new_root = self.tmp_dir()
        self.replicateTestRoot('simple_ubuntu', self.new_root)

        # Seed cloud.cfg file for our tests
        self.cfg = {
            'datasource_list': ['None'],
            'runcmd': ['ls /etc'],  # test ALL_DISTROS
            'spacewalk': {},  # test non-ubuntu distros module definition
            'system_info': {'paths': {'run_dir': self.new_root}},
            'write_files': [
                {
                    'path': '/etc/blah.ini',
                    'content': 'blah',
                    'permissions': 0o755,
                },
            ],
            'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'],
        }
        etc_cloud = os.path.join(self.new_root, 'etc', 'cloud')
        util.ensure_dir(etc_cloud)
        util.write_file(os.path.join(etc_cloud, 'cloud.cfg'),
                        safeyaml.dumps(self.cfg))
        self.patchOS(self.new_root)
        self.patchUtils(self.new_root)
Beispiel #8
0
 def dump(self):
     """Serialize version, config and network state as a YAML string."""
     snapshot = {
         'version': self._version,
         'config': self._config,
         'network_state': self._network_state,
     }
     return safeyaml.dumps(snapshot)
Beispiel #9
0
 def setUp(self):
     """Create a fake root with cloud dirs, a seeded cloud.cfg and stdio."""
     super(TestMain, self).setUp()
     self.new_root = self.tmp_dir()
     self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root)
     os.makedirs(self.cloud_dir)
     self.replicateTestRoot('simple_ubuntu', self.new_root)
     self.cfg = {
         'datasource_list': ['None'],
         'runcmd': ['ls /etc'],  # test ALL_DISTROS
         'system_info': {
             'paths': {
                 'cloud_dir': self.cloud_dir,
                 'run_dir': self.new_root
             }
         },
         'write_files': [
             {
                 'path': '/etc/blah.ini',
                 'content': 'blah',
                 'permissions': 0o755,
             },
         ],
         'cloud_init_modules': ['write-files', 'runcmd'],
     }
     etc_cloud = os.path.join(self.new_root, 'etc', 'cloud')
     ensure_dir(etc_cloud)
     self.cloud_cfg_file = os.path.join(etc_cloud, 'cloud.cfg')
     write_file(self.cloud_cfg_file, safeyaml.dumps(self.cfg))
     self.patchOS(self.new_root)
     self.patchUtils(self.new_root)
     self.stderr = StringIO()
     self.patchStdoutAndStderr(stderr=self.stderr)
Beispiel #10
0
 async def wait_for_cloudinit(self):
     """Wait for cloud-init to finish and load its config when it ran.

     Sets self.cloud_init_ok, and when the cloud config contains an
     'autoinstall' section, writes it to /autoinstall.yaml and points
     self.opts.autoinstall at that path.
     """
     if self.opts.dry_run:
         # Dry runs never talk to cloud-init.
         self.cloud_init_ok = True
         return
     ci_start = time.time()
     status_coro = arun_command(["cloud-init", "status", "--wait"])
     try:
         status_cp = await asyncio.wait_for(status_coro, 600)
     except (asyncio.TimeoutError, asyncio.CancelledError):
         # BUG FIX: asyncio.wait_for signals a timeout by raising
         # asyncio.TimeoutError, not CancelledError, so the original
         # '<timeout>' branch was never taken on an actual timeout.
         # CancelledError is still handled for external cancellation.
         status_txt = '<timeout>'
         self.cloud_init_ok = False
     else:
         status_txt = status_cp.stdout
         self.cloud_init_ok = True
     log.debug("waited %ss for cloud-init", time.time() - ci_start)
     if "status: done" in status_txt:
         log.debug("loading cloud config")
         init = stages.Init()
         init.read_cfg()
         init.fetch(existing="trust")
         self.cloud = init.cloudify()
         autoinstall_path = '/autoinstall.yaml'
         if 'autoinstall' in self.cloud.cfg:
             # Write 0600: autoinstall data may contain credentials.
             if not os.path.exists(autoinstall_path):
                 atomic_helper.write_file(
                     autoinstall_path,
                     safeyaml.dumps(
                         self.cloud.cfg['autoinstall']).encode('utf-8'),
                     mode=0o600)
         if os.path.exists(autoinstall_path):
             self.opts.autoinstall = autoinstall_path
     else:
         log.debug(
             "cloud-init status: %r, assumed disabled",
             status_txt)
Beispiel #11
0
 def setUp(self):
     """Build a temp root with cloud dirs, cloud.cfg and patched stdio."""
     super(TestMain, self).setUp()
     self.new_root = self.tmp_dir()
     self.cloud_dir = self.tmp_path("var/lib/cloud/", dir=self.new_root)
     os.makedirs(self.cloud_dir)
     self.replicateTestRoot("simple_ubuntu", self.new_root)
     self.cfg = {
         "datasource_list": ["None"],
         "runcmd": ["ls /etc"],  # test ALL_DISTROS
         "system_info": {
             "paths": {
                 "cloud_dir": self.cloud_dir,
                 "run_dir": self.new_root,
             }
         },
         "write_files": [
             {
                 "path": "/etc/blah.ini",
                 "content": "blah",
                 "permissions": 0o755,
             },
         ],
         "cloud_init_modules": ["write-files", "runcmd"],
     }
     etc_cloud = os.path.join(self.new_root, "etc", "cloud")
     ensure_dir(etc_cloud)
     self.cloud_cfg_file = os.path.join(etc_cloud, "cloud.cfg")
     write_file(self.cloud_cfg_file, safeyaml.dumps(self.cfg))
     self.patchOS(self.new_root)
     self.patchUtils(self.new_root)
     self.stderr = StringIO()
     self.patchStdoutAndStderr(stderr=self.stderr)
Beispiel #12
0
 def dump(self):
     """Return version, config and network state as one YAML document."""
     return safeyaml.dumps({
         "version": self._version,
         "config": self._config,
         "network_state": self._network_state,
     })
Beispiel #13
0
 def _render_section(name, section):
     """Render {name: section} as 4-space-indented YAML; [] when empty."""
     if not section:
         return []
     dump = safeyaml.dumps({name: section},
                           explicit_start=False,
                           explicit_end=False,
                           noalias=True)
     return [util.indent(dump, ' ' * 4)]
Beispiel #14
0
    def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
        """When local-hostname metadata is present, call cc_set_hostname."""
        self.cfg['datasource'] = {
            'None': {'metadata': {'local-hostname': 'md-hostname'}}}
        cloud_cfg = safeyaml.dumps(self.cfg)
        write_file(self.cloud_cfg_file, cloud_cfg)
        cmdargs = myargs(
            debug=False, files=None, force=False, local=False, reporter=None,
            subcommand='init')

        def set_hostname(name, cfg, cloud, log, args):
            # Verify that the fully merged config reaches the handler.
            self.assertEqual('set-hostname', name)
            updated_cfg = copy.deepcopy(self.cfg)
            updated_cfg.update(
                {'def_log_file': '/var/log/cloud-init.log',
                 'log_cfgs': [],
                 'syslog_fix_perms': [
                     'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
                 ],
                 'vendor_data': {'enabled': True, 'prefix': []},
                 'vendor_data2': {'enabled': True, 'prefix': []}})
            updated_cfg.pop('system_info')

            self.assertEqual(updated_cfg, cfg)
            self.assertEqual(main.LOG, log)
            self.assertIsNone(args)

        (_item1, item2) = wrap_and_call(
            'cloudinit.cmd.main',
            {'util.close_stdin': True,
             'netinfo.debug_info': 'my net debug info',
             'cc_set_hostname.handle': {'side_effect': set_hostname},
             'util.fixup_output': ('outfmt', 'errfmt')},
            main.main_init, 'init', cmdargs)
        self.assertEqual([], item2)
        # Instancify is called
        instance_id_path = 'var/lib/cloud/data/instance-id'
        # BUG FIX: load_file's result was wrapped in a spurious single-arg
        # os.path.join() call; compare the loaded contents directly.
        self.assertEqual(
            'iid-datasource-none\n',
            load_file(os.path.join(self.new_root, instance_id_path)))
        # modules are run (including write_files)
        self.assertEqual(
            'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini')))
        expected_logs = [
            'network config is disabled by fallback',  # apply_network_config
            'my net debug info',  # netinfo.debug_info
            'no previous run detected'
        ]
        for log in expected_logs:
            self.assertIn(log, self.stderr.getvalue())
Beispiel #15
0
    def test_none_ds(self):
        """End-to-end init run with the None datasource and write-files."""
        new_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, new_root)
        self.replicateTestRoot("simple_ubuntu", new_root)
        cfg = {
            "datasource_list": ["None"],
            "cloud_init_modules": ["write-files"],
            "system_info": {"paths": {"run_dir": new_root}},
        }
        ud = helpers.readResource("user_data.1.txt")
        etc_cloud = os.path.join(new_root, "etc", "cloud")
        util.ensure_dir(etc_cloud)
        util.write_file(os.path.join(etc_cloud, "cloud.cfg"),
                        safeyaml.dumps(cfg))
        self._patchIn(new_root)

        # Bring up the init stages and consume the canned user-data.
        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.datasource.userdata_raw = ud
        initer.instancify()
        initer.update()
        initer.cloudify().run(
            "consume_data",
            initer.consume_data,
            args=[PER_INSTANCE],
            freq=PER_INSTANCE,
        )
        mirrors = initer.distro.get_option("package_mirrors")
        self.assertEqual(1, len(mirrors))
        self.assertEqual(mirrors[0]["arches"], ["i386", "amd64", "blah"])
        # write-files must have run and produced /etc/blah.ini.
        which_ran, failures = stages.Modules(initer).run_section(
            "cloud_init_modules")
        self.assertTrue(len(failures) == 0)
        self.assertTrue(os.path.exists("/etc/blah.ini"))
        self.assertIn("write-files", which_ran)
        self.assertEqual(util.load_file("/etc/blah.ini"), "blah")
Beispiel #16
0
    def test_none_ds(self):
        """Full init run with the None datasource; write-files must run."""
        new_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, new_root)
        self.replicateTestRoot('simple_ubuntu', new_root)
        cfg = {
            'datasource_list': ['None'],
            'cloud_init_modules': ['write-files'],
            'system_info': {'paths': {'run_dir': new_root}}
        }
        ud = helpers.readResource('user_data.1.txt')
        etc_cloud = os.path.join(new_root, 'etc', 'cloud')
        util.ensure_dir(etc_cloud)
        util.write_file(os.path.join(etc_cloud, 'cloud.cfg'),
                        safeyaml.dumps(cfg))
        self._patchIn(new_root)

        # Now start verifying whats created
        initer = stages.Init()
        initer.read_cfg()
        initer.initialize()
        initer.fetch()
        initer.datasource.userdata_raw = ud
        initer.instancify()
        initer.update()
        initer.cloudify().run('consume_data',
                              initer.consume_data,
                              args=[PER_INSTANCE],
                              freq=PER_INSTANCE)
        mirrors = initer.distro.get_option('package_mirrors')
        self.assertEqual(1, len(mirrors))
        self.assertEqual(mirrors[0]['arches'], ['i386', 'amd64', 'blah'])
        which_ran, failures = stages.Modules(initer).run_section(
            'cloud_init_modules')
        self.assertTrue(len(failures) == 0)
        self.assertTrue(os.path.exists('/etc/blah.ini'))
        self.assertIn('write-files', which_ran)
        self.assertEqual(util.load_file('/etc/blah.ini'), 'blah')
Beispiel #17
0
 def _write_cloud_config(self):
     """Write gathered cloud-config plus provenance comments to cloud_fn."""
     if not self.cloud_fn:
         return
     # Header comments recording which files were merged.
     header = []
     if self.file_names:
         header.append("# from %s files" % (len(self.file_names)))
         for fn in self.file_names:
             header.append("# %s" % (fn or "?"))
         header.append("")
     if self.cloud_buf is not None:
         # Something was actually gathered....
         lines = [CLOUD_PREFIX, ""]
         lines.extend(header)
         lines.append(safeyaml.dumps(self.cloud_buf))
     else:
         lines = []
     # 0600: the merged config may carry credentials.
     util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
Beispiel #18
0
def render_snap_op(op, name, path=None, cfgfile=None, config=None):
    """Run a snappy 'install' or 'config' operation for the named package.

    Raises ValueError for any other op.  When `config` is given it is
    written to a temporary file in the nested form snappy expects and
    passed as the config file; the temp file is always removed.
    """
    if op not in ('install', 'config'):
        raise ValueError("cannot render op '%s'" % op)

    shortname = name.partition(NAMESPACE_DELIM)[0]
    cfg_tmpf = None
    try:
        if config is not None:
            # input to 'snappy config packagename' must have nested data. odd.
            # config:
            #   packagename:
            #      config
            # Note, however, we do not touch config files on disk.
            nested_cfg = {'config': {shortname: config}}
            (fd, cfg_tmpf) = temp_utils.mkstemp()
            os.write(fd, safeyaml.dumps(nested_cfg).encode())
            os.close(fd)
            cfgfile = cfg_tmpf

        cmd = [SNAPPY_CMD, op]
        if op == 'install':
            if path:
                cmd.extend(["--allow-unauthenticated", path])
            else:
                cmd.append(name)
            if cfgfile:
                cmd.append(cfgfile)
        else:
            # op == 'config' (the only other value allowed above)
            cmd.extend([name, cfgfile])

        util.subp(cmd)

    finally:
        # Always remove the temporary config file, even on failure.
        if cfg_tmpf:
            os.unlink(cfg_tmpf)
Beispiel #19
0
 def dump_network_state(self):
     """Return the raw tracked network state serialized as YAML."""
     state = self._network_state
     return safeyaml.dumps(state)
Beispiel #20
0
def main():
    """Entry point for the subiquity server process.

    Sets up the environment and logging, waits for cloud-init (possibly
    picking up an autoinstall config from it), then runs SubiquityServer.
    """
    print('starting server')
    setup_environment()
    # setup_environment sets $APPORT_DATA_DIR which must be set before
    # apport is imported, which is done by this import:
    from subiquity.server.server import SubiquityServer
    parser = make_server_args_parser()
    opts = parser.parse_args(sys.argv[1:])
    logdir = LOGDIR
    if opts.dry_run:
        if opts.snaps_from_examples is None:
            opts.snaps_from_examples = True
        logdir = ".subiquity"
    if opts.socket is None:
        if opts.dry_run:
            opts.socket = '.subiquity/socket'
        else:
            opts.socket = '/run/subiquity/socket'
    # BUG FIX: create the directory *containing* the socket.  The original
    # os.path.basename('/run/subiquity/socket') is just 'socket', which
    # created a mis-named directory in the current working directory and
    # left the real socket directory missing.
    os.makedirs(os.path.dirname(opts.socket), exist_ok=True)

    # Route block-discovery related loggers to their own file.
    block_log_dir = os.path.join(logdir, "block")
    os.makedirs(block_log_dir, exist_ok=True)
    handler = logging.FileHandler(os.path.join(block_log_dir, 'discover.log'))
    handler.setLevel('DEBUG')
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(name)s:%(lineno)d %(message)s"))
    logging.getLogger('probert').addHandler(handler)
    # probert.network is too chatty for the discover log.
    handler.addFilter(lambda rec: rec.name != 'probert.network')
    logging.getLogger('curtin').addHandler(handler)
    logging.getLogger('block-discover').addHandler(handler)

    logfiles = setup_logger(dir=logdir, base='subiquity-server')

    logger = logging.getLogger('subiquity')
    version = os.environ.get("SNAP_REVISION", "unknown")
    logger.info("Starting Subiquity server revision {}".format(version))
    logger.info("Arguments passed: {}".format(sys.argv))

    cloud_init_ok = True
    if not opts.dry_run:
        # Wait (up to 10 minutes) for cloud-init; on success, adopt any
        # 'autoinstall' section from its config.
        ci_start = time.time()
        try:
            status_txt = run_command(["cloud-init", "status", "--wait"],
                                     timeout=600).stdout
        except subprocess.TimeoutExpired:
            status_txt = '<timeout>'
            cloud_init_ok = False
        logger.debug("waited %ss for cloud-init", time.time() - ci_start)
        if "status: done" in status_txt:
            logger.debug("loading cloud config")
            init = stages.Init()
            init.read_cfg()
            init.fetch(existing="trust")
            cloud = init.cloudify()
            autoinstall_path = '/autoinstall.yaml'
            if 'autoinstall' in cloud.cfg:
                # 0600: autoinstall data may contain credentials.
                if not os.path.exists(autoinstall_path):
                    atomic_helper.write_file(
                        autoinstall_path,
                        safeyaml.dumps(
                            cloud.cfg['autoinstall']).encode('utf-8'),
                        mode=0o600)
            if os.path.exists(autoinstall_path):
                opts.autoinstall = autoinstall_path
        else:
            logger.debug("cloud-init status: %r, assumed disabled", status_txt)

    server = SubiquityServer(opts, block_log_dir, cloud_init_ok)

    # Register log files so they are attached to apport crash reports.
    server.note_file_for_apport("InstallerServerLog", logfiles['debug'])
    server.note_file_for_apport("InstallerServerLogInfo", logfiles['info'])

    server.run()
Beispiel #21
0
def main():
    """Entry point for the subiquity text-mode client.

    Configures locale and environment, waits for cloud-init (adopting an
    autoinstall config when present), sets up logging, handles the --ssh
    and answers-file options, and finally runs the Subiquity UI.
    """
    # Python 3.7+ does more or less this by default, but we need to
    # work with the Python 3.6 in bionic.
    try:
        locale.setlocale(locale.LC_ALL, "")
    except locale.Error:
        locale.setlocale(locale.LC_CTYPE, "C.UTF-8")

    # Prefer utils from $SNAP, over system-wide
    snap = os.environ.get('SNAP')
    if snap:
        os.environ['PATH'] = os.pathsep.join([
            os.path.join(snap, 'bin'),
            os.path.join(snap, 'usr', 'bin'),
            os.environ['PATH'],
        ])
        os.environ["APPORT_DATA_DIR"] = os.path.join(snap, 'share/apport')
    # This must come after setting $APPORT_DATA_DIR.
    from subiquity.core import Subiquity
    opts = parse_options(sys.argv[1:])
    global LOGDIR
    if opts.dry_run:
        # Dry runs keep all state in the working directory.
        LOGDIR = ".subiquity"
        if opts.snaps_from_examples is None:
            opts.snaps_from_examples = True
    logfiles = setup_logger(dir=LOGDIR)

    logger = logging.getLogger('subiquity')
    version = os.environ.get("SNAP_REVISION", "unknown")
    logger.info("Starting Subiquity revision {}".format(version))
    logger.info("Arguments passed: {}".format(sys.argv))

    if not opts.dry_run:
        # Block until cloud-init settles; NOTE(review): no timeout is
        # passed here (unlike the server variant) — a hung cloud-init
        # would block startup indefinitely.
        ci_start = time.time()
        status_txt = run_command(["cloud-init", "status", "--wait"]).stdout
        logger.debug("waited %ss for cloud-init", time.time() - ci_start)
        if "status: done" in status_txt:
            logger.debug("loading cloud config")
            init = stages.Init()
            init.read_cfg()
            init.fetch(existing="trust")
            cloud = init.cloudify()
            autoinstall_path = '/autoinstall.yaml'
            if 'autoinstall' in cloud.cfg:
                # Write mode 0600: autoinstall data may carry secrets.
                if not os.path.exists(autoinstall_path):
                    atomic_helper.write_file(
                        autoinstall_path,
                        safeyaml.dumps(
                            cloud.cfg['autoinstall']).encode('utf-8'),
                        mode=0o600)
            if os.path.exists(autoinstall_path):
                opts.autoinstall = autoinstall_path
        else:
            logger.debug("cloud-init status: %r, assumed disabled", status_txt)

    # Route block-discovery related loggers to their own file.
    block_log_dir = os.path.join(LOGDIR, "block")
    os.makedirs(block_log_dir, exist_ok=True)
    handler = logging.FileHandler(os.path.join(block_log_dir, 'discover.log'))
    handler.setLevel('DEBUG')
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(name)s:%(lineno)d %(message)s"))
    logging.getLogger('probert').addHandler(handler)
    # probert.network is too chatty for the discover log.
    handler.addFilter(lambda rec: rec.name != 'probert.network')
    logging.getLogger('curtin').addHandler(handler)
    logging.getLogger('block-discover').addHandler(handler)

    if opts.ssh:
        # --ssh: print connection help text and exit without running the UI.
        from subiquity.ui.views.help import (ssh_help_texts,
                                             get_installer_password)
        from subiquitycore.ssh import get_ips_standalone
        texts = ssh_help_texts(get_ips_standalone(),
                               get_installer_password(opts.dry_run))
        for line in texts:
            if hasattr(line, 'text'):
                if line.text.startswith('installer@'):
                    print(' ' * 4 + line.text)
                else:
                    print(line.text)
            else:
                print(line)
        return 0

    if opts.answers is None and os.path.exists(AUTO_ANSWERS_FILE):
        logger.debug("Autoloading answers from %s", AUTO_ANSWERS_FILE)
        opts.answers = AUTO_ANSWERS_FILE

    if opts.answers:
        # Lock the answers file so concurrent instances cannot share it;
        # proceed without answers when the lock cannot be taken.
        opts.answers = open(opts.answers)
        try:
            fcntl.flock(opts.answers, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            logger.exception(
                'Failed to lock auto answers file, proceding without it.')
            opts.answers.close()
            opts.answers = None

    subiquity_interface = Subiquity(opts, block_log_dir)

    # Register log files so they are attached to apport crash reports.
    subiquity_interface.note_file_for_apport("InstallerLog", logfiles['debug'])
    subiquity_interface.note_file_for_apport("InstallerLogInfo",
                                             logfiles['info'])

    subiquity_interface.run()
Beispiel #22
0
    def _render_content(self, network_state):

        # if content already in netplan format, pass it back
        if network_state.version == 2:
            LOG.debug('V2 to V2 passthrough')
            return safeyaml.dumps({'network': network_state.config},
                                  explicit_start=False,
                                  explicit_end=False)

        ethernets = {}
        wifis = {}
        bridges = {}
        bonds = {}
        vlans = {}
        content = []

        interfaces = network_state._network_state.get('interfaces', [])

        nameservers = network_state.dns_nameservers
        searchdomains = network_state.dns_searchdomains

        for config in network_state.iter_interfaces():
            ifname = config.get('name')
            # filter None (but not False) entries up front
            ifcfg = dict((key, value) for (key, value) in config.items()
                         if value is not None)

            if_type = ifcfg.get('type')
            if if_type == 'physical':
                # required_keys = ['name', 'mac_address']
                eth = {
                    'set-name': ifname,
                    'match': ifcfg.get('match', None),
                }
                if eth['match'] is None:
                    macaddr = ifcfg.get('mac_address', None)
                    if macaddr is not None:
                        eth['match'] = {'macaddress': macaddr.lower()}
                    else:
                        del eth['match']
                        del eth['set-name']
                _extract_addresses(ifcfg, eth, ifname, self.features)
                ethernets.update({ifname: eth})

            elif if_type == 'bond':
                # required_keys = ['name', 'bond_interfaces']
                bond = {}
                bond_config = {}
                # extract bond params and drop the bond_ prefix as it's
                # redundent in v2 yaml format
                v2_bond_map = NET_CONFIG_TO_V2.get('bond')
                for match in ['bond_', 'bond-']:
                    bond_params = _get_params_dict_by_match(ifcfg, match)
                    for (param, value) in bond_params.items():
                        newname = v2_bond_map.get(param.replace('_', '-'))
                        if newname is None:
                            continue
                        bond_config.update({newname: value})

                if len(bond_config) > 0:
                    bond.update({'parameters': bond_config})
                if ifcfg.get('mac_address'):
                    bond['macaddress'] = ifcfg.get('mac_address').lower()
                slave_interfaces = ifcfg.get('bond-slaves')
                if slave_interfaces == 'none':
                    _extract_bond_slaves_by_name(interfaces, bond, ifname)
                _extract_addresses(ifcfg, bond, ifname, self.features)
                bonds.update({ifname: bond})

            elif if_type == 'bridge':
                # required_keys = ['name', 'bridge_ports']
                ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
                bridge = {
                    'interfaces': ports,
                }
                # extract bridge params and drop the bridge prefix as it's
                # redundent in v2 yaml format
                match_prefix = 'bridge_'
                params = _get_params_dict_by_match(ifcfg, match_prefix)
                br_config = {}

                # v2 yaml uses different names for the keys
                # and at least one value format change
                v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
                for (param, value) in params.items():
                    newname = v2_bridge_map.get(param)
                    if newname is None:
                        continue
                    br_config.update({newname: value})
                    if newname in ['path-cost', 'port-priority']:
                        # <interface> <value> -> <interface>: int(<value>)
                        newvalue = {}
                        for val in value:
                            (port, portval) = val.split()
                            newvalue[port] = int(portval)
                        br_config.update({newname: newvalue})

                if len(br_config) > 0:
                    bridge.update({'parameters': br_config})
                if ifcfg.get('mac_address'):
                    bridge['macaddress'] = ifcfg.get('mac_address').lower()
                _extract_addresses(ifcfg, bridge, ifname, self.features)
                bridges.update({ifname: bridge})

            elif if_type == 'vlan':
                # required_keys = ['name', 'vlan_id', 'vlan-raw-device']
                vlan = {
                    'id': ifcfg.get('vlan_id'),
                    'link': ifcfg.get('vlan-raw-device')
                }
                macaddr = ifcfg.get('mac_address', None)
                if macaddr is not None:
                    vlan['macaddress'] = macaddr.lower()
                _extract_addresses(ifcfg, vlan, ifname, self.features)
                vlans.update({ifname: vlan})

        # inject global nameserver values under each all interface which
        # has addresses and do not already have a DNS configuration
        if nameservers or searchdomains:
            nscfg = {'addresses': nameservers, 'search': searchdomains}
            for section in [ethernets, wifis, bonds, bridges, vlans]:
                for _name, cfg in section.items():
                    if 'nameservers' in cfg or 'addresses' not in cfg:
                        continue
                    cfg.update({'nameservers': nscfg})

        # workaround yaml dictionary key sorting when dumping
        def _render_section(name, section):
            if section:
                dump = safeyaml.dumps({name: section},
                                      explicit_start=False,
                                      explicit_end=False,
                                      noalias=True)
                txt = util.indent(dump, ' ' * 4)
                return [txt]
            return []

        content.append("network:\n    version: 2\n")
        content += _render_section('ethernets', ethernets)
        content += _render_section('wifis', wifis)
        content += _render_section('bonds', bonds)
        content += _render_section('bridges', bridges)
        content += _render_section('vlans', vlans)

        return "".join(content)
Beispiel #23
0
def _dumps(obj):
    """Serialize *obj* to YAML without document start/end markers.

    Trailing whitespace (including the final newline) is stripped so the
    result can be embedded inline.
    """
    rendered = safeyaml.dumps(obj, explicit_start=False, explicit_end=False)
    return rendered.rstrip()
Beispiel #24
0
 def test_metadata_multiple_ssh_keys(self, m_fn):
     """Metadata carrying several SSH public keys is surfaced intact."""
     md = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML)
     md["public_keys"] = VMW_MULTIPLE_KEYS
     m_fn.side_effect = [safeyaml.dumps(md), "", "", ""]
     self.assert_metadata(md, m_fn, m_fn_call_count=4)
Beispiel #25
0
    def test_main_init_run_net_calls_set_hostname_when_metadata_present(self):
        """When local-hostname metadata is present, call cc_set_hostname.

        Drives main.main_init end to end with a None datasource carrying
        local-hostname metadata. The mocked cc_set_hostname.handle checks
        that it receives the fully fixed-up runtime config, and the test
        then verifies the on-disk side effects (instance-id file, the
        write_files module output) and the expected log lines.
        """
        self.cfg["datasource"] = {
            "None": {
                "metadata": {
                    "local-hostname": "md-hostname"
                }
            }
        }
        cloud_cfg = safeyaml.dumps(self.cfg)
        write_file(self.cloud_cfg_file, cloud_cfg)
        cmdargs = myargs(
            debug=False,
            files=None,
            force=False,
            local=False,
            reporter=None,
            subcommand="init",
        )

        def set_hostname(name, cfg, cloud, log, args):
            # Stand-in for cc_set_hostname.handle: assert the handler is
            # invoked with the expected module name, logger and the config
            # after main_init has applied its runtime defaults.
            self.assertEqual("set-hostname", name)
            updated_cfg = copy.deepcopy(self.cfg)
            updated_cfg.update({
                "def_log_file":
                "/var/log/cloud-init.log",
                "log_cfgs": [],
                "syslog_fix_perms": [
                    "syslog:adm",
                    "root:adm",
                    "root:wheel",
                    "root:root",
                ],
                "vendor_data": {
                    "enabled": True,
                    "prefix": []
                },
                "vendor_data2": {
                    "enabled": True,
                    "prefix": []
                },
            })
            # system_info is consumed during init and not passed through.
            updated_cfg.pop("system_info")

            self.assertEqual(updated_cfg, cfg)
            self.assertEqual(main.LOG, log)
            self.assertIsNone(args)

        (_item1, item2) = wrap_and_call(
            "cloudinit.cmd.main",
            {
                "util.close_stdin": True,
                "netinfo.debug_info": "my net debug info",
                "cc_set_hostname.handle": {
                    "side_effect": set_hostname
                },
                "util.fixup_output": ("outfmt", "errfmt"),
            },
            main.main_init,
            "init",
            cmdargs,
        )
        self.assertEqual([], item2)
        # Instancify is called
        instance_id_path = "var/lib/cloud/data/instance-id"
        # BUG FIX: the original wrapped load_file() in a one-argument
        # os.path.join(), which is a no-op that only obscured the
        # comparison; compare the file contents directly.
        self.assertEqual(
            "iid-datasource-none\n",
            load_file(os.path.join(self.new_root, instance_id_path)),
        )
        # modules are run (including write_files)
        self.assertEqual(
            "blah", load_file(os.path.join(self.new_root, "etc/blah.ini")))
        expected_logs = [
            "network config is disabled by fallback",  # apply_network_config
            "my net debug info",  # netinfo.debug_info
            "no previous run detected",
        ]
        for log in expected_logs:
            self.assertIn(log, self.stderr.getvalue())
Beispiel #26
0
    def _render_content(self, network_state: NetworkState) -> str:
        """Render *network_state* as a netplan v2 YAML document string.

        A v2 ``network_state`` is dumped back out unchanged (passthrough);
        v1 interface state is translated interface by interface into the
        netplan v2 sections (ethernets, wifis, bonds, bridges, vlans) and
        rendered section by section to preserve key ordering.
        """
        # if content already in netplan format, pass it back
        if network_state.version == 2:
            LOG.debug("V2 to V2 passthrough")
            return safeyaml.dumps(
                {"network": network_state.config},
                explicit_start=False,
                explicit_end=False,
            )

        # One dict per netplan v2 section, keyed by interface name.
        ethernets = {}
        wifis = {}
        bridges = {}
        bonds = {}
        vlans = {}
        content = []

        # NOTE(review): reaches into the private _network_state dict; only
        # used below to resolve bond slaves by name.
        interfaces = network_state._network_state.get("interfaces", [])

        # Global DNS settings, injected into eligible interfaces at the end.
        nameservers = network_state.dns_nameservers
        searchdomains = network_state.dns_searchdomains

        for config in network_state.iter_interfaces():
            ifname = config.get("name")
            # filter None (but not False) entries up front
            ifcfg = dict((key, value) for (key, value) in config.items()
                         if value is not None)

            if_type = ifcfg.get("type")
            if if_type == "physical":
                # required_keys = ['name', 'mac_address']
                eth = {
                    "set-name": ifname,
                    "match": ifcfg.get("match", None),
                }
                if eth["match"] is None:
                    # No explicit match stanza: match on the MAC when one is
                    # available, otherwise drop both keys so netplan matches
                    # on the interface name alone.
                    macaddr = ifcfg.get("mac_address", None)
                    if macaddr is not None:
                        eth["match"] = {"macaddress": macaddr.lower()}
                    else:
                        del eth["match"]
                        del eth["set-name"]
                _extract_addresses(ifcfg, eth, ifname, self.features)
                ethernets.update({ifname: eth})

            elif if_type == "bond":
                # required_keys = ['name', 'bond_interfaces']
                bond = {}
                bond_config = {}
                # extract bond params and drop the bond_ prefix as it's
                # redundant in v2 yaml format
                v2_bond_map = NET_CONFIG_TO_V2.get("bond")
                for match in ["bond_", "bond-"]:
                    bond_params = _get_params_dict_by_match(ifcfg, match)
                    for (param, value) in bond_params.items():
                        # v2 parameter names are hyphenated; unmapped params
                        # are silently dropped.
                        newname = v2_bond_map.get(param.replace("_", "-"))
                        if newname is None:
                            continue
                        bond_config.update({newname: value})

                if len(bond_config) > 0:
                    bond.update({"parameters": bond_config})
                if ifcfg.get("mac_address"):
                    bond["macaddress"] = ifcfg.get("mac_address").lower()
                # 'none' means slaves are declared on the member interfaces
                # instead; collect them from the full interface list.
                slave_interfaces = ifcfg.get("bond-slaves")
                if slave_interfaces == "none":
                    _extract_bond_slaves_by_name(interfaces, bond, ifname)
                _extract_addresses(ifcfg, bond, ifname, self.features)
                bonds.update({ifname: bond})

            elif if_type == "bridge":
                # required_keys = ['name', 'bridge_ports']
                ports = sorted(copy.copy(ifcfg.get("bridge_ports")))
                bridge = {
                    "interfaces": ports,
                }
                # extract bridge params and drop the bridge prefix as it's
                # redundant in v2 yaml format
                match_prefix = "bridge_"
                params = _get_params_dict_by_match(ifcfg, match_prefix)
                br_config = {}

                # v2 yaml uses different names for the keys
                # and at least one value format change
                v2_bridge_map = NET_CONFIG_TO_V2.get("bridge")
                for (param, value) in params.items():
                    newname = v2_bridge_map.get(param)
                    if newname is None:
                        continue
                    br_config.update({newname: value})
                    if newname in ["path-cost", "port-priority"]:
                        # <interface> <value> -> <interface>: int(<value>)
                        newvalue = {}
                        for val in value:
                            (port, portval) = val.split()
                            newvalue[port] = int(portval)
                        br_config.update({newname: newvalue})

                if len(br_config) > 0:
                    bridge.update({"parameters": br_config})
                if ifcfg.get("mac_address"):
                    bridge["macaddress"] = ifcfg.get("mac_address").lower()
                _extract_addresses(ifcfg, bridge, ifname, self.features)
                bridges.update({ifname: bridge})

            elif if_type == "vlan":
                # required_keys = ['name', 'vlan_id', 'vlan-raw-device']
                vlan = {
                    "id": ifcfg.get("vlan_id"),
                    "link": ifcfg.get("vlan-raw-device"),
                }
                macaddr = ifcfg.get("mac_address", None)
                if macaddr is not None:
                    vlan["macaddress"] = macaddr.lower()
                _extract_addresses(ifcfg, vlan, ifname, self.features)
                vlans.update({ifname: vlan})

        # inject global nameserver values under every interface which
        # has addresses and does not already have a DNS configuration
        if nameservers or searchdomains:
            nscfg = {"addresses": nameservers, "search": searchdomains}
            for section in [ethernets, wifis, bonds, bridges, vlans]:
                for _name, cfg in section.items():
                    if "nameservers" in cfg or "addresses" not in cfg:
                        continue
                    cfg.update({"nameservers": nscfg})

        # workaround yaml dictionary key sorting when dumping
        def _render_section(name, section):
            # Render one non-empty section as YAML indented under
            # 'network:'; empty sections contribute nothing.
            if section:
                dump = safeyaml.dumps(
                    {name: section},
                    explicit_start=False,
                    explicit_end=False,
                    noalias=True,
                )
                txt = util.indent(dump, " " * 4)
                return [txt]
            return []

        content.append("network:\n    version: 2\n")
        content += _render_section("ethernets", ethernets)
        content += _render_section("wifis", wifis)
        content += _render_section("bonds", bonds)
        content += _render_section("bridges", bridges)
        content += _render_section("vlans", vlans)

        return "".join(content)
Beispiel #27
0
 def test_dumps_returns_string(self):
     """dumps must produce a str even for a plain scalar input."""
     rendered = yaml.dumps(867 - 5309)
     self.assertIsInstance(rendered, str)
Beispiel #28
0
 def test_dumps_is_loadable(self):
     """A dumps/loads round trip reproduces the original structure."""
     original = {'a': 'hey', 'b': ['bee', 'Bea']}
     round_tripped = yaml.loads(yaml.dumps(original))
     self.assertEqual(round_tripped, original)
Beispiel #29
0
def handle_args(name, args):
    """Convert a network config from one format and render it in another.

    Reads ``args.network_data`` in ``args.kind`` format ('eni', 'yaml',
    'network_data.json', 'azure-imds' or 'vmware-imc'), parses it into
    internal network state, then renders it with the ``args.output_kind``
    renderer ('eni', 'netplan', 'networkd' or 'sysconfig') into
    ``args.directory``.

    :param name: unused subcommand name (kept for CLI dispatch signature).
    :param args: argparse.Namespace with directory, debug, mac, kind,
        network_data, output_kind and distro attributes.
    :raises RuntimeError: if the input or output kind is unrecognized.
    """
    if not args.directory.endswith("/"):
        args.directory += "/"

    if not os.path.isdir(args.directory):
        os.makedirs(args.directory)

    if args.debug:
        log.setupBasicLogging(level=log.DEBUG)
    else:
        log.setupBasicLogging(level=log.WARN)

    # Optional 'name,mac' pairs pin interface names to MAC addresses.
    if args.mac:
        known_macs = {}
        for item in args.mac:
            iface_name, iface_mac = item.split(",", 1)
            known_macs[iface_mac] = iface_name
    else:
        known_macs = None

    net_data = args.network_data.read()
    if args.kind == "eni":
        pre_ns = eni.convert_eni_data(net_data)
    elif args.kind == "yaml":
        pre_ns = safeyaml.load(net_data)
        # Accept either bare config or config nested under 'network'.
        if 'network' in pre_ns:
            pre_ns = pre_ns.get('network')
        if args.debug:
            sys.stderr.write('\n'.join(
                ["Input YAML", safeyaml.dumps(pre_ns), ""]))
    elif args.kind == 'network_data.json':
        pre_ns = openstack.convert_net_json(
            json.loads(net_data), known_macs=known_macs)
    elif args.kind == 'azure-imds':
        pre_ns = azure.parse_network_config(json.loads(net_data))
    elif args.kind == 'vmware-imc':
        config = ovf.Config(ovf.ConfigFile(args.network_data.name))
        pre_ns = ovf.get_network_config_from_conf(config, False)
    else:
        # BUG FIX: previously an unrecognized kind fell through all the
        # branches and raised a confusing NameError on pre_ns below; fail
        # explicitly, mirroring the output_kind handling.
        raise RuntimeError("Invalid input kind %r" % args.kind)

    ns = network_state.parse_net_config_data(pre_ns)

    if args.debug:
        sys.stderr.write('\n'.join(
            ["", "Internal State", safeyaml.dumps(ns), ""]))
    distro_cls = distros.fetch(args.distro)
    distro = distro_cls(args.distro, {}, None)
    config = {}
    if args.output_kind == "eni":
        r_cls = eni.Renderer
        config = distro.renderer_configs.get('eni')
    elif args.output_kind == "netplan":
        r_cls = netplan.Renderer
        config = distro.renderer_configs.get('netplan')
        # don't run netplan generate/apply
        config['postcmds'] = False
        # trim leading slash
        config['netplan_path'] = config['netplan_path'][1:]
        # enable some netplan features
        config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
    elif args.output_kind == "networkd":
        r_cls = networkd.Renderer
        config = distro.renderer_configs.get('networkd')
    elif args.output_kind == "sysconfig":
        r_cls = sysconfig.Renderer
        config = distro.renderer_configs.get('sysconfig')
    else:
        raise RuntimeError("Invalid output_kind")

    r = r_cls(config=config)
    sys.stderr.write(''.join([
        "Read input format '%s' from '%s'.\n" % (
            args.kind, args.network_data.name),
        "Wrote output format '%s' to '%s'\n" % (
            args.output_kind, args.directory)]) + "\n")
    r.render_network_state(network_state=ns, target=args.directory)
Beispiel #30
0
def main():
    """Entry point for the Subiquity client.

    Sets up the environment and logging, optionally spawns a dry-run
    server, consumes cloud-init autoinstall data when present, wires up
    block-discovery logging, and finally runs the Subiquity UI (or, with
    --ssh, just prints connection instructions and exits).
    """
    setup_environment()
    # setup_environment sets $APPORT_DATA_DIR which must be set before
    # apport is imported, which is done by this import:
    from subiquity.core import Subiquity
    parser = make_client_args_parser()
    args = sys.argv[1:]
    if '--dry-run' in args:
        opts, unknown = parser.parse_known_args(args)
        if opts.socket is None:
            # No server yet: spawn one in the background against a local
            # socket and remember its pid.
            os.makedirs('.subiquity', exist_ok=True)
            sock_path = '.subiquity/socket'
            opts.socket = sock_path
            server_args = ['--dry-run', '--socket=' + sock_path] + unknown
            server_parser = make_server_args_parser()
            server_parser.parse_args(server_args)  # just to check
            server_output = open('.subiquity/server-output', 'w')
            server_cmd = [sys.executable, '-m', 'subiquity.cmd.server'] + \
                server_args
            server_proc = subprocess.Popen(server_cmd,
                                           stdout=server_output,
                                           stderr=subprocess.STDOUT)
            opts.server_pid = str(server_proc.pid)
            print("running server pid {}".format(server_proc.pid))
        elif opts.server_pid is not None:
            print("reconnecting to server pid {}".format(opts.server_pid))
        else:
            opts = parser.parse_args(args)
    else:
        opts = parser.parse_args(args)
        if opts.socket is None:
            opts.socket = '/run/subiquity/socket'
    # BUG FIX: os.path.basename(opts.socket) yielded just the socket *file*
    # name, creating a stray directory in the CWD. The intent is to ensure
    # the socket's parent directory exists ('' -> '.' for a bare filename).
    os.makedirs(os.path.dirname(opts.socket) or '.', exist_ok=True)
    logdir = LOGDIR
    if opts.dry_run:
        if opts.snaps_from_examples is None:
            opts.snaps_from_examples = True
        logdir = ".subiquity"
    logfiles = setup_logger(dir=logdir, base='subiquity')

    logger = logging.getLogger('subiquity')
    version = os.environ.get("SNAP_REVISION", "unknown")
    logger.info("Starting Subiquity revision {}".format(version))
    logger.info("Arguments passed: {}".format(sys.argv))

    if not opts.dry_run:
        # Wait for cloud-init; if it finished, extract any 'autoinstall'
        # section of its config to /autoinstall.yaml for the installer.
        ci_start = time.time()
        status_txt = run_command(["cloud-init", "status", "--wait"]).stdout
        logger.debug("waited %ss for cloud-init", time.time() - ci_start)
        if "status: done" in status_txt:
            logger.debug("loading cloud config")
            init = stages.Init()
            init.read_cfg()
            init.fetch(existing="trust")
            cloud = init.cloudify()
            autoinstall_path = '/autoinstall.yaml'
            if 'autoinstall' in cloud.cfg:
                if not os.path.exists(autoinstall_path):
                    atomic_helper.write_file(
                        autoinstall_path,
                        safeyaml.dumps(
                            cloud.cfg['autoinstall']).encode('utf-8'),
                        mode=0o600)
            if os.path.exists(autoinstall_path):
                opts.autoinstall = autoinstall_path
        else:
            logger.debug("cloud-init status: %r, assumed disabled", status_txt)

    # Route probert/curtin/block-discover logging (except probert.network,
    # which is too chatty) to a dedicated discover.log file.
    block_log_dir = os.path.join(logdir, "block")
    os.makedirs(block_log_dir, exist_ok=True)
    handler = logging.FileHandler(os.path.join(block_log_dir, 'discover.log'))
    handler.setLevel('DEBUG')
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(name)s:%(lineno)d %(message)s"))
    logging.getLogger('probert').addHandler(handler)
    handler.addFilter(lambda rec: rec.name != 'probert.network')
    logging.getLogger('curtin').addHandler(handler)
    logging.getLogger('block-discover').addHandler(handler)

    if opts.ssh:
        # --ssh: print how to connect over SSH instead of running the UI.
        from subiquity.ui.views.help import (ssh_help_texts,
                                             get_installer_password)
        from subiquitycore.ssh import get_ips_standalone
        texts = ssh_help_texts(get_ips_standalone(),
                               get_installer_password(opts.dry_run))
        for line in texts:
            if hasattr(line, 'text'):
                if line.text.startswith('installer@'):
                    print(' ' * 4 + line.text)
                else:
                    print(line.text)
            else:
                print(line)
        return 0

    if opts.answers is None and os.path.exists(AUTO_ANSWERS_FILE):
        logger.debug("Autoloading answers from %s", AUTO_ANSWERS_FILE)
        opts.answers = AUTO_ANSWERS_FILE

    if opts.answers:
        # Lock the answers file so concurrent installers don't share it;
        # proceed without answers if the lock cannot be taken.
        opts.answers = open(opts.answers)
        try:
            fcntl.flock(opts.answers, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            logger.exception(
                'Failed to lock auto answers file, proceding without it.')
            opts.answers.close()
            opts.answers = None

    subiquity_interface = Subiquity(opts, block_log_dir)

    subiquity_interface.note_file_for_apport("InstallerLog", logfiles['debug'])
    subiquity_interface.note_file_for_apport("InstallerLogInfo",
                                             logfiles['info'])

    subiquity_interface.run()