def test_zpool_create_pool_iterable(self):
    """ zpool_create accepts vdev iterables besides list """
    zpool_props = {'version': 250, 'ashift': 9}
    zfs.zpool_create('mypool', ('/dev/virtio-disk1', '/dev/virtio-disk2'),
                     pool_properties=zpool_props)

    # build the expected merged pool properties (the merge result was
    # previously discarded) and check they were passed on the command line
    expected_props = zfs.ZPOOL_DEFAULT_PROPERTIES.copy()
    merge_config(expected_props, zpool_props)
    params = ["%s=%s" % (k, v) for k, v in expected_props.items()]

    args, _ = self.mock_subp.call_args
    self.assertTrue(set(params).issubset(set(args[0])))
    self.assertIn("/dev/virtio-disk1", args[0])
    self.assertIn("/dev/virtio-disk2", args[0])
def zpool_create(poolname, vdevs, mountpoint=None, altroot=None,
                 pool_properties=None, zfs_properties=None):
    """
    Create a zpool called <poolname> comprised of devices specified in
    <vdevs>.

    :param poolname: String used to name the pool.
    :param vdevs: An iterable of strings of block device paths which
                  *should* start with '/dev/disk/by-id/' to follow best
                  practices.
    :param pool_properties: A dictionary of key, value pairs to be passed
                            to `zpool create` with the `-o` flag as
                            properties of the zpool.  If value is None,
                            then ZPOOL_DEFAULT_PROPERTIES will be used.
    :param zfs_properties: A dictionary of key, value pairs to be passed
                           to `zpool create` with the `-O` flag as
                           properties of the filesystems created under the
                           pool.  If the value is None, then
                           ZFS_DEFAULT_PROPERTIES will be used.
    :returns: None on success.
    :raises: ValueError: raises exceptions on missing/bad input.
    :raises: ProcessExecutionError: raised on unhandled exceptions from
             invoking `zpool create`.
    """
    if not isinstance(poolname, util.string_types) or not poolname:
        raise ValueError("Invalid poolname: %s" % poolname)

    if isinstance(vdevs, util.string_types) or isinstance(vdevs, dict):
        raise TypeError("Invalid vdevs: expected list-like iterable")
    try:
        vdevs = list(vdevs)
    except TypeError:
        raise TypeError("vdevs must be iterable, not: %s" % str(vdevs))

    pool_cfg = ZPOOL_DEFAULT_PROPERTIES.copy()
    if pool_properties:
        merge_config(pool_cfg, pool_properties)
    zfs_cfg = ZFS_DEFAULT_PROPERTIES.copy()
    if zfs_properties:
        merge_config(zfs_cfg, zfs_properties)

    options = _join_flags('-o', pool_cfg)
    options.extend(_join_flags('-O', zfs_cfg))

    if mountpoint:
        options.extend(_join_flags('-O', {'mountpoint': mountpoint}))

    if altroot:
        options.extend(['-R', altroot])

    cmd = ["zpool", "create"] + options + [poolname] + vdevs
    util.subp(cmd, capture=True)
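# Usage sketch for zpool_create above, assuming curtin's zfs module is
# importable; the pool name, device path, and property values here are
# hypothetical examples, not recommendations.
from curtin import zfs

if __name__ == '__main__':
    # roughly equivalent to:
    #   zpool create -o ashift=12 ... -O compression=lz4 -O mountpoint=/ \
    #       rpool /dev/disk/by-id/virtio-abc123
    zfs.zpool_create('rpool', ['/dev/disk/by-id/virtio-abc123'],
                     mountpoint='/',
                     pool_properties={'ashift': 12},
                     zfs_properties={'compression': 'lz4'})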
def _cloud_init_config(self):
    config = {
        'growpart': {
            'mode': 'off',
        },
        'resize_rootfs': False,
    }
    if self.identity.hostname is not None:
        config['preserve_hostname'] = True
    user = self.identity.user
    if user:
        users_and_groups_path = os.path.join(
            os.environ.get("SNAP", "."), "users-and-groups")
        if os.path.exists(users_and_groups_path):
            with open(users_and_groups_path) as fp:
                groups = fp.read().split()
        else:
            groups = ['admin']
        groups.append('sudo')
        groups = [group for group in groups
                  if group in self.get_target_groups()]
        user_info = {
            'name': user.username,
            'gecos': user.realname,
            'passwd': user.password,
            'shell': '/bin/bash',
            'groups': groups,
            'lock_passwd': False,
        }
        if self.ssh.authorized_keys:
            user_info['ssh_authorized_keys'] = self.ssh.authorized_keys
        config['users'] = [user_info]
    else:
        if self.ssh.authorized_keys:
            config['ssh_authorized_keys'] = self.ssh.authorized_keys
    if self.ssh.install_server:
        config['ssh_pwauth'] = self.ssh.pwauth
    for model_name in POSTINSTALL_MODEL_NAMES:
        model = getattr(self, model_name)
        if getattr(model, 'make_cloudconfig', None):
            merge_config(config, model.make_cloudconfig())
    userdata = copy.deepcopy(self.userdata)
    merge_config(userdata, config)
    return userdata
def system_upgrade(cfg, target):
    """run system-upgrade (apt-get dist-upgrade) or other in target.

    config:
      system_upgrade:
        enabled: False
    """
    mycfg = {'system_upgrade': {'enabled': False}}
    config.merge_config(mycfg, cfg)
    mycfg = mycfg.get('system_upgrade')
    if not isinstance(mycfg, dict):
        LOG.debug("system_upgrade disabled by config. entry not a dict.")
        return

    if not config.value_as_boolean(mycfg.get('enabled', True)):
        LOG.debug("system_upgrade disabled by config.")
        return

    util.system_upgrade(target=target)
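# A minimal sketch of the defaults-then-override pattern used above,
# assuming curtin's config.merge_config (which merges the second dict over
# the first, recursing into nested dicts); the values are illustrative.
from curtin import config

mycfg = {'system_upgrade': {'enabled': False}}
config.merge_config(mycfg, {'system_upgrade': {'enabled': True}})
assert mycfg['system_upgrade']['enabled'] is True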
def test_zpool_create_use_passed_properties(self):
    """ zpool_create uses provided properties """
    zpool_props = {'prop1': 'val1'}
    zfs_props = {'fsprop1': 'val2'}
    zfs.zpool_create('mypool', ['/dev/disk/by-id/virtio-abcfoo1'],
                     pool_properties=zpool_props,
                     zfs_properties=zfs_props)

    all_props = {}
    merge_config(all_props, zfs.ZPOOL_DEFAULT_PROPERTIES.copy())
    merge_config(all_props, zfs.ZFS_DEFAULT_PROPERTIES.copy())
    merge_config(all_props, zpool_props.copy())
    # also check the zfs (-O) properties, which this test passes in
    merge_config(all_props, zfs_props.copy())
    params = ["%s=%s" % (k, v) for k, v in all_props.items()]

    args, _ = self.mock_subp.call_args
    self.assertTrue(set(params).issubset(set(args[0])))
def render(self, syslog_identifier):
    config = {
        'apt': {
            'preserve_sources_list': False,
        },
        'sources': {
            'ubuntu00': 'cp:///media/filesystem',
        },
        'curthooks_commands': {
            '000-configure-run': [
                '/snap/bin/subiquity.subiquity-configure-run',
            ],
            '001-configure-apt': [
                '/snap/bin/subiquity.subiquity-configure-apt',
                sys.executable,
                str(self.network.has_network).lower(),
            ],
        },
        'grub': {
            'terminal': 'unmodified',
            'probe_additional_os': True,
        },
        'install': {
            'target': self.target,
            'unmount': 'disabled',
            'save_install_config':
                '/var/log/installer/curtin-install-cfg.yaml',
            'save_install_log':
                '/var/log/installer/curtin-install.log',
        },
        'verbosity': 3,
        'pollinate': {
            'user_agent': {
                'subiquity': "%s_%s" % (
                    os.environ.get("SNAP_VERSION", 'dry-run'),
                    os.environ.get("SNAP_REVISION", 'dry-run')),
            },
        },
        'reporting': {
            'subiquity': {
                'type': 'journald',
                'identifier': syslog_identifier,
            },
        },
        'write_files': {
            'etc_machine_id': {
                'path': 'etc/machine-id',
                'content': self._machine_id(),
                'permissions': 0o444,
            },
            'media_info': {
                'path': 'var/log/installer/media-info',
                'content': self._media_info(),
                'permissions': 0o644,
            },
        },
    }

    for model in self._install_models:
        log.debug("merging config from %s", model)
        merge_config(config, model.render())

    mp_file = os.path.join(self.root, "run/kernel-meta-package")
    if os.path.exists(mp_file):
        with open(mp_file) as fp:
            kernel_package = fp.read().strip()
        config['kernel'] = {
            'package': kernel_package,
        }
    return config
def render(self, syslog_identifier):
    # Until https://bugs.launchpad.net/curtin/+bug/1876984 gets
    # fixed, the only way to get curtin to leave the network
    # config entirely alone is to omit the 'network' stage.
    stages = [
        stage for stage in CONFIG_BUILTIN['stages'] if stage != 'network'
    ]
    config = {
        'stages': stages,
        'sources': {
            'ubuntu00': 'cp:///media/filesystem',
        },
        'curthooks_commands': {
            '000-configure-run': [
                '/snap/bin/subiquity.subiquity-configure-run',
            ],
            '001-configure-apt': [
                '/snap/bin/subiquity.subiquity-configure-apt',
                sys.executable,
                str(self.network.has_network).lower(),
            ],
        },
        'grub': {
            'terminal': 'unmodified',
            'probe_additional_os': True,
        },
        'install': {
            'target': self.target,
            'unmount': 'disabled',
            'save_install_config':
                '/var/log/installer/curtin-install-cfg.yaml',
            'save_install_log':
                '/var/log/installer/curtin-install.log',
        },
        'verbosity': 3,
        'pollinate': {
            'user_agent': {
                'subiquity': "%s_%s" % (
                    os.environ.get("SNAP_VERSION", 'dry-run'),
                    os.environ.get("SNAP_REVISION", 'dry-run')),
            },
        },
        'reporting': {
            'subiquity': {
                'type': 'journald',
                'identifier': syslog_identifier,
            },
        },
        'write_files': {
            'etc_machine_id': {
                'path': 'etc/machine-id',
                'content': self._machine_id(),
                'permissions': 0o444,
            },
            'media_info': {
                'path': 'var/log/installer/media-info',
                'content': self._media_info(),
                'permissions': 0o644,
            },
        },
    }

    if os.path.exists('/run/casper-md5check.json'):
        with open('/run/casper-md5check.json') as fp:
            config['write_files']['md5check'] = {
                'path': 'var/log/installer/casper-md5check.json',
                'content': fp.read(),
                'permissions': 0o644,
            }

    for model_name in INSTALL_MODEL_NAMES:
        model = getattr(self, model_name)
        log.debug("merging config from %s", model)
        merge_config(config, model.render())

    mp_file = os.path.join(self.root, "run/kernel-meta-package")
    if os.path.exists(mp_file):
        with open(mp_file) as fp:
            kernel_package = fp.read().strip()
        config['kernel'] = {
            'package': kernel_package,
        }
    return config
def cmd_install(args):
    from .collect_logs import create_log_tarfile
    cfg = deepcopy(CONFIG_BUILTIN)
    config.merge_config(cfg, args.config)

    for source in args.source:
        src = util.sanitize_source(source)
        cfg['sources']["%02d_cmdline" % len(cfg['sources'])] = src

    LOG.info(INSTALL_START_MSG)
    LOG.debug('LANG=%s', os.environ.get('LANG'))
    LOG.debug("merged config: %s", cfg)
    if not cfg.get('sources'):
        raise util.BadUsage("no sources provided to install")

    for i in cfg['sources']:
        # we default to tgz for old style sources config
        cfg['sources'][i] = util.sanitize_source(cfg['sources'][i])

    migrate_proxy_settings(cfg)
    for k in ('http_proxy', 'https_proxy', 'no_proxy'):
        if k in cfg['proxy']:
            os.environ[k] = cfg['proxy'][k]

    instcfg = cfg.get('install', {})
    logfile = instcfg.get('log_file')
    error_tarfile = instcfg.get('error_tarfile')
    post_files = instcfg.get('post_files', [logfile])

    # Generate curtin configuration dump and add to write_files unless
    # installation config disables dump
    yaml_dump_file = instcfg.get('save_install_config', SAVE_INSTALL_CONFIG)
    if yaml_dump_file:
        write_files = cfg.get('write_files', {})
        write_files['curtin_install_cfg'] = {
            'path': yaml_dump_file,
            'permissions': '0400',
            'owner': 'root:root',
            'content': config.dump_config(cfg),
        }
        cfg['write_files'] = write_files

    # Load reporter
    clear_install_log(logfile)
    legacy_reporter = load_reporter(cfg)
    legacy_reporter.files = post_files

    writeline_and_stdout(logfile, INSTALL_START_MSG)
    args.reportstack.post_files = post_files
    workingd = None
    try:
        workingd = WorkingDir(cfg)
        dd_images = util.get_dd_images(cfg.get('sources', {}))
        if len(dd_images) > 1:
            raise ValueError("You may not use more than one disk image")

        LOG.debug(workingd.env())
        env = os.environ.copy()
        env.update(workingd.env())

        for name in cfg.get('stages'):
            desc = STAGE_DESCRIPTIONS.get(name, "stage %s" % name)
            reportstack = events.ReportEventStack(
                "stage-%s" % name, description=desc,
                parent=args.reportstack)
            env['CURTIN_REPORTSTACK'] = reportstack.fullname

            with reportstack:
                commands_name = '%s_commands' % name
                with util.LogTimer(LOG.debug, 'stage_%s' % name):
                    stage = Stage(name, cfg.get(commands_name, {}), env,
                                  reportstack=reportstack, logfile=logfile)
                    stage.run()

        if apply_kexec(cfg.get('kexec'), workingd.target):
            cfg['power_state'] = {'mode': 'reboot', 'delay': 'now',
                                  'message': "'rebooting with kexec'"}

        writeline_and_stdout(logfile, INSTALL_PASS_MSG)
        legacy_reporter.report_success()
    except Exception as e:
        exp_msg = INSTALL_FAIL_MSG.format(exception=e)
        writeline(logfile, exp_msg)
        LOG.error(exp_msg)
        legacy_reporter.report_failure(exp_msg)
        if error_tarfile:
            create_log_tarfile(error_tarfile, cfg)
        raise
    finally:
        log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG)
        if log_target_path and workingd:
            copy_install_log(logfile, workingd.target, log_target_path)

        if instcfg.get('unmount', "") == "disabled":
            LOG.info('Skipping unmount: config disabled target unmounting')
        elif workingd:
            # unmount everything (including iscsi disks)
            util.do_umount(workingd.target, recursive=True)

            # The open-iscsi service in the ephemeral environment handles
            # disconnecting active sessions. On Artful release the systemd
            # unit file has conditionals that are not met at boot time and
            # results in open-iscsi service not being started; This breaks
            # shutdown on Artful releases.
            # Additionally, in release < Artful, if the storage configuration
            # is layered, like RAID over iscsi volumes, then disconnecting
            # iscsi sessions before stopping the raid device hangs.
            # As it turns out, letting the open-iscsi service take down the
            # session last is the cleanest way to handle all releases
            # regardless of what may be layered on top of the iscsi disks.
            #
            # Check if storage configuration has iscsi volumes and if so
            # ensure iscsi service is active before exiting install
            if iscsi.get_iscsi_disks_from_config(cfg):
                iscsi.restart_iscsi_service()

            shutil.rmtree(workingd.top)

    apply_power_state(cfg.get('power_state'))
    sys.exit(0)
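# Hypothetical minimal config for cmd_install; the source URL follows the
# same style used elsewhere in this section, and the 'install' keys mirror
# the instcfg lookups above. Real defaults come from CONFIG_BUILTIN.
example_cfg = {
    'sources': {
        '00_cmdline': 'cp:///media/filesystem',
    },
    'install': {
        'log_file': '/var/log/curtin/install.log',
        'unmount': 'disabled',
    },
}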
def install_kernel(cfg, target):
    kernel_cfg = cfg.get('kernel', {'package': None,
                                    'fallback-package': "linux-generic",
                                    'mapping': {}})
    if kernel_cfg is not None:
        kernel_package = kernel_cfg.get('package')
        kernel_fallback = kernel_cfg.get('fallback-package')
    else:
        # guard against an explicit 'kernel: None' in the config
        kernel_cfg = {}
        kernel_package = None
        kernel_fallback = None

    mapping = copy.deepcopy(KERNEL_MAPPING)
    config.merge_config(mapping, kernel_cfg.get('mapping', {}))

    # Machines using flash-kernel may need additional dependencies installed
    # before running. Run those checks in the ephemeral environment so the
    # target only has required packages installed. See LP:1640519
    fk_packages = get_flash_kernel_pkgs()
    if fk_packages:
        util.install_packages(fk_packages.split(), target=target)

    if kernel_package:
        util.install_packages([kernel_package], target=target)
        return

    # uname[2] is kernel name (ie: 3.16.0-7-generic)
    # version gets X.Y.Z, flavor gets anything after second '-'.
    kernel = os.uname()[2]
    codename, _ = util.subp(['lsb_release', '--codename', '--short'],
                            capture=True, target=target)
    codename = codename.strip()
    version, abi, flavor = kernel.split('-', 2)

    try:
        map_suffix = mapping[codename][version]
    except KeyError:
        LOG.warning("Couldn't detect kernel package to install for %s.",
                    kernel)
        if kernel_fallback is not None:
            util.install_packages([kernel_fallback], target=target)
        return

    package = "linux-{flavor}{map_suffix}".format(flavor=flavor,
                                                  map_suffix=map_suffix)

    if util.has_pkg_available(package, target):
        if util.has_pkg_installed(package, target):
            LOG.debug("Kernel package '%s' already installed", package)
        else:
            LOG.debug("installing kernel package '%s'", package)
            util.install_packages([package], target=target)
    else:
        if kernel_fallback is not None:
            LOG.info(
                "Kernel package '%s' not available. "
                "Installing fallback package '%s'.",
                package, kernel_fallback)
            util.install_packages([kernel_fallback], target=target)
        else:
            LOG.warning(
                "Kernel package '%s' not available and no fallback."
                " System may not boot.", package)
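# Illustration of the mapping lookup in install_kernel: KERNEL_MAPPING is
# keyed by release codename, then kernel version, yielding a package-name
# suffix. The entries below are hypothetical, not curtin's actual table.
example_mapping = {
    'xenial': {
        '4.4.0': '',             # -> linux-<flavor>
        '4.15.0': '-hwe-16.04',  # -> linux-<flavor>-hwe-16.04
    },
}
suffix = example_mapping['xenial']['4.15.0']
package = "linux-{flavor}{suffix}".format(flavor='generic', suffix=suffix)
assert package == 'linux-generic-hwe-16.04'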
def load_autoinstall_data(self, data):
    if data is None:
        return
    geoip = data.pop('geoip', True)
    merge_config(self.model.config, data)
    self.geoip_enabled = geoip and self.model.is_default()
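# Sketch of load_autoinstall_data's pop-then-merge behavior with a
# hypothetical autoinstall section; only 'geoip' is consumed here, and the
# remainder is merged into the model config.
data = {'geoip': False, 'layout': 'us'}
geoip = data.pop('geoip', True)    # -> False
assert data == {'layout': 'us'}    # remainder goes to merge_config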