def dmsetup_info(devname):
    """Return a dict of device-mapper info for *devname*.

    Example result:
      {'blkdevname': 'dm-0',
       'blkdevs_used': 'sda5',
       'name': 'sda5_crypt',
       'subsystem': 'CRYPT',
       'uuid': 'CRYPT-LUKS1-2b370697149743b0b2407d11f88311f1-sda5_crypt'}

    Returns {} when the dmsetup command fails.
    """
    separator = '='
    columns = 'name,uuid,blkdevname,blkdevs_used,subsystem'.split(',')
    cmd = ['dmsetup', 'info', devname, '-C', '-o', ','.join(columns),
           '--noheading', '--separator', separator]
    try:
        out, _err = util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        LOG.error('Failed to run dmsetup info: %s', e)
        return {}

    # one row of separator-joined values, in the same order as columns
    return dict(zip(columns, out.strip().split(separator)))
def validate_config(config, sourcefile=None):
    """Validate storage config object.

    :param config: storage config dict, validated first against
        STORAGE_CONFIG_SCHEMA, then against the matching per-type schema
        for a more precise error message.
    :param sourcefile: optional filename included in error messages.
    :raises ImportError: when jsonschema is not installed.
    :raises ValueError: when validation fails.
    """
    if not sourcefile:
        sourcefile = ''
    try:
        import jsonschema
        jsonschema.validate(config, STORAGE_CONFIG_SCHEMA)
    except ImportError:
        LOG.error('Cannot validate storage config, missing jsonschema')
        raise
    except jsonschema.exceptions.ValidationError as e:
        if isinstance(e.instance, int):
            # FIX: value/property arguments were swapped; e.instance is the
            # offending value, e.path[0] names the property it belongs to.
            msg = 'Unexpected value (%s) for property "%s"' % (e.instance,
                                                               e.path[0])
            raise ValueError(msg)
        if 'type' not in e.instance:
            msg = "%s in %s" % (e.message, e.instance)
            raise ValueError(msg)
        instance_type = e.instance['type']
        stype = get_storage_types().get(instance_type)
        if stype:
            # re-validate against the type-specific schema so the error
            # points at the exact failing item
            try:
                jsonschema.validate(e.instance, stype.schema)
            except jsonschema.exceptions.ValidationError as f:
                msg = "%s in %s\n%s" % (f.message, sourcefile,
                                        util.json_dumps(e.instance))
                raise ValueError(msg)
        else:
            msg = "Unknown storage type: %s in %s" % (instance_type,
                                                      e.instance)
            raise ValueError(msg)
def add_partition(self, partnumber, partsize):
    """ Add a partition to this DasdDevice specifying partnumber and size.

        :param partnumber: integer value of partition number (1, 2 or 3)
        :param partsize: partition sizes in bytes.

        :raises: ValueError on invalid devname

        Example fdasd command with defaults:
          fdasd --verbose --config=/tmp/curtin/dasd-part1.fdasd /dev/dasdb
    """
    LOG.debug(
        "add_partition: partnumber: %s partsize: %s", partnumber, partsize)

    partitions = self._ptable_for_new_partition(partnumber, partsize)
    LOG.debug("fdasd: partitions to be created: %s", partitions)
    content = "\n".join("[%s,%s]" % (p[0], p[1]) for p in partitions)
    LOG.debug("fdasd: content=\n%s", content)

    # fdasd reads the partition layout from a config file; stage one in a
    # throw-away temp file and always clean it up afterwards.
    tmpcfg = tempfile.NamedTemporaryFile(suffix=".fdasd", delete=False)
    tmpcfg.close()
    util.write_file(tmpcfg.name, content)
    cmd = ['fdasd', '--verbose', '--config=%s' % tmpcfg.name, self.devname]
    LOG.debug('Partitioning %s with %s', self.devname, cmd)
    try:
        util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        LOG.error("Partitioning failed: %s", e)
        raise
    finally:
        if os.path.exists(tmpcfg.name):
            os.unlink(tmpcfg.name)
def force_devmapper_symlinks():
    """Check if /dev/mapper/mpath* files are symlinks, if not trigger udev.

    Any /dev/mapper/mpath* entry that is not a symlink is removed, then
    udev 'add' events are replayed for the underlying dm block devices so
    udev recreates the symlinks; each is verified after settling.
    """
    LOG.debug('Verifying /dev/mapper/mpath* files are symlinks')
    needs_trigger = []
    for mp_id, dm_dev in dmname_to_blkdev_mapping().items():
        if mp_id.startswith('mpath'):
            mapper_path = '/dev/mapper/' + mp_id
            if not os.path.islink(mapper_path):
                LOG.warning(
                    'Found invalid device mapper mp path: %s, removing',
                    mapper_path)
                util.del_file(mapper_path)
                needs_trigger.append((mapper_path, dm_dev))

    # idiom fix: iterate directly; the 'if len(...)' guard was redundant
    # since looping over an empty list is a no-op.
    for (mapper_path, dm_dev) in needs_trigger:
        LOG.debug('multipath: regenerating symlink for %s (%s)',
                  mapper_path, dm_dev)
        util.subp(['udevadm', 'trigger', '--subsystem-match=block',
                   '--action=add',
                   '/sys/class/block/' + os.path.basename(dm_dev)])
        udev.udevadm_settle(exists=mapper_path)
        if not os.path.islink(mapper_path):
            LOG.error('Failed to regenerate udev symlink %s', mapper_path)
def partition(self, partnumber, partsize, strict=True):
    """ Add a partition to this DasdDevice specifying partnumber and size.

        :param partnumber: integer value of partition number (1, 2 or 3)
        :param partsize: partition sizes in bytes.
        :param strict: boolean which enforces that dasd device exists
            before issuing fdasd command, defaults to True.

        :raises: RuntimeError if strict==True and devname does not exist.
        :raises: ValueError on invalid devname

        Example fdasd command with defaults:
          fdasd --verbose --config=/tmp/curtin/dasd-part1.fdasd /dev/dasdb
    """
    # DASD partition tables managed by fdasd hold at most 3 partitions.
    if partnumber > 3:
        raise ValueError('DASD devices only allow 3 partitions')

    if strict and not os.path.exists(self.devname):
        raise RuntimeError("devname '%s' does not exist" % self.devname)

    info = dasdview(self.devname)
    geo = info['geometry']

    existing_partitions = self.get_partition_table()
    partitions = []
    # Carry over the (start_track, end_track) ranges of the partitions
    # that precede the one being added.
    for partinfo in existing_partitions[0:partnumber]:
        # (devpath, start_track, end_track, nr_tracks, partnum)
        start = partinfo[1]
        end = partinfo[2]
        partitions.append((start, end))

    # first partition always starts at track 2
    # all others start after the previous partition ends
    if partnumber == 1:
        start = 2
    else:
        # int() because carried-over entries may be strings; see append
        # below which stores stringified values.
        start = int(partitions[-1][1]) + 1
    # end is size + 1
    tracks_needed = int(self._bytes_to_tracks(geo, partsize))
    end = start + tracks_needed + 1
    partitions.append(("%s" % start, "%s" % end))
    # fdasd config format: one "[start,end]" line per partition.
    content = "\n".join(
        ["[%s,%s]" % (part[0], part[1]) for part in partitions])
    LOG.debug("fdasd: partitions to be created: %s", partitions)
    LOG.debug("fdasd: content=\n%s", content)
    # fdasd consumes the layout from a config file; write a throw-away
    # temp file and remove it when done.
    wfp = tempfile.NamedTemporaryFile(suffix=".fdasd", delete=False)
    wfp.close()
    util.write_file(wfp.name, content)
    cmd = ['fdasd', '--verbose', '--config=%s' % wfp.name, self.devname]
    LOG.debug('Partitioning %s with %s', self.devname, cmd)
    try:
        out, err = util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        LOG.error("Partitioning failed: %s", e)
        raise
    finally:
        # always clean up the temporary fdasd config file
        if os.path.exists(wfp.name):
            os.unlink(wfp.name)
def _download_layered_images(image_stack, tmp_dir):
    """Download every image URL in *image_stack* into *tmp_dir*.

    :param image_stack: iterable of image URLs, ordered bottom to top.
    :param tmp_dir: local directory receiving the downloaded files.
    :returns: list of local file paths, same order as image_stack.
    :raises url_helper.UrlError: if any download fails after retries.
    """
    local_image_stack = []
    try:
        for img_url in image_stack:
            dest_path = os.path.join(tmp_dir, os.path.basename(img_url))
            url_helper.download(img_url, dest_path, retries=3)
            local_image_stack.append(dest_path)
    except url_helper.UrlError:
        # fix: lazy %-args for logging; bare raise preserves the traceback
        LOG.error("Failed to download '%s'", img_url)
        raise
    return local_image_stack
def _discover_get_probert_data():
    """Run the probert storage prober and return its raw results dict.

    Returns an empty dict when probert cannot be imported.
    """
    try:
        LOG.debug('Importing probert prober')
        from probert import prober
    except Exception:
        LOG.error('Failed to import probert, discover disabled')
        return {}

    storage_prober = prober.Prober()
    LOG.debug('Probing system for storage devices')
    storage_prober.probe_storage()
    return storage_prober.get_results()
def _extract_root_fsimage(path, target):
    """Loop-mount the filesystem image at *path* and copy it into *target*.

    :param path: filesystem image file, mounted read-only via loop device.
    :param target: directory tree to copy the mounted contents into.
    :returns: the result of copy_to_target.
    :raises util.ProcessExecutionError: if the image cannot be mounted.
    """
    mp = tempfile.mkdtemp()
    try:
        util.subp(['mount', '-o', 'loop,ro', path, mp], capture=True)
    except util.ProcessExecutionError as e:
        LOG.error("Failed to mount '%s' for extraction: %s", path, e)
        os.rmdir(mp)
        # fix: bare raise preserves the original traceback ('raise e' resets
        # it on python2 and adds noise on python3)
        raise
    try:
        return copy_to_target(mp, target)
    finally:
        # always unmount and remove the temporary mount point
        util.subp(['umount', mp])
        os.rmdir(mp)
def load(self, state):
    """Populate this object from a serialized network *state* dict.

    The dict must carry a 'version' field; every key required for that
    version is copied onto the instance as an attribute, then the
    command handlers are (re)bound.
    """
    if 'version' not in state:
        LOG.error('Invalid state, missing version field')
        raise Exception('Invalid state, missing version field')

    required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
    if not self.valid_command(state, required_keys):
        msg = 'Invalid state, missing keys: %s' % (required_keys)
        LOG.error(msg)
        raise Exception(msg)

    # v1 - direct attr mapping, except version
    for key in required_keys:
        if key == 'version':
            continue
        setattr(self, key, state[key])

    self.command_handlers = self.get_command_handlers()
def load_reporter(config):
    """Loads and returns reporter instance stored in config file.

    :param config: dict which may contain a 'reporter' entry mapping one
        reporter name to its options.  NOTE: popitem() removes that
        name/options pair from the nested reporter dict in place.
    :returns: a reporter built by the named legacy module, or
        EmptyReporter() when no reporter is configured or loading fails.
    """
    reporter = config.get('reporter')
    if reporter is None:
        LOG.info("'reporter' not found in config file.")
        return EmptyReporter()
    name, options = reporter.popitem()
    module = try_import_module('curtin.reporter.legacy.%s' % name)
    if module is None:
        # fix: lazy %-args instead of eager string interpolation
        LOG.error("Module for %s reporter could not load.", name)
        return EmptyReporter()
    try:
        return module.load_factory(options)
    except LoadReporterException:
        # fix: lazy %-args instead of eager string interpolation
        LOG.error("Failed loading %s reporter with %s", name, options)
        return EmptyReporter()
def zkey_supported(strict=True):
    """ Return True if zkey cmd present and can generate keys, else False."""
    LOG.debug('Checking if zkey encryption is supported...')
    try:
        util.load_kernel_module('pkey')
    except util.ProcessExecutionError as err:
        msg = "Failed to load 'pkey' kernel module"
        # log loudly only in strict mode
        if strict:
            LOG.error(msg + ": %s" % err)
        else:
            LOG.warning(msg)
        return False

    try:
        with tempfile.NamedTemporaryFile() as tf:
            util.subp(['zkey', 'generate', tf.name], capture=True)
            LOG.debug('zkey encryption supported.')
            return True
    except util.ProcessExecutionError as err:
        msg = "zkey not supported"
        if strict:
            LOG.error(msg + ": %s" % err)
        else:
            LOG.warning(msg)
        return False
def discover():
    """Probe the system for storage devices and return a storage config.

    Returns {} when probert cannot be imported, or when extracting the
    storage config hits an ImportError.  Raises ValueError if probing
    produced no 'storage' results.
    """
    try:
        LOG.debug('Importing probert prober')
        from probert import prober
    except Exception:
        LOG.error('Failed to import probert, discover disabled')
        return {}

    probe = prober.Prober()
    LOG.debug('Probing system for storage devices')
    probe.probe_storage()
    probe_data = probe.get_results()
    if 'storage' not in probe_data:
        raise ValueError('Probing storage failed')

    LOG.debug('Extracting storage config from discovered devices')
    try:
        return storage_config.extract_storage_config(
            probe_data.get('storage'))
    except ImportError as e:
        LOG.exception(e)

    return {}
def ensure_disk_connected(rfc4173, write_config=True):
    """Return an IscsiDisk for *rfc4173*, connecting it if not cached.

    :param rfc4173: iSCSI disk specification string (RFC 4173 style).
    :param write_config: when True, persist iSCSI config for a newly
        connected disk.
    :returns: the cached or newly-connected IscsiDisk instance.
    :raises util.ProcessExecutionError: if connecting the disk fails.
    """
    global _ISCSI_DISKS
    iscsi_disk = _ISCSI_DISKS.get(rfc4173)
    if not iscsi_disk:
        iscsi_disk = IscsiDisk(rfc4173)
        try:
            iscsi_disk.connect()
        except util.ProcessExecutionError:
            # fix: lazy %-args instead of eager string interpolation
            LOG.error('Unable to connect to iSCSI disk (%s)', rfc4173)
            # what should we do in this case?
            raise
        if write_config:
            save_iscsi_config(iscsi_disk)
        _ISCSI_DISKS.update({rfc4173: iscsi_disk})

    # this is just a sanity check that the disk is actually present and
    # the above did what we expected
    if not os.path.exists(iscsi_disk.devdisk_path):
        # fix: LOG.warn is deprecated; use LOG.warning
        LOG.warning('Unable to find iSCSI disk for target (%s) by path (%s)',
                    iscsi_disk.target, iscsi_disk.devdisk_path)

    return iscsi_disk
def create_backing_device(backing_device, cache_device, cache_mode,
                          cset_uuid):
    """Create a bcache backing device on *backing_device*.

    :param backing_device: block device path to become the backing device.
    :param cache_device: optional cache device to attach the backing
        device to (requires cset_uuid).
    :param cache_mode: optional bcache cache mode to set on the result.
    :param cset_uuid: cache-set uuid; required when cache_device is given.
    :returns: /dev path of the newly created bcache device.
    :raises RuntimeError: if a stale bcache device already exists.
    :raises ValueError: on unexpected holder count or missing cset_uuid.
    """
    backing_device_sysfs = sys_block_path(backing_device)
    target_sysfs_path = os.path.join(backing_device_sysfs, "bcache")

    # there should not be any pre-existing bcache device
    bdir = os.path.join(backing_device_sysfs, "bcache")
    if os.path.exists(bdir):
        # fix: message was passed %-style args as a tuple and never
        # formatted; interpolate explicitly.
        raise RuntimeError(
            'Unexpected old bcache device: %s' % backing_device)

    LOG.debug('Creating a backing device on %s', backing_device)
    util.subp(["make-bcache", "-B", backing_device])
    ensure_bcache_is_registered(backing_device, target_sysfs_path)

    # via the holders we can identify which bcache device we just created
    # for a given backing device
    from .clear_holders import get_holders
    holders = get_holders(backing_device)
    if len(holders) != 1:
        err = ('Invalid number {} of holding devices:'
               ' "{}"'.format(len(holders), holders))
        LOG.error(err)
        raise ValueError(err)
    [bcache_dev] = holders
    # fix: lazy %-args instead of str.format in the log call
    LOG.debug('The just created bcache device is %s', holders)

    if cache_device:
        # if we specify both then we need to attach backing to cache
        if cset_uuid:
            attach_backing_to_cacheset(backing_device, cache_device,
                                       cset_uuid)
        else:
            msg = "Invalid cset_uuid: {}".format(cset_uuid)
            LOG.error(msg)
            raise ValueError(msg)

    if cache_mode:
        set_cache_mode(bcache_dev, cache_mode)
    return dev_path(bcache_dev)
def _extract_root_layered_fsimage(image_stack, target):
    """Mount a stack of fs images (overlay when >1) and copy into *target*.

    :param image_stack: list of local image files, ordered bottom to top.
    :param target: directory to copy the merged root tree into.
    :raises util.ProcessExecutionError: on mount or unmount failure.
    """
    mp_base = tempfile.mkdtemp()
    mps = []
    try:
        # Create a mount point for each image file and mount the image
        try:
            for img in image_stack:
                mp = os.path.join(mp_base, os.path.basename(img) + ".dir")
                os.mkdir(mp)
                util.subp(['mount', '-o', 'loop,ro', img, mp], capture=True)
                # prepend: overlay lowerdir wants top-most layer first
                mps.insert(0, mp)
        except util.ProcessExecutionError as e:
            LOG.error("Failed to mount '%s' for extraction: %s", img, e)
            # fix: bare raise preserves the traceback
            raise

        # Prepare
        if len(mps) == 1:
            root_dir = mps[0]
        else:
            # Multiple image files, merge them with an overlay and do the copy
            root_dir = os.path.join(mp_base, "root.dir")
            os.mkdir(root_dir)
            try:
                util.subp(['mount', '-t', 'overlay', 'overlay', '-o',
                           'lowerdir=' + ':'.join(mps), root_dir],
                          capture=True)
                mps.append(root_dir)
            except util.ProcessExecutionError as e:
                LOG.error("overlay mount to %s failed: %s", root_dir, e)
                raise

        copy_to_target(root_dir, target)
    finally:
        umount_err_mps = []
        for mp in reversed(mps):
            try:
                util.subp(['umount', mp], capture=True)
            except util.ProcessExecutionError as e:
                # fix: '%e' is not a valid logging placeholder and raised
                # a formatting error in this error path; use '%s'.
                LOG.error("can't unmount %s: %s", mp, e)
                umount_err_mps.append(mp)
        if umount_err_mps:
            # NOTE(review): two positional args here map onto the first
            # two ProcessExecutionError ctor params, so the message is not
            # %-formatted -- kept as-is since the ctor is not visible here.
            raise util.ProcessExecutionError("Failed to umount: %s",
                                             ", ".join(umount_err_mps))
        shutil.rmtree(mp_base)
def exclusive_open(path, exclusive=True):
    """
    Obtain an exclusive file-handle to the file/device specified unless
    caller specifics exclusive=False.

    Yields a file object opened 'rb+' on *path*.  This is a generator
    function; presumably it is wrapped by contextlib.contextmanager at
    the (unseen) decoration site -- TODO confirm.

    :param path: existing file or device path to open read-write.
    :param exclusive: when True, add os.O_EXCL to the open flags.
    :raises ValueError: if *path* does not exist.
    :raises OSError: if the open fails; device holders, mounts and users
        are logged for diagnosis before re-raising.
    """
    mode = 'rb+'
    fd = None
    if not os.path.exists(path):
        raise ValueError("No such file at path: %s" % path)

    flags = os.O_RDWR
    if exclusive:
        # += is safe here only because O_RDWR and O_EXCL are distinct bits
        flags += os.O_EXCL
    try:
        fd = os.open(path, flags)
        try:
            fd_needs_closing = True
            with os.fdopen(fd, mode) as fo:
                yield fo
            # fdopen took ownership and closed fd via the with-block
            fd_needs_closing = False
        except OSError:
            LOG.exception("Failed to create file-object from fd")
            raise
        finally:
            # python2 leaves fd open if there os.fdopen fails
            if fd_needs_closing and sys.version_info.major == 2:
                os.close(fd)
    except OSError:
        # open (or fdopen) failed: log everything that might be holding
        # the device before re-raising for the caller.
        LOG.error("Failed to exclusively open path: %s", path)
        holders = get_holders(path)
        LOG.error('Device holders with exclusive access: %s', holders)
        mount_points = util.list_device_mounts(path)
        LOG.error('Device mounts: %s', mount_points)
        fusers = util.fuser_mount(path)
        LOG.error('Possible users of %s:\n%s', path, fusers)
        raise
def translate_old_apt_features(cfg):
    """translate the few old apt related features into the new config format

    Mutates *cfg* in place: apt_proxy, apt_mirrors and debconf_selections
    are moved into the nested cfg['apt'] structure; mixing old and new
    forms of the same setting is an error.

    :param cfg: curtin config dict, modified in place.
    :returns: the same cfg dict.
    :raises ValueError: when old- and new-style settings conflict.
    """
    predef_apt_cfg = cfg.get("apt")
    if predef_apt_cfg is None:
        cfg['apt'] = {}
        predef_apt_cfg = cfg.get("apt")

    if cfg.get('apt_proxy') is not None:
        if predef_apt_cfg.get('proxy') is not None:
            msg = ("Error in apt_proxy configuration: "
                   "old and new format of apt features "
                   "are mutually exclusive")
            LOG.error(msg)
            raise ValueError(msg)

        cfg['apt']['proxy'] = cfg.get('apt_proxy')
        # fix: log key was 'apte' (typo), which always logged None
        LOG.debug("Transferred %s into new format: %s",
                  cfg.get('apt_proxy'), cfg.get('apt'))
        del cfg['apt_proxy']

    if cfg.get('apt_mirrors') is not None:
        if predef_apt_cfg.get('mirrors') is not None:
            msg = ("Error in apt_mirror configuration: "
                   "old and new format of apt features "
                   "are mutually exclusive")
            LOG.error(msg)
            raise ValueError(msg)

        old = cfg.get('apt_mirrors')
        cfg['apt']['primary'] = [{"arches": ["default"],
                                  "uri": old.get('ubuntu_archive')}]
        cfg['apt']['security'] = [{"arches": ["default"],
                                   "uri": old.get('ubuntu_security')}]
        # fix: log key was 'apt_mirror' (typo), which always logged None
        LOG.debug("Transferred %s into new format: %s",
                  cfg.get('apt_mirrors'), cfg.get('apt'))
        del cfg['apt_mirrors']

        # to work this also needs to disable the default protection
        psl = predef_apt_cfg.get('preserve_sources_list')
        if psl is not None:
            if config.value_as_boolean(psl) is True:
                msg = ("Error in apt_mirror configuration: "
                       "apt_mirrors and preserve_sources_list: True "
                       "are mutually exclusive")
                LOG.error(msg)
                raise ValueError(msg)
        cfg['apt']['preserve_sources_list'] = False

    if cfg.get('debconf_selections') is not None:
        if predef_apt_cfg.get('debconf_selections') is not None:
            msg = ("Error in debconf_selections configuration: "
                   "old and new format of apt features "
                   "are mutually exclusive")
            LOG.error(msg)
            raise ValueError(msg)

        selsets = cfg.get('debconf_selections')
        cfg['apt']['debconf_selections'] = selsets
        LOG.info("Transferred %s into new format: %s",
                 cfg.get('debconf_selections'), cfg.get('apt'))
        del cfg['debconf_selections']

    return cfg
def format(self, blksize=4096, layout='cdl', force=False, set_label=None,
           keep_label=False, no_label=False, mode='quick'):
    """ Format DasdDevice with supplied parameters.

        :param blksize: integer value to configure disk block size in
            bytes. Must be one of 512, 1024, 2048, 4096; defaults to 4096.
        :param layout: string specify disk layout format. Must be one of
            'cdl' (Compatible Disk Layout, default) or
            'ldl' (Linux Disk Layout).
        :param force: boolean set true to skip sanity checks,
            defaults to False
        :param set_label: string to write to the volume label for
            identification.  If no label provided, a label is generated
            from device number of the dasd.
            Note: is interpreted as ASCII string and is automatically
            converted to uppercase and then to EBCDIC.
            e.g. 'a@b\\$c#' to get A@B$C#.
        :param keep_label: boolean set true to keep existing label on
            dasd, ignores label param value, defaults to False.
        :param no_label: boolean set true to skip writing label to dasd,
            ignores label and keep_label params, defaults to False.
        :param mode: string to control format mode.  Must be one of
            'full' (Format the full disk),
            'quick' (Format the first two tracks, default),
            'expand' (Format unformatted tracks at device end).

        :raises: RuntimeError if devname does not exist.
        :raises: ValueError on invalid blocksize, disk_layout and mode.
        :raises: ProcessExecutionError on errors running 'dasdfmt' command.

        Example dasdfmt command with defaults:
          dasdfmt -y --blocksize=4096 --disk_layout=cdl \
                  --mode=quick /dev/dasda
    """
    if not os.path.exists(self.devname):
        raise RuntimeError("devname '%s' does not exist" % self.devname)

    # no_label supersedes both label options; keep_label supersedes
    # set_label.
    if no_label:
        keep_label = False
        set_label = None

    if keep_label:
        set_label = None

    valid_blocksize = [512, 1024, 2048, 4096]
    if blksize not in valid_blocksize:
        raise ValueError(
            "blksize: '%s' not one of '%s'" % (blksize, valid_blocksize))

    valid_layouts = ['cdl', 'ldl']
    if layout not in valid_layouts:
        raise ValueError("layout: '%s' not one of '%s'" % (layout,
                                                           valid_layouts))

    # treat empty/None mode as the 'quick' default
    if not mode:
        mode = 'quick'

    valid_modes = ['full', 'quick', 'expand']
    if mode not in valid_modes:
        raise ValueError("mode: '%s' not one of '%s'" % (mode, valid_modes))

    opts = [
        '-y',
        '--blocksize=%s' % blksize,
        '--disk_layout=%s' % layout,
        '--mode=%s' % mode
    ]
    if set_label:
        opts += ['--label=%s' % set_label]
    if keep_label:
        opts += ['--keep_label']
    if no_label:
        opts += ['--no_label']
    if force:
        opts += ['--force']

    cmd = ['dasdfmt'] + opts + [self.devname]
    LOG.debug('Formatting %s with %s', self.devname, cmd)
    try:
        out, _err = util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        LOG.error("Formatting failed: %s", e)
        raise
def apply_kexec(kexec, target):
    """ load kexec kernel from target dir, similar to /etc/init.d/kexec-load

    Parses the target's grub.cfg to find the default menuentry's kernel,
    initrd and command line, then stages them with 'kexec -l'.

    :param kexec: dict expected to carry {'mode': 'on'} to enable kexec;
        any other value (or None) is a no-op.
    :param target: target root directory containing boot/grub/grub.cfg.
    :returns: True when a kernel was staged with kexec, False otherwise.
    :raises TypeError: if kexec is not None and not a dict.
    :raises ValueError: if grub.cfg is missing from the target.

        kexec:
         mode: on
    """
    grubcfg = "boot/grub/grub.cfg"
    target_grubcfg = os.path.join(target, grubcfg)

    if kexec is None or kexec.get("mode") != "on":
        return False

    if not isinstance(kexec, dict):
        raise TypeError("kexec is not a dict.")

    if not util.which('kexec'):
        util.install_packages('kexec-tools')

    if not os.path.isfile(target_grubcfg):
        raise ValueError("%s does not exist in target" % grubcfg)

    with open(target_grubcfg, "r") as fp:
        default = 0
        menu_lines = []

        # get the default grub boot entry number and menu entry line numbers
        for line_num, line in enumerate(fp, 1):
            if re.search(r"\bset default=\"[0-9]+\"\b", " %s " % line):
                default = int(re.sub(r"[^0-9]", '', line))
            if re.search(r"\bmenuentry\b", " %s " % line):
                menu_lines.append(line_num)

        if not menu_lines:
            LOG.error("grub config file does not have a menuentry\n")
            return False

        # get the begin and end line numbers for default menuentry section,
        # using end of file if it's the last menuentry section
        # NOTE: menu_lines are 1-based; indexing 'lines' (0-based) with
        # 'begin' therefore starts at the line AFTER the menuentry header,
        # which is the intended body of the entry.
        begin = menu_lines[default]
        if begin != menu_lines[-1]:
            end = menu_lines[default + 1] - 1
        else:
            end = line_num

        fp.seek(0)
        lines = fp.readlines()
        kernel = append = initrd = ""

        for i in range(begin, end):
            if 'linux' in lines[i].split():
                # 'linux <path> <cmdline...>'
                split_line = shlex.split(lines[i])
                kernel = os.path.join(target, split_line[1])
                append = "--append=" + ' '.join(split_line[2:])
            if 'initrd' in lines[i].split():
                split_line = shlex.split(lines[i])
                initrd = "--initrd=" + os.path.join(target, split_line[1])

        if not kernel:
            LOG.error("grub config file does not have a kernel\n")
            return False

        LOG.debug("kexec -l %s %s %s" % (kernel, append, initrd))
        util.subp(args=['kexec', '-l', kernel, append, initrd])
        return True
def cmd_install(args):
    """Run a full curtin install driven by *args* (config and sources).

    Merges built-in, file and command-line configuration, runs every
    configured stage under its reporting stack, then handles success or
    failure reporting, log preservation, target unmount and an optional
    kexec reboot.  Exits the process with status 0 on success.

    :param args: parsed argparse namespace with .config, .source and
        .reportstack attributes.
    :raises util.BadUsage: when no install sources are configured.
    """
    from .collect_logs import create_log_tarfile
    cfg = deepcopy(CONFIG_BUILTIN)
    config.merge_config(cfg, args.config)

    # command-line sources are appended after those from config files
    for source in args.source:
        src = util.sanitize_source(source)
        cfg['sources']["%02d_cmdline" % len(cfg['sources'])] = src

    LOG.info(INSTALL_START_MSG)
    LOG.debug('LANG=%s', os.environ.get('LANG'))
    LOG.debug("merged config: %s" % cfg)
    if not len(cfg.get('sources', [])):
        raise util.BadUsage("no sources provided to install")

    for i in cfg['sources']:
        # we default to tgz for old style sources config
        cfg['sources'][i] = util.sanitize_source(cfg['sources'][i])

    # proxy settings are exported so child stage processes inherit them
    migrate_proxy_settings(cfg)
    for k in ('http_proxy', 'https_proxy', 'no_proxy'):
        if k in cfg['proxy']:
            os.environ[k] = cfg['proxy'][k]

    instcfg = cfg.get('install', {})
    logfile = instcfg.get('log_file')
    error_tarfile = instcfg.get('error_tarfile')
    post_files = instcfg.get('post_files', [logfile])

    # Generate curtin configuration dump and add to write_files unless
    # installation config disables dump
    yaml_dump_file = instcfg.get('save_install_config', SAVE_INSTALL_CONFIG)
    if yaml_dump_file:
        write_files = cfg.get('write_files', {})
        write_files['curtin_install_cfg'] = {
            'path': yaml_dump_file,
            'permissions': '0400',
            'owner': 'root:root',
            'content': config.dump_config(cfg)
        }
        cfg['write_files'] = write_files

    # Load reporter
    clear_install_log(logfile)
    legacy_reporter = load_reporter(cfg)
    legacy_reporter.files = post_files

    writeline_and_stdout(logfile, INSTALL_START_MSG)
    args.reportstack.post_files = post_files
    workingd = None
    try:
        workingd = WorkingDir(cfg)
        dd_images = util.get_dd_images(cfg.get('sources', {}))
        if len(dd_images) > 1:
            raise ValueError("You may not use more than one disk image")
        LOG.debug(workingd.env())

        env = os.environ.copy()
        env.update(workingd.env())

        # run each configured stage inside its own report event stack
        for name in cfg.get('stages'):
            desc = STAGE_DESCRIPTIONS.get(name, "stage %s" % name)
            reportstack = events.ReportEventStack(
                "stage-%s" % name, description=desc,
                parent=args.reportstack)
            env['CURTIN_REPORTSTACK'] = reportstack.fullname

            with reportstack:
                commands_name = '%s_commands' % name
                with util.LogTimer(LOG.debug, 'stage_%s' % name):
                    stage = Stage(name, cfg.get(commands_name, {}), env,
                                  reportstack=reportstack, logfile=logfile)
                    stage.run()

        # a successful kexec stage turns the final power state into a
        # reboot so the new kernel takes over
        if apply_kexec(cfg.get('kexec'), workingd.target):
            cfg['power_state'] = {'mode': 'reboot', 'delay': 'now',
                                  'message': "'rebooting with kexec'"}

        writeline_and_stdout(logfile, INSTALL_PASS_MSG)
        legacy_reporter.report_success()
    except Exception as e:
        exp_msg = INSTALL_FAIL_MSG.format(exception=e)
        writeline(logfile, exp_msg)
        LOG.error(exp_msg)
        legacy_reporter.report_failure(exp_msg)
        if error_tarfile:
            create_log_tarfile(error_tarfile, cfg)
        raise e
    finally:
        # preserve the install log inside the target before unmounting
        log_target_path = instcfg.get('save_install_log', SAVE_INSTALL_LOG)
        if log_target_path and workingd:
            copy_install_log(logfile, workingd.target, log_target_path)

        if instcfg.get('unmount', "") == "disabled":
            LOG.info('Skipping unmount: config disabled target unmounting')
        elif workingd:
            # unmount everything (including iscsi disks)
            util.do_umount(workingd.target, recursive=True)

            # The open-iscsi service in the ephemeral environment handles
            # disconnecting active sessions. On Artful release the systemd
            # unit file has conditionals that are not met at boot time and
            # results in open-iscsi service not being started; This breaks
            # shutdown on Artful releases.
            # Additionally, in release < Artful, if the storage
            # configuration is layered, like RAID over iscsi volumes, then
            # disconnecting iscsi sessions before stopping the raid device
            # hangs.
            # As it turns out, letting the open-iscsi service take down the
            # session last is the cleanest way to handle all releases
            # regardless of what may be layered on top of the iscsi disks.
            #
            # Check if storage configuration has iscsi volumes and if so
            # ensure iscsi service is active before exiting install
            if iscsi.get_iscsi_disks_from_config(cfg):
                iscsi.restart_iscsi_service()

            shutil.rmtree(workingd.top)

    apply_power_state(cfg.get('power_state'))
    sys.exit(0)