def lookup_disk(serial):
    """
    Search for a disk by its serial number using /dev/disk/by-id/
    """
    # Get all volumes in /dev/disk/by-id/ containing the serial string. The
    # string specified can be either in the short or long serial format
    # hack, some serials have spaces, udev usually converts ' ' -> '_'
    serial_udev = serial.replace(' ', '_')
    LOG.info('Processing serial %s via udev to %s', serial, serial_udev)

    disks = list(
        filter(lambda x: serial_udev in x, os.listdir("/dev/disk/by-id/")))
    if not disks:
        raise ValueError("no disk with serial '%s' found" % serial_udev)

    # Sort by length and take the shortest path name, as the longer path names
    # will be the partitions on the disk. Then use os.path.realpath to
    # determine the path to the block device in /dev/
    disks.sort(key=lambda x: len(x))
    LOG.debug('lookup_disk found: %s', disks)
    path = os.path.realpath("/dev/disk/by-id/%s" % disks[0])
    LOG.debug('lookup_disk realpath(%s)=%s', disks[0], path)
    if multipath.is_mpath_device(path):
        LOG.debug('Detected multipath device, finding members')
        info = udevadm_info(path)
        mpath_members = sorted(multipath.find_mpath_members(info['DM_NAME']))
        LOG.debug('mpath members: %s', mpath_members)
        if len(mpath_members):
            path = mpath_members[0]

    if not os.path.exists(path):
        raise ValueError(
            "path '%s' to block device for disk with serial '%s' does not"
            " exist" % (path, serial_udev))
    return path
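# Usage sketch (illustrative, not from the source): how lookup_disk might be
# called in this module's context. The serial value is hypothetical; a serial
# containing spaces exercises the udev ' ' -> '_' normalization above.
def _example_lookup_disk():
    path = lookup_disk('Virtual disk 1')  # hypothetical serial
    LOG.debug('example: serial resolved to block device %s', path)
    return path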
def parse_sb_version(device=None, sbdict=None):
    """ Parse bcache 'sb_version' field to integer if possible. """
    if not device and not sbdict:
        raise ValueError('Supply a device name or bcache superblock dict')
    if not sbdict:
        sbdict = superblock_asdict(device=device)
        if not sbdict:
            LOG.info('Cannot parse sb.version without bcache superblock')
            return None
    if not isinstance(sbdict, dict):
        raise ValueError('Invalid sbdict type, must be dict')

    sb_version = sbdict.get('sb.version')
    try:
        # 'sb.version': '1 [backing device]'
        # 'sb.version': '3 [caching device]'
        version = int(sb_version.split()[0])
    except (AttributeError, ValueError):
        LOG.warning("Failed to parse bcache 'sb.version' field"
                    " as integer: %s", sb_version)
        raise

    return version
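# Illustrative sketch (not from the source): calling parse_sb_version with a
# pre-built superblock dict shaped after the inline comments above.
def _example_parse_sb_version():
    sbdict = {'sb.version': '1 [backing device]'}  # hypothetical superblock
    assert parse_sb_version(sbdict=sbdict) == 1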
def apply_debconf_selections(cfg, target=None):
    """apply_debconf_selections - push content to debconf"""
    # debconf_selections:
    #  set1: |
    #   cloud-init cloud-init/datasources multiselect MAAS
    #  set2: pkg pkg/value string bar
    selsets = cfg.get('debconf_selections')
    if not selsets:
        LOG.debug("debconf_selections was not set in config")
        return

    LOG.debug('Applying debconf selections')
    selections = '\n'.join(
        [selsets[key] for key in sorted(selsets.keys())])
    debconf_set_selections(selections.encode() + b"\n", target=target)

    # get a complete list of packages listed in input
    pkgs_cfgd = set()
    for key, content in selsets.items():
        for line in content.splitlines():
            if line.startswith("#"):
                continue
            pkg = re.sub(r"[:\s].*", "", line)
            pkgs_cfgd.add(pkg)

    pkgs_installed = distro.get_installed_packages(target)
    need_reconfig = pkgs_cfgd.intersection(pkgs_installed)
    if len(need_reconfig) == 0:
        return

    dpkg_reconfigure(need_reconfig, target=target)
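# Illustrative config sketch (not from the source): a cfg dict shaped after
# the comment at the top of apply_debconf_selections; the target path and
# selection values are hypothetical.
def _example_apply_debconf_selections():
    cfg = {
        'debconf_selections': {
            'set1': 'cloud-init cloud-init/datasources multiselect MAAS',
            'set2': 'pkg pkg/value string bar',
        }
    }
    apply_debconf_selections(cfg, target='/tmp/target')  # hypothetical target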
def needs_formatting(self, blksize, layout, volser):
    """ Determine if DasdDevice attributes match the required parameters.

    Note that devices that indicate they are unformatted will require
    formatting.

    :param blksize: expected blocksize of the device.
    :param layout: expected disk layout.
    :param volser: expected label, if None, label is ignored.
    :returns: boolean, True if formatting is needed, else False.
    """
    LOG.debug('Checking if dasd %s needs formatting', self.device_id)
    if self.is_not_formatted():
        LOG.debug('dasd %s is not formatted', self.device_id)
        return True

    if int(blksize) != int(self.blocksize()):
        LOG.debug('dasd %s block size (%s) does not match (%s)',
                  self.device_id, self.blocksize(), blksize)
        return True

    if layout != self.disk_layout():
        LOG.debug('dasd %s disk layout (%s) does not match %s',
                  self.device_id, self.disk_layout(), layout)
        return True

    if volser and volser != self.label():
        LOG.debug('dasd %s volser (%s) does not match %s',
                  self.device_id, self.label(), volser)
        return True

    return False
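# Illustrative sketch (not from the source): checking a DasdDevice against
# typical values; the 4096-byte blocksize and 'cdl' layout are hypothetical
# parameters, not values confirmed by the excerpt above.
def _example_needs_formatting(dasd):
    if dasd.needs_formatting(blksize=4096, layout='cdl', volser=None):
        LOG.info('example: dasd %s would need formatting', dasd.device_id)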
def attach_backing_to_cacheset(backing_device, cache_device, cset_uuid):
    LOG.info("Attaching backing device to cacheset: "
             "{} -> {} cset.uuid: {}".format(backing_device, cache_device,
                                             cset_uuid))
    backing_device_sysfs = sys_block_path(backing_device)
    attach = os.path.join(backing_device_sysfs, "bcache", "attach")
    util.write_file(attach, cset_uuid, mode=None)
def mdadm_remove(devpath):
    assert_valid_devpath(devpath)

    LOG.info("mdadm removing: %s", devpath)
    out, err = util.subp(["mdadm", "--remove", devpath],
                         rcs=[0], capture=True)
    LOG.debug("mdadm remove:\n%s\n%s", out, err)
def remove_device(mddev, arraydev):
    assert_valid_devpath(mddev)

    LOG.info("mdadm remove %s from array %s", arraydev, mddev)
    out, err = util.subp(["mdadm", "--remove", mddev, arraydev],
                         rcs=[0], capture=True)
    LOG.debug("mdadm remove:\n%s\n%s", out, err)
def disable_overlayroot(cfg, target):
    # cloud images come with overlayroot, but installed systems need it
    # disabled
    disable = cfg.get('disable_overlayroot', True)
    local_conf = os.path.sep.join([target, 'etc/overlayroot.local.conf'])
    if disable and os.path.exists(local_conf):
        LOG.debug("renaming %s to %s", local_conf, local_conf + ".old")
        shutil.move(local_conf, local_conf + ".old")
def md_present(mdname):
    """Check if mdname is present in /proc/mdstat"""
    if not mdname:
        raise ValueError('md_present requires a valid md name')

    try:
        mdstat = util.load_file('/proc/mdstat')
    except IOError as e:
        if util.is_file_not_found_exc(e):
            LOG.warning('Failed to read /proc/mdstat; '
                        'md modules might not be loaded')
            return False
        else:
            raise e

    md_kname = dev_short(mdname)
    # Find lines like:
    # md10 : active raid1 vdc1[1] vda2[0]
    present = [
        line for line in mdstat.splitlines()
        if line.split(":")[0].rstrip() == md_kname
    ]
    return len(present) > 0
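# Illustrative sketch (not from the source): md_present accepts either a
# kernel name or a full /dev path, since dev_short reduces both forms to a
# kname like 'md10'; the array name here is hypothetical.
def _example_md_present():
    return md_present('/dev/md10')  # same result as md_present('md10')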
def detect_required_packages(cfg):
    """
    detect packages that will be required in-target by custom config items
    """
    mapping = {
        'storage': block.detect_required_packages_mapping(),
        'network': net.detect_required_packages_mapping(),
    }

    needed_packages = []
    for cfg_type, cfg_map in mapping.items():
        # skip missing or invalid config items, configs may
        # only have network or storage, not always both
        if not isinstance(cfg.get(cfg_type), dict):
            continue

        cfg_version = cfg[cfg_type].get('version')
        if not isinstance(cfg_version, int) or cfg_version not in cfg_map:
            msg = ('Supplied configuration version "%s", for config type '
                   '"%s" is not present in the known mapping.' %
                   (cfg_version, cfg_type))
            raise ValueError(msg)

        mapped_config = cfg_map[cfg_version]
        found_reqs = mapped_config['handler'](cfg, mapped_config['mapping'])
        needed_packages.extend(found_reqs)

    LOG.debug('Curtin config dependencies require additional packages: %s',
              needed_packages)
    return needed_packages
def ubuntu_core_curthooks(cfg, target=None):
    """ Ubuntu-Core 16 images cannot execute standard curthooks
        Instead we copy in any cloud-init configuration to the
        'LABEL=writable' partition mounted at target.
    """
    ubuntu_core_target = os.path.join(target, "system-data")
    cc_target = os.path.join(ubuntu_core_target, 'etc/cloud/cloud.cfg.d')

    cloudconfig = cfg.get('cloudconfig', None)
    if cloudconfig:
        # remove cloud-init.disabled, if found
        cloudinit_disable = os.path.join(ubuntu_core_target,
                                         'etc/cloud/cloud-init.disabled')
        if os.path.exists(cloudinit_disable):
            util.del_file(cloudinit_disable)

        handle_cloudconfig(cloudconfig, base_dir=cc_target)

    netconfig = cfg.get('network', None)
    if netconfig:
        LOG.info('Writing network configuration')
        ubuntu_core_netconfig = os.path.join(cc_target,
                                             "50-curtin-networking.cfg")
        util.write_file(ubuntu_core_netconfig,
                        content=config.dump_config({'network': netconfig}))
def add_swap(cfg, target, fstab):
    # add swap file per cfg to filesystem root at target. update fstab.
    #
    # swap:
    #  filename: 'swap.img',
    #  size: None # (or 1G)
    #  maxsize: 2G
    if 'swap' in cfg and not cfg.get('swap'):
        LOG.debug("disabling 'add_swap' due to config")
        return

    swapcfg = cfg.get('swap', {})
    fname = swapcfg.get('filename', None)
    size = swapcfg.get('size', None)
    maxsize = swapcfg.get('maxsize', None)

    if size:
        size = util.human2bytes(str(size))
    if maxsize:
        maxsize = util.human2bytes(str(maxsize))

    swap.setup_swapfile(target=target, fstab=fstab, swapfile=fname,
                        size=size, maxsize=maxsize)
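# Illustrative config sketch following the comment at the top of add_swap;
# the sizes and target path are hypothetical, and both size strings pass
# through util.human2bytes before use.
def _example_add_swap(fstab):
    cfg = {'swap': {'filename': 'swap.img', 'size': '1G', 'maxsize': '2G'}}
    add_swap(cfg, target='/tmp/target', fstab=fstab)  # hypothetical target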
def copy_mdadm_conf(mdadm_conf, target):
    if not mdadm_conf:
        LOG.warning("mdadm config must be specified, not copying")
        return

    LOG.info("copying mdadm.conf into target")
    shutil.copy(mdadm_conf,
                os.path.sep.join([target, 'etc/mdadm/mdadm.conf']))
def copy_iscsi_conf(nodes_dir, target):
    if not nodes_dir:
        LOG.warning("nodes directory must be specified, not copying")
        return

    LOG.info("copying iscsi nodes database into target")
    shutil.copytree(nodes_dir,
                    os.path.sep.join([target, 'etc/iscsi/nodes']))
def dmsetup_info(devname):
    '''returns dict of info about device mapper dev.

    {'blkdevname': 'dm-0',
     'blkdevs_used': 'sda5',
     'name': 'sda5_crypt',
     'subsystem': 'CRYPT',
     'uuid': 'CRYPT-LUKS1-2b370697149743b0b2407d11f88311f1-sda5_crypt'}
    '''
    _SEP = '='
    fields = 'name,uuid,blkdevname,blkdevs_used,subsystem'.split(',')
    try:
        (out, _err) = util.subp([
            'dmsetup', 'info', devname, '-C', '-o', ','.join(fields),
            '--noheading', '--separator', _SEP], capture=True)
    except util.ProcessExecutionError as e:
        LOG.error('Failed to run dmsetup info: %s', e)
        return {}

    values = out.strip().split(_SEP)
    info = dict(zip(fields, values))
    return info
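# Illustrative sketch (not from the source): reading one field out of the
# parsed dict; 'dm-0' is a hypothetical device-mapper name, and the expected
# value follows the docstring example above.
def _example_dmsetup_info():
    info = dmsetup_info('dm-0')
    return info.get('blkdevs_used')  # e.g. 'sda5' per the docstring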
def validate_config(config, sourcefile=None):
    """Validate storage config object."""
    if not sourcefile:
        sourcefile = ''
    try:
        import jsonschema
        jsonschema.validate(config, STORAGE_CONFIG_SCHEMA)
    except ImportError:
        LOG.error('Cannot validate storage config, missing jsonschema')
        raise
    except jsonschema.exceptions.ValidationError as e:
        if isinstance(e.instance, int):
            msg = 'Unexpected value (%s) for property "%s"' % (e.instance,
                                                               e.path[0])
            raise ValueError(msg)
        if 'type' not in e.instance:
            msg = "%s in %s" % (e.message, e.instance)
            raise ValueError(msg)

        instance_type = e.instance['type']
        stype = get_storage_types().get(instance_type)
        if stype:
            try:
                jsonschema.validate(e.instance, stype.schema)
            except jsonschema.exceptions.ValidationError as f:
                msg = "%s in %s\n%s" % (f.message, sourcefile,
                                        util.json_dumps(e.instance))
                raise ValueError(msg)
        else:
            msg = "Unknown storage type: %s in %s" % (instance_type,
                                                      e.instance)
            raise ValueError(msg)
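# Illustrative sketch (not from the source): a minimal config of the general
# shape validate_config expects. The exact keys required by
# STORAGE_CONFIG_SCHEMA are an assumption here; ids and names are
# hypothetical.
def _example_validate_config():
    config = {
        'version': 1,
        'config': [{'id': 'disk0', 'type': 'disk', 'ptable': 'gpt'}],
    }
    validate_config(config, sourcefile='example.yaml')  # hypothetical file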
def lvm_scan(activate=True, multipath=False):
    """
    run full scan for volgroups, logical volumes and physical volumes
    """
    # prior to xenial, lvmetad is not packaged, so even if a tool supports
    # flag --cache it has no effect. In Xenial and newer the --cache flag is
    # used (if lvmetad is running) to ensure that the data cached by
    # lvmetad is updated.

    # before appending the cache flag though, check if lvmetad is running.
    # this ensures that we do the right thing even if lvmetad is supported
    # but is not running
    release = distro.lsb_release().get('codename')
    if release in [None, 'UNAVAILABLE']:
        LOG.warning('unable to find release number, assuming xenial or later')
        release = 'xenial'

    if multipath:
        # only operate on mp devices
        mponly = 'devices{ filter = [ "a|/dev/mapper/mpath.*|", "r|.*|" ] }'

    for cmd in [['pvscan'], ['vgscan']]:
        if release != 'precise' and lvmetad_running():
            cmd.append('--cache')
        if multipath:
            cmd.extend(['--config', mponly])
        util.subp(cmd, capture=True)
def start_clear_holders_deps():
    """
    prepare system for clear holders to be able to scan old devices
    """
    # an mdadm scan has to be started in case there is a md device that
    # needs to be detected. if the scan fails, it is either because there
    # are no mdadm devices on the system, or because there is a mdadm
    # device in a damaged state that could not be started. due to the
    # nature of mdadm tools, it is difficult to know which is the case. if
    # any errors did occur, then ignore them, since no action needs to be
    # taken if there were no mdadm devices on the system, and in the case
    # where there is some mdadm metadata on a disk, but there was not
    # enough to start the array, the call to wipe_volume on all disks and
    # partitions should be sufficient to remove the mdadm metadata
    mdadm.mdadm_assemble(scan=True, ignore_errors=True)
    # scan and activate for logical volumes
    lvm.lvm_scan()
    lvm.activate_volgroups()
    # the bcache module needs to be present to properly detect bcache devs
    # on some systems (precise without hwe kernel) it may not be possible
    # to load the bcache module because it is not present in the kernel.
    # if this happens then there is no need to halt installation, as the
    # bcache devices will never appear and will never prevent the disk
    # from being reformatted
    util.load_kernel_module('bcache')
    if not zfs.zfs_supported():
        LOG.warning('zfs filesystem is not supported in this environment')
def fail_device(mddev, arraydev):
    assert_valid_devpath(mddev)

    LOG.info("mdadm mark faulty: %s in array %s", arraydev, mddev)
    out, err = util.subp(["mdadm", "--fail", mddev, arraydev],
                         rcs=[0], capture=True)
    LOG.debug("mdadm mark faulty:\n%s\n%s", out, err)
def restart_iscsi_service():
    LOG.info('restarting iscsi service')
    if util.uses_systemd():
        cmd = ['systemctl', 'reload-or-restart', 'open-iscsi']
    else:
        cmd = ['service', 'open-iscsi', 'restart']
    util.subp(cmd, capture=True)
@classmethod
def from_fdasd(cls, devname):
    """Use fdasd to construct a DasdPartitionTable.

    % fdasd --table /dev/dasdc
    reading volume label ..: VOL1
    reading vtoc ..........: ok

    Disk /dev/dasdc:
      cylinders ............: 10017
      tracks per cylinder ..: 15
      blocks per track .....: 12
      bytes per block ......: 4096
      volume label .........: VOL1
      volume serial ........: 0X1522
      max partitions .......: 3

     ------------------------------- tracks -------------------------------
                   Device      start      end   length   Id  System
              /dev/dasdc1          2    43694    43693    1  Linux native
              /dev/dasdc2      43695    87387    43693    2  Linux native
              /dev/dasdc3      87388   131080    43693    3  Linux native
                              131081   150254    19174       unused
    exiting...
    """
    cmd = ['fdasd', '--table', devname]
    out, _err = util.subp(cmd, capture=True)
    LOG.debug("from_fdasd output:\n---\n%s\n---\n", out)
    return cls.from_fdasd_output(devname, out)
def get_iscsi_disks_from_config(cfg):
    """Return a list of IscsiDisk objects for each iscsi volume present."""
    # Construct IscsiDisk objects for each iscsi volume present
    iscsi_disks = [IscsiDisk(volume) for volume in
                   get_iscsi_volumes_from_config(cfg)]
    LOG.debug('Found %s iscsi disks in storage config', len(iscsi_disks))
    return iscsi_disks
def add_partition(self, partnumber, partsize):
    """ Add a partition to this DasdDevice specifying partnumber and size.

    :param partnumber: integer value of partition number (1, 2 or 3)
    :param partsize: partition size in bytes.
    :raises: ValueError on invalid devname

    Example fdasd command with defaults:
      fdasd --verbose --config=/tmp/curtin/dasd-part1.fdasd /dev/dasdb
    """
    LOG.debug(
        "add_partition: partnumber: %s partsize: %s", partnumber, partsize)

    partitions = self._ptable_for_new_partition(partnumber, partsize)
    LOG.debug("fdasd: partitions to be created: %s", partitions)
    content = "\n".join([
        "[%s,%s]" % (part[0], part[1]) for part in partitions
    ])
    LOG.debug("fdasd: content=\n%s", content)
    wfp = tempfile.NamedTemporaryFile(suffix=".fdasd", delete=False)
    wfp.close()
    util.write_file(wfp.name, content)
    cmd = ['fdasd', '--verbose', '--config=%s' % wfp.name, self.devname]
    LOG.debug('Partitioning %s with %s', self.devname, cmd)
    try:
        out, err = util.subp(cmd, capture=True)
    except util.ProcessExecutionError as e:
        LOG.error("Partitioning failed: %s", e)
        raise
    finally:
        if os.path.exists(wfp.name):
            os.unlink(wfp.name)
def sys_block_path(devname, add=None, strict=True):
    """ get path to device in /sys/class/block """
    toks = ['/sys/class/block']
    # insert parent dev if devname is partition
    devname = os.path.normpath(devname)
    if devname.startswith('/dev/') and not os.path.exists(devname):
        LOG.warning('block.sys_block_path: devname %s does not exist',
                    devname)
    (parent, partnum) = get_blockdev_for_partition(devname, strict=strict)
    if partnum:
        toks.append(path_to_kname(parent))

    toks.append(path_to_kname(devname))

    if add is not None:
        toks.append(add)
    path = os.sep.join(toks)

    if strict and not os.path.exists(path):
        err = OSError(
            "devname '{}' did not have existing syspath '{}'".format(
                devname, path))
        err.errno = errno.ENOENT
        raise err

    return os.path.normpath(path)
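# Illustrative sketch (not from the source): for a partition path such as
# /dev/sda1 (hypothetical), the parent disk kname is inserted first, so the
# result is along the lines of /sys/class/block/sda/sda1/size.
def _example_sys_block_path():
    return sys_block_path('/dev/sda1', add='size', strict=False)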
def get_backing_device(bcache_kname):
    """ For a given bcacheN kname, return the backing device
        bcache sysfs dir.

        bcache0 -> /sys/.../devices/.../device/bcache
    """
    bcache_deps = '/sys/class/block/%s/slaves' % bcache_kname
    try:
        # if the bcache device is deleted, this may fail
        deps = os.listdir(bcache_deps)
    except util.FileMissingError as e:
        LOG.debug('Transient race, bcache slave path not found: %s', e)
        return None

    # a running bcache device has two entries in slaves, the cacheset
    # device, and the backing device. There may only be the backing
    # device (if a bcache device is found but not currently attached
    # to a cacheset).
    if len(deps) == 0:
        raise RuntimeError('%s unexpected empty dir: %s' %
                           (bcache_kname, bcache_deps))

    for dev in (sysfs_path(dep) for dep in deps):
        if is_backing(dev):
            return dev

    return None
def wipe_file(path, reader=None, buflen=4 * 1024 * 1024, exclusive=True):
    """
    wipe the existing file at path.
    if reader is provided, it will be called as a 'reader(buflen)'
    to provide data for each write. Otherwise, zeros are used.
    writes will be done in size of buflen.
    """
    if reader:
        readfunc = reader
    else:
        buf = buflen * b'\0'

        def readfunc(size):
            return buf

    size = util.file_size(path)
    LOG.debug("%s is %s bytes. wiping with buflen=%s", path, size, buflen)

    with exclusive_open(path, exclusive=exclusive) as fp:
        while True:
            pbuf = readfunc(buflen)
            pos = fp.tell()
            if len(pbuf) != buflen and len(pbuf) + pos < size:
                raise ValueError(
                    "short read on reader got %d expected %d after %d" %
                    (len(pbuf), buflen, pos))

            if pos + buflen >= size:
                fp.write(pbuf[0:size - pos])
                break
            else:
                fp.write(pbuf)
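# Illustrative sketch (not from the source): wiping with a pattern instead of
# zeros by supplying a reader callable; the 0xff fill is a hypothetical
# choice. The reader returns exactly the requested number of bytes, so the
# short-read check above never triggers.
def _example_wipe_file(path):
    def ones_reader(size):
        return b'\xff' * size

    wipe_file(path, reader=ones_reader)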
def dpkg_reconfigure(packages, target=None):
    # For any packages that are already installed, but have preseed data
    # we populate the debconf database, but the filesystem configuration
    # would be preferred on a subsequent dpkg-reconfigure.
    # so, what we have to do is "know" information about certain packages
    # to unconfigure them.
    unhandled = []
    to_config = []
    for pkg in packages:
        if pkg in CONFIG_CLEANERS:
            LOG.debug("unconfiguring %s", pkg)
            CONFIG_CLEANERS[pkg](target)
            to_config.append(pkg)
        else:
            unhandled.append(pkg)

    if len(unhandled):
        LOG.warning(
            "The following packages were installed and preseeded, "
            "but cannot be unconfigured: %s", unhandled)

    if len(to_config):
        util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
                  list(to_config), data=None, target=target, capture=True)
def quick_zero(path, partitions=True, exclusive=True):
    """
    zero 1M at front and 1M at end of path.
    if this is a block device and partitions is true, then
    also zero 1M at front and end of each partition.
    """
    buflen = 1024
    count = 1024
    zero_size = buflen * count
    offsets = [0, -zero_size]
    is_block = is_block_device(path)
    if not (is_block or os.path.isfile(path)):
        raise ValueError("%s: not an existing file or block device" % path)

    pt_names = []
    if partitions and is_block:
        ptdata = sysfs_partition_data(path)
        for kname, ptnum, start, size in ptdata:
            pt_names.append((dev_path(kname), kname, ptnum))
        pt_names.reverse()

    for (pt, kname, ptnum) in pt_names:
        LOG.debug('Wiping path: dev:%s kname:%s partnum:%s',
                  pt, kname, ptnum)
        quick_zero(pt, partitions=False)

    LOG.debug("wiping 1M on %s at offsets %s", path, offsets)
    return zero_file_at_offsets(path, offsets, buflen=buflen, count=count,
                                exclusive=exclusive)
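# Illustrative sketch (not from the source): zeroing a hypothetical disk; with
# partitions=True each partition is wiped first (in reverse order), then the
# disk itself gets 1M zeroed at its head and tail.
def _example_quick_zero():
    quick_zero('/dev/sdb', partitions=True)  # hypothetical device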
def generate_sources_list(cfg, release, mirrors, target=None):
    """ generate_sources_list
        create a sources.list file based on a custom or default template
        by replacing mirrors and release in the template
    """
    default_mirrors = get_default_mirrors(distro.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        LOG.info(
            "No custom template provided, fall back to modifying "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(paths.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    orig = paths.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
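# Illustrative sketch (not from the source): supplying a custom
# 'sources_list' template; mirror URL, release, and target path are all
# hypothetical. With a custom template, the $MIRROR/$RELEASE placeholders
# are substituted from params rather than rewritten from the target's file.
def _example_generate_sources_list():
    cfg = {'sources_list': 'deb $MIRROR $RELEASE main\n'}
    mirrors = {'MIRROR': 'http://archive.ubuntu.com/ubuntu'}
    generate_sources_list(cfg, 'focal', mirrors, target='/tmp/target')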