def ubuntu_core_curthooks(cfg, target=None):
    """Handle curthooks for Ubuntu-Core 16 images.

    Ubuntu-Core 16 images cannot execute standard curthooks. Instead,
    copy any cloud-init configuration into the 'LABEL=writable'
    partition mounted at target.
    """
    core_root = os.path.join(target, "system-data")
    cc_dir = os.path.join(core_root, 'etc/cloud/cloud.cfg.d')

    cloudconfig = cfg.get('cloudconfig', None)
    if cloudconfig:
        # remove cloud-init.disabled, if found, so the copied config
        # actually takes effect on first boot
        disable_marker = os.path.join(core_root,
                                      'etc/cloud/cloud-init.disabled')
        if os.path.exists(disable_marker):
            util.del_file(disable_marker)
        handle_cloudconfig(cloudconfig, base_dir=cc_dir)

    netconfig = cfg.get('network', None)
    if netconfig:
        LOG.info('Writing network configuration')
        net_cfg_path = os.path.join(cc_dir, "50-curtin-networking.cfg")
        util.write_file(net_cfg_path,
                        content=config.dump_config({'network': netconfig}))
def set_sync_action(devpath, action=None, retries=None):
    """Set the md 'sync_action' sysfs attribute on the array at devpath.

    :param devpath: path to the md block device (e.g. /dev/md0).
    :param action: value to write (e.g. 'idle', 'frozen'); no-op if falsy.
    :param retries: list of sleep intervals (seconds) between attempts;
                    defaults to 60 attempts of 0.2s each.
    """
    assert_valid_devpath(devpath)
    if not action:
        return

    if not retries:
        retries = [0.2] * 60

    sync_action = md_sysfs_attr_path(devpath, 'sync_action')
    if not os.path.exists(sync_action):
        # arrays without sync_action can't set values
        return

    LOG.info("mdadm set sync_action=%s on array %s", action, devpath)
    for (attempt, wait) in enumerate(retries):
        try:
            LOG.debug('mdadm: set sync_action %s attempt %s',
                      devpath, attempt)
            # read current value; only write when it differs
            val = md_sysfs_attr(devpath, 'sync_action').strip()
            LOG.debug('sync_action = "%s" ? "%s"', val, action)
            if val != action:
                LOG.debug("mdadm: setting array sync_action=%s", action)
                try:
                    util.write_file(sync_action, content=action)
                except (IOError, OSError) as e:
                    # non-fatal: the kernel may reject the write while the
                    # array is busy; value is re-checked on the next pass
                    LOG.debug("mdadm: (non-fatal) write to %s failed %s",
                              sync_action, e)
            else:
                LOG.debug("mdadm: set array sync_action=%s SUCCESS", action)
                return
        except util.ProcessExecutionError:
            # reading the attribute failed; wait and retry
            LOG.debug(
                "mdadm: set sync_action failed, retrying in %s seconds",
                wait)
            time.sleep(wait)
            pass
def apply_preserve_sources_list(target):
    """Write a cloud-init drop-in preventing it rewriting apt sources."""
    # protect the just generated sources.list from cloud-init
    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"

    target_ver = distro.get_package_version('cloud-init', target=target)
    if not target_ver:
        LOG.info(
            "Attempt to read cloud-init version from target returned "
            "'%s', not writing preserve_sources_list config.",
            target_ver)
        return

    if target_ver['major'] < 1:
        # anything cloud-init 0.X.X will get the old config key.
        cfg = {'apt_preserve_sources_list': True}
    else:
        cfg = {'apt': {'preserve_sources_list': True}}

    try:
        util.write_file(paths.target_path(target, cloudfile),
                        config.dump_config(cfg), mode=0o644)
        LOG.debug("Set preserve_sources_list to True in %s with: %s",
                  cloudfile, cfg)
    except IOError:
        LOG.exception(
            "Failed to protect /etc/apt/sources.list from cloud-init in '%s'",
            cloudfile)
        raise
def generate_sources_list(cfg, release, mirrors, target=None):
    """generate_sources_list
    create a source.list file based on a custom or default template
    by replacing mirrors and release in the template

    :param cfg: apt configuration dict; may carry a 'sources_list'
                template and a 'disable_suites' list.
    :param release: distro release name, substituted for $RELEASE.
    :param mirrors: mapping of mirror placeholder names to URLs.
    :param target: optional target root; defaults to the host.
    """
    default_mirrors = get_default_mirrors(distro.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        # BUGFIX: message previously rendered as "...modifymirrors..."
        # because of a missing space at the implicit string concatenation.
        LOG.info(
            "No custom template provided, fall back to modify "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(paths.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    # keep a backup of the original file on the target
    orig = paths.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
def copy_mdadm_conf(mdadm_conf, target):
    """Copy an mdadm.conf into target's /etc/mdadm/mdadm.conf.

    :param mdadm_conf: path to the source mdadm.conf; if falsy, nothing
                       is copied (a warning is logged).
    :param target: target root directory.
    """
    if not mdadm_conf:
        # FIX: Logger.warn is a deprecated alias; use warning()
        LOG.warning("mdadm config must be specified, not copying")
        return

    LOG.info("copying mdadm.conf into target")
    shutil.copy(mdadm_conf,
                os.path.sep.join([target, 'etc/mdadm/mdadm.conf']))
def copy_iscsi_conf(nodes_dir, target):
    """Copy the iSCSI nodes database into target's /etc/iscsi/nodes.

    :param nodes_dir: path to the source nodes directory; if falsy,
                      nothing is copied (a warning is logged).
    :param target: target root directory.
    """
    if not nodes_dir:
        # FIX: Logger.warn is a deprecated alias; use warning()
        LOG.warning("nodes directory must be specified, not copying")
        return

    LOG.info("copying iscsi nodes database into target")
    shutil.copytree(nodes_dir,
                    os.path.sep.join([target, 'etc/iscsi/nodes']))
def lookup_disk(serial):
    """
    Search for a disk by its serial number using /dev/disk/by-id/

    :param serial: disk serial string, in short or long format.
    :returns: resolved path of the block device under /dev.
    :raises ValueError: if no matching disk is found or the resolved
        path does not exist.
    """
    # Get all volumes in /dev/disk/by-id/ containing the serial string. The
    # string specified can be either in the short or long serial format
    # hack, some serials have spaces, udev usually converts ' ' -> '_'
    serial_udev = serial.replace(' ', '_')
    LOG.info('Processing serial %s via udev to %s', serial, serial_udev)

    # comprehension instead of filter(lambda); 'not disks' already covers
    # the (previously redundant) len(disks) < 1 check
    disks = [entry for entry in os.listdir("/dev/disk/by-id/")
             if serial_udev in entry]
    if not disks:
        raise ValueError("no disk with serial '%s' found" % serial_udev)

    # Sort by length and take the shortest path name, as the longer path names
    # will be the partitions on the disk. Then use os.path.realpath to
    # determine the path to the block device in /dev/
    disks.sort(key=len)
    path = os.path.realpath("/dev/disk/by-id/%s" % disks[0])

    if not os.path.exists(path):
        raise ValueError("path '%s' to block device for disk with serial '%s' \
does not exist" % (path, serial_udev))
    return path
def lookup_disk(serial):
    """
    Search for a disk by its serial number using /dev/disk/by-id/

    Resolves multipath devices and members to their /dev/mapper path.

    :param serial: disk serial string, in short or long format.
    :returns: resolved path of the block device.
    :raises ValueError: if no matching disk is found or the resolved
        path does not exist.
    """
    # Get all volumes in /dev/disk/by-id/ containing the serial string. The
    # string specified can be either in the short or long serial format
    # hack, some serials have spaces, udev usually converts ' ' -> '_'
    serial_udev = serial.replace(' ', '_')
    LOG.info('Processing serial %s via udev to %s', serial, serial_udev)

    # comprehension instead of filter(lambda); 'not disks' already covers
    # the (previously redundant) len(disks) < 1 check
    disks = [entry for entry in os.listdir("/dev/disk/by-id/")
             if serial_udev in entry]
    if not disks:
        raise ValueError("no disk with serial '%s' found" % serial_udev)

    # Sort by length and take the shortest path name, as the longer path names
    # will be the partitions on the disk. Then use os.path.realpath to
    # determine the path to the block device in /dev/
    disks.sort(key=len)
    LOG.debug('lookup_disks found: %s', disks)
    path = os.path.realpath("/dev/disk/by-id/%s" % disks[0])

    # /dev/dm-X
    if multipath.is_mpath_device(path):
        info = udevadm_info(path)
        path = os.path.join('/dev/mapper', info['DM_NAME'])
    # /dev/sdX
    elif multipath.is_mpath_member(path):
        mp_name = multipath.find_mpath_id_by_path(path)
        path = os.path.join('/dev/mapper', mp_name)

    if not os.path.exists(path):
        raise ValueError("path '%s' to block device for disk with serial '%s' \
does not exist" % (path, serial_udev))
    LOG.debug('block.lookup_disk() returning path %s', path)
    return path
def parse_sb_version(device=None, sbdict=None):
    """Parse bcache 'sb_version' field to integer if possible.

    :param device: device name to read the superblock from.
    :param sbdict: pre-parsed bcache superblock dict.
    :returns: integer version, or None if no superblock is available.
    :raises ValueError: on missing input or an sbdict of the wrong type.
    """
    if not device and not sbdict:
        raise ValueError('Supply a device name or bcache superblock dict')
    if not sbdict:
        sbdict = superblock_asdict(device=device)
    if not sbdict:
        LOG.info('Cannot parse sb.version without bcache superblock')
        return None
    if not isinstance(sbdict, dict):
        raise ValueError('Invalid sbdict type, must be dict')

    raw = sbdict.get('sb.version')
    try:
        # field looks like '1 [backing device]' or '3 [caching device]';
        # the leading token is the numeric version
        return int(raw.split()[0])
    except (AttributeError, ValueError):
        LOG.warning(
            "Failed to parse bcache 'sb.version' field"
            " as integer: %s", raw)
        raise
def _wipe_superblock(blockdev, exclusive=True, strict=True):
    """ No checks, just call wipe_volume """
    retries = [1, 3, 5, 7]
    total = len(retries)
    LOG.info('wiping superblock on %s', blockdev)
    for attempt, wait in enumerate(retries, start=1):
        LOG.debug('wiping %s attempt %s/%s', blockdev, attempt, total)
        try:
            block.wipe_volume(blockdev, mode='superblock',
                              exclusive=exclusive, strict=strict)
        except OSError:
            # give up only once every retry interval has been used
            if attempt >= total:
                raise
            LOG.debug(
                "wiping device '%s' failed on attempt"
                " %s/%s. sleeping %ss before retry",
                blockdev, attempt, total, wait)
            time.sleep(wait)
        else:
            LOG.debug('successfully wiped device %s on attempt %s/%s',
                      blockdev, attempt, total)
            return
def apt_command(args):
    """ Main entry point for curtin apt-config standalone command
    This does not read the global config as handled by curthooks, but
    instead one can specify a different "target" and a new cfg via --config
    """
    cfg = config.load_command_config(args, {})

    if args.target is not None:
        target = args.target
    else:
        state = util.load_command_environment()
        target = state['target']

    if target is None:
        sys.stderr.write("Unable to find target. "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    apt_cfg = cfg.get("apt")
    if apt_cfg is None:
        # if no apt config section is available, do nothing
        LOG.info("No apt config provided, skipping")
    else:
        LOG.debug("Handling apt to target %s with config %s",
                  target, apt_cfg)
        try:
            with util.ChrootableTarget(target, sys_resolvconf=True):
                handle_apt(apt_cfg, target)
        except (RuntimeError, TypeError, ValueError, IOError):
            LOG.exception("Failed to configure apt features '%s'", apt_cfg)
            sys.exit(1)

    sys.exit(0)
def mdadm_remove(devpath):
    """Remove the md device at devpath via 'mdadm --remove'.

    :param devpath: path to the md block device.
    :raises util.ProcessExecutionError: if mdadm exits non-zero.
    """
    assert_valid_devpath(devpath)
    # FIX: lazy %-style logging args (was eager "..." % devpath), matching
    # the other mdadm helpers in this module
    LOG.info("mdadm removing: %s", devpath)
    out, err = util.subp(["mdadm", "--remove", devpath], rcs=[0],
                         capture=True)
    LOG.debug("mdadm remove:\n%s\n%s", out, err)
def shutdown_lvm(device):
    """
    Shutdown specified lvm device.

    Wipes and removes the logical volume; if it was the last logical
    volume in its volume group, also removes the volume group and wipes
    its physical volumes. Destructive and order-sensitive.

    :param device: path to the lvm block device (resolved via sysfs).
    """
    device = block.sys_block_path(device)
    # lvm devices have a dm directory that contains a file 'name' containing
    # '{volume group}-{logical volume}'. The volume can be freed using lvremove
    name_file = os.path.join(device, 'dm', 'name')
    lvm_name = util.load_file(name_file).strip()
    (vg_name, lv_name) = lvm.split_lvm_name(lvm_name)
    vg_lv_name = "%s/%s" % (vg_name, lv_name)
    devname = "/dev/" + vg_lv_name

    # wipe contents of the logical volume first
    LOG.info('Wiping lvm logical volume: %s', devname)
    block.quick_zero(devname, partitions=False)

    # remove the logical volume
    LOG.debug('using "lvremove" on %s', vg_lv_name)
    util.subp(['lvremove', '--force', '--force', vg_lv_name])

    # if that was the last lvol in the volgroup, get rid of volgroup
    if len(lvm.get_lvols_in_volgroup(vg_name)) == 0:
        # capture member pvols before the vg is removed
        pvols = lvm.get_pvols_in_volgroup(vg_name)
        # NOTE(review): rc 5 is tolerated here, presumably "vg not
        # found / already removed" — confirm against vgremove docs
        util.subp(['vgremove', '--force', '--force', vg_name], rcs=[0, 5])
        # wipe the underlying physical volumes
        for pv in pvols:
            LOG.info('Wiping lvm physical volume: %s', pv)
            block.quick_zero(pv, partitions=False)

    # refresh lvmetad
    lvm.lvm_scan()
def fail_device(mddev, arraydev):
    """Mark member arraydev as faulty in the md array at mddev."""
    assert_valid_devpath(mddev)

    LOG.info("mdadm mark faulty: %s in array %s", arraydev, mddev)
    out, err = util.subp(
        ["mdadm", "--fail", mddev, arraydev], rcs=[0], capture=True)
    LOG.debug("mdadm mark faulty:\n%s\n%s", out, err)
def restart_iscsi_service():
    """Restart (or reload) the open-iscsi service on the running system."""
    LOG.info('restarting iscsi service')
    # pick the service manager frontend appropriate for this system
    cmd = (['systemctl', 'reload-or-restart', 'open-iscsi']
           if util.uses_systemd()
           else ['service', 'open-iscsi', 'restart'])
    util.subp(cmd, capture=True)
def remove_device(mddev, arraydev):
    """Remove member arraydev from the md array at mddev."""
    assert_valid_devpath(mddev)

    LOG.info("mdadm remove %s from array %s", arraydev, mddev)
    out, err = util.subp(
        ["mdadm", "--remove", mddev, arraydev], rcs=[0], capture=True)
    LOG.debug("mdadm remove:\n%s\n%s", out, err)
def attach_backing_to_cacheset(backing_device, cache_device, cset_uuid):
    """Attach a bcache backing device to a cache set.

    Writes cset_uuid to the backing device's sysfs 'attach' file.

    :param backing_device: block device path of the backing device.
    :param cache_device: block device path of the cache device (logged only).
    :param cset_uuid: uuid of the cache set to attach to.
    """
    # FIX: lazy %-style logging args instead of eager str.format();
    # rendered message is unchanged
    LOG.info("Attaching backing device to cacheset: "
             "%s -> %s cset.uuid: %s",
             backing_device, cache_device, cset_uuid)
    backing_device_sysfs = sys_block_path(backing_device)
    attach = os.path.join(backing_device_sysfs, "bcache", "attach")
    util.write_file(attach, cset_uuid, mode=None)
def load_and_validate(config_path):
    """Load and validate storage config file.

    :param config_path: path to a curtin config file.
    :returns: result of validate_config, or None when the file has no
        'storage' section.
    """
    config = curtin_config.load_config(config_path)
    if 'storage' not in config:
        # FIX: lazy %-style logging args (was eager %-interpolation)
        LOG.info('Skipping %s, missing "storage" key', config_path)
        return

    return validate_config(config.get('storage'), sourcefile=config_path)
def zero_device(devpath):
    """Erase the md superblock on devpath via 'mdadm --zero-superblock'."""
    assert_valid_devpath(devpath)

    LOG.info("mdadm zero superblock on %s", devpath)
    out, err = util.subp(
        ["mdadm", "--zero-superblock", devpath], rcs=[0], capture=True)
    LOG.debug("mdadm zero superblock:\n%s\n%s", out, err)
def mdadm_stop(devpath, retries=None):
    """Stop the md array at devpath, retrying while the kernel resyncs.

    An array that is resyncing may refuse to stop; before each stop
    attempt, request sync_action=idle and clamp sync_{min,max} to 0 to
    contain an ongoing resync.

    :param devpath: path to the md block device.
    :param retries: list of sleep intervals (seconds) between attempts;
        defaults to 60 attempts of 0.2s each.
    :raises OSError: if the array could not be stopped after all retries.
    """
    assert_valid_devpath(devpath)
    if not retries:
        retries = [0.2] * 60

    sync_action = md_sysfs_attr_path(devpath, 'sync_action')
    sync_max = md_sysfs_attr_path(devpath, 'sync_max')
    sync_min = md_sysfs_attr_path(devpath, 'sync_min')

    # FIX: lazy %-style logging args (was eager %-interpolation)
    LOG.info("mdadm stopping: %s", devpath)
    for (attempt, wait) in enumerate(retries):
        try:
            LOG.debug('mdadm: stop on %s attempt %s', devpath, attempt)
            # An array in 'resync' state may not be stoppable, attempt to
            # cancel an ongoing resync
            val = md_sysfs_attr(devpath, 'sync_action')
            # BUGFIX: this log previously mislabeled the value as
            # "<path>/sync_max" while reading sync_action
            LOG.debug('%s = %s', sync_action, val)
            if val != "idle":
                LOG.debug("mdadm: setting array sync_action=idle")
                try:
                    util.write_file(sync_action, content="idle")
                except (IOError, OSError) as e:
                    LOG.debug("mdadm: (non-fatal) write to %s failed %s",
                              sync_action, e)

            # Clamping sync_{max,min} can help prevent the array from
            # changing back to 'resync' which may prevent the array from
            # being stopped
            val = md_sysfs_attr(devpath, 'sync_max')
            LOG.debug('%s = %s', sync_max, val)
            if val != "0":
                LOG.debug("mdadm: setting array sync_{min,max}=0")
                try:
                    for sync_file in [sync_max, sync_min]:
                        util.write_file(sync_file, content="0")
                except (IOError, OSError) as e:
                    LOG.debug('mdadm: (non-fatal) write to %s failed %s',
                              sync_file, e)

            # one wonders why this command doesn't do any of the above itself?
            out, err = util.subp(["mdadm", "--manage", "--stop", devpath],
                                 capture=True)
            LOG.debug("mdadm stop command output:\n%s\n%s", out, err)
            LOG.info("mdadm: successfully stopped %s after %s attempt(s)",
                     devpath, attempt + 1)
            return
        except util.ProcessExecutionError:
            LOG.warning("mdadm stop failed, retrying ")
            if os.path.isfile('/proc/mdstat'):
                LOG.critical("/proc/mdstat:\n%s",
                             util.load_file('/proc/mdstat'))
            LOG.debug("mdadm: stop failed, retrying in %s seconds", wait)
            time.sleep(wait)

    # BUGFIX: the device path was passed as a second positional arg to
    # OSError and never interpolated into the message
    raise OSError('Failed to stop mdadm device %s' % devpath)
def do_apt_config(cfg, target):
    """Translate legacy apt options in cfg and apply apt config to target."""
    cfg = apt_config.translate_old_apt_features(cfg)
    apt_cfg = cfg.get("apt")
    if apt_cfg is None:
        LOG.info("No apt config provided, skipping")
        return
    LOG.info("curthooks handling apt to target %s with config %s",
             target, apt_cfg)
    apt_config.handle_apt(apt_cfg, target)
def stop_cacheset(cset_uuid):
    """stop specified bcache cacheset."""
    # we may be called with a full path or just the uuid
    cset_device = (cset_uuid if cset_uuid.startswith('/sys/fs/bcache/')
                   else "/sys/fs/bcache/%s" % cset_uuid)
    LOG.info('Stopping bcache set device: %s', cset_device)
    _stop_device(cset_device)
def save_iscsi_config(iscsi_disk):
    """Persist the iSCSI node file for iscsi_disk alongside the fstab."""
    state = util.load_command_environment()
    # A nodes directory will be created in the same directory as the
    # fstab in the configuration. This will then be copied onto the
    # system later
    if not state['fstab']:
        LOG.info("fstab configuration is not present in environment, "
                 "so cannot locate an appropriate directory to write "
                 "iSCSI node file in so not writing iSCSI node file")
        return
    target_nodes_location = target_nodes_directory(state, iscsi_disk)
    shutil.copy(iscsi_disk.etciscsi_nodefile, target_nodes_location)
def clear_holders(base_paths, try_preserve=False):
    """
    Clear all storage layers depending on the devices specified in
    'base_paths'
    A single device or list of devices can be specified.
    Device paths can be specified either as paths in /dev or /sys/block
    Will throw OSError if any holders could not be shut down
    """
    # handle single path
    if not isinstance(base_paths, (list, tuple)):
        base_paths = [base_paths]

    # get current holders and plan how to shut them down
    holder_trees = [gen_holders_tree(path) for path in base_paths]
    LOG.info('Current device storage tree:\n%s',
             '\n'.join(format_holders_tree(tree) for tree in holder_trees))
    ordered_devs = plan_shutdown_holder_trees(holder_trees)
    LOG.info('Shutdown Plan:\n%s', "\n".join(map(str, ordered_devs)))

    # run shutdown functions
    for dev_info in ordered_devs:
        dev_type = DEV_TYPES.get(dev_info['dev_type'])
        shutdown_function = dev_type.get('shutdown')
        if not shutdown_function:
            continue

        if try_preserve and shutdown_function in DATA_DESTROYING_HANDLERS:
            # FIX: lazy %-style logging args (was eager %-interpolation)
            LOG.info('shutdown function for holder type: %s is destructive. '
                     'attempting to preserve data, so skipping',
                     dev_info['dev_type'])
            continue

        # device may already be gone after an earlier shutdown step
        if os.path.exists(dev_info['device']):
            LOG.info("shutdown running on holder type: '%s' syspath: '%s'",
                     dev_info['dev_type'], dev_info['device'])
            shutdown_function(dev_info['device'])
def unmount_main(args):
    """ run util.umount(target, recursive=True) """
    target = args.target
    if target is None:
        raise ValueError(
            "Missing target. Please provide target path parameter")
    if not os.path.exists(target):
        raise util.FileMissingError(
            "Cannot unmount target path %s: it does not exist" % target)

    LOG.info("Unmounting devices from target path: %s", target)
    # recursion is on unless explicitly disabled via the CLI flag
    util.do_umount(target, recursive=not args.disable_recursive_mounts)
def render_netconfig_passthrough(target, netconfig=None):
    """
    Extract original network config and pass it through to
    cloud-init in target

    :param target: target root directory.
    :param netconfig: dict which must contain a 'network' key.
    :raises ValueError: if netconfig is not a dict or lacks 'network'.
    """
    if not isinstance(netconfig, dict):
        raise ValueError('Network config must be a dictionary')
    if 'network' not in netconfig:
        raise ValueError("Network config must contain the key 'network'")

    cc = 'etc/cloud/cloud.cfg.d/50-curtin-networking.cfg'
    dest = os.path.sep.join((target, cc,))
    LOG.info('Writing network config to %s: %s', cc, dest)
    util.write_file(dest, content=config.dump_config(netconfig))
def generate_sources_list(cfg, release, mirrors, target=None):
    """generate_sources_list
    create a source.list file based on a custom or default template
    by replacing mirrors and release in the template

    Also writes a cloud-init drop-in protecting the generated file.

    :param cfg: apt configuration dict; may carry a 'sources_list'
                template and a 'disable_suites' list.
    :param release: distro release name, substituted for $RELEASE.
    :param mirrors: mapping of mirror placeholder names to URLs.
    :param target: optional target root; defaults to the host.
    """
    default_mirrors = get_default_mirrors(util.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        # BUGFIX: message previously rendered as "...modifymirrors..."
        # because of a missing space at the implicit string concatenation.
        LOG.info(
            "No custom template provided, fall back to modify "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(util.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    # keep a backup of the original file on the target
    orig = util.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)

    # protect the just generated sources.list from cloud-init
    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
    # this has to work with older cloud-init as well, so use old key
    cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
    try:
        util.write_file(util.target_path(target, cloudfile),
                        cloudconf, mode=0o644)
    except IOError:
        LOG.exception("Failed to protect source.list from cloud-init in (%s)",
                      util.target_path(target, cloudfile))
        raise
def apply_networking(target, state):
    """Apply network config to target, preferring state['network_config']."""
    netconf = state.get('network_config')
    interfaces = state.get('interfaces')

    def is_valid_src(infile):
        # treat a file with more than one line as usable config
        with open(infile, 'r') as fp:
            return len(fp.read().split('\n')) > 1

    if is_valid_src(netconf):
        LOG.info("applying network_config")
        apply_net.apply_net(target, network_state=None,
                            network_config=netconf)
    else:
        LOG.debug("copying interfaces")
        copy_interfaces(interfaces, target)
def activate_volgroups():
    """ Activate available volgroups and logical volumes within.

    # found
    % vgchange -ay
      1 logical volume(s) in volume group "vg1sdd" now active

    # none found (no output)
    % vgchange -ay
    """
    # vgchange handles syncing with udev by default
    # see man 8 vgchange and flag --noudevsync
    stdout, _ = util.subp(['vgchange', '--activate=y'], capture=True)
    if stdout:
        LOG.info(stdout)
def shutdown_mdadm(device):
    """
    Shutdown specified mdadm device.

    Wipes the array superblock, fails/removes member devices, stops the
    array, zeroes member superblocks, then waits for the kernel to
    release the device (stop is asynchronous, see LP: #1682456).

    :param device: sysfs path to the md device.
    :raises OSError: if the kernel does not release the device in time.
    """
    blockdev = block.sysfs_to_devpath(device)

    LOG.info('Wiping superblock on raid device: %s', device)
    _wipe_superblock(blockdev, exclusive=False)

    md_devs = (mdadm.md_get_devices_list(blockdev) +
               mdadm.md_get_spares_list(blockdev))
    mdadm.set_sync_action(blockdev, action="idle")
    mdadm.set_sync_action(blockdev, action="frozen")
    for mddev in md_devs:
        try:
            mdadm.fail_device(blockdev, mddev)
            mdadm.remove_device(blockdev, mddev)
        except util.ProcessExecutionError as e:
            # best-effort: members may already be failed or removed
            LOG.debug('Non-fatal error clearing raid array: %s', e.stderr)

    LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
    mdadm.mdadm_stop(blockdev)

    for mddev in md_devs:
        mdadm.zero_device(mddev)

    # mdadm stop operation is asynchronous so we must wait for the kernel to
    # release resources. For more details see LP: #1682456
    try:
        for wait in MDADM_RELEASE_RETRIES:
            if mdadm.md_present(block.path_to_kname(blockdev)):
                time.sleep(wait)
            else:
                LOG.debug('%s has been removed', blockdev)
                break

        if mdadm.md_present(block.path_to_kname(blockdev)):
            # BUGFIX: blockdev was passed as a second positional arg to
            # OSError and never interpolated into the message
            raise OSError('Timeout exceeded for removal of %s' % blockdev)
    except OSError:
        LOG.critical('Failed to stop mdadm device %s', device)
        if os.path.exists('/proc/mdstat'):
            LOG.critical("/proc/mdstat:\n%s",
                         util.load_file('/proc/mdstat'))
        raise