Esempio n. 1
0
def apt_command(args):
    """Standalone entry point for the curtin apt-config command.

    Unlike the curthooks path this does not consume the global config;
    the target and configuration are taken from --target / --config
    (falling back to TARGET_MOUNT_POINT from the command environment).
    """
    cfg = config.load_command_config(args, {})

    target = args.target
    if target is None:
        target = util.load_command_environment()['target']

    if target is None:
        sys.stderr.write("Unable to find target.  "
                         "Use --target or set TARGET_MOUNT_POINT\n")
        sys.exit(2)

    apt_cfg = cfg.get("apt")
    if apt_cfg is None:
        # nothing to do without an "apt" config section
        LOG.info("No apt config provided, skipping")
    else:
        LOG.debug("Handling apt to target %s with config %s", target, apt_cfg)
        try:
            with util.ChrootableTarget(target, sys_resolvconf=True):
                handle_apt(apt_cfg, target)
        except (RuntimeError, TypeError, ValueError, IOError):
            LOG.exception("Failed to configure apt features '%s'", apt_cfg)
            sys.exit(1)

    sys.exit(0)
Esempio n. 2
0
def apply_preserve_sources_list(target):
    """Drop a cloud-init config file in the target that prevents it from
    rewriting the sources.list curtin just generated."""
    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"

    target_ver = distro.get_package_version('cloud-init', target=target)
    if not target_ver:
        # no readable cloud-init version -> nothing to protect against
        LOG.info(
            "Attempt to read cloud-init version from target returned "
            "'%s', not writing preserve_sources_list config.", target_ver)
        return

    if target_ver['major'] < 1:
        # cloud-init 0.X.X only understands the legacy top-level key
        cfg = {'apt_preserve_sources_list': True}
    else:
        cfg = {'apt': {'preserve_sources_list': True}}

    try:
        util.write_file(paths.target_path(target, cloudfile),
                        config.dump_config(cfg), mode=0o644)
        LOG.debug("Set preserve_sources_list to True in %s with: %s",
                  cloudfile, cfg)
    except IOError:
        LOG.exception(
            "Failed to protect /etc/apt/sources.list from cloud-init in '%s'",
            cloudfile)
        raise
Esempio n. 3
0
def add_apt_key_raw(key, target=None):
    """Pipe the raw GPG key text in *key* into ``apt-key add`` on the
    (optionally chrooted) target system."""
    LOG.debug("Adding key:\n'%s'", key)
    cmd = ['apt-key', 'add', '-']
    try:
        util.subp(cmd, data=key.encode(), target=target)
    except util.ProcessExecutionError:
        LOG.exception("failed to add apt GPG Key to apt keyring")
        raise
Esempio n. 4
0
def discover():
    """Probe the system via probert and return an extracted curtin
    storage config dict; {} when extraction cannot import its deps."""
    probe_data = _discover_get_probert_data()
    if 'storage' not in probe_data:
        raise ValueError('Probing storage failed')

    LOG.debug('Extracting storage config from discovered devices')
    try:
        return storage_config.extract_storage_config(
            probe_data.get('storage'))
    except ImportError as e:
        LOG.exception(e)
        return {}
Esempio n. 5
0
def generate_sources_list(cfg, release, mirrors, target=None):
    """ generate_sources_list
        create a source.list file based on a custom or default template
        by replacing mirrors and release in the template

        :param cfg: apt config dict; may provide 'sources_list' (template
                    text) and 'disable_suites'
        :param release: distro codename substituted for $RELEASE
        :param mirrors: mapping of placeholder names (MIRROR, SECURITY, ...)
                        to mirror URLs
        :param target: optional target root directory
    """
    default_mirrors = get_default_mirrors(util.get_architecture(target))
    aptsrc = "/etc/apt/sources.list"
    params = {'RELEASE': release}
    for k in mirrors:
        params[k] = mirrors[k]

    tmpl = cfg.get('sources_list', None)
    if tmpl is None:
        # FIX: the adjacent string literals previously joined as
        # "modifymirrors"; a trailing space restores the intended message.
        LOG.info(
            "No custom template provided, fall back to modify "
            "mirrors in %s on the target system", aptsrc)
        tmpl = util.load_file(util.target_path(target, aptsrc))
        # Strategy if no custom template was provided:
        # - Only replacing mirrors
        # - no reason to replace "release" as it is from target anyway
        # - The less we depend upon, the more stable this is against changes
        # - warn if expected original content wasn't found
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
                                     "$MIRROR")
        tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
                                     "$SECURITY")

    # keep the original file around for debugging/rollback
    orig = util.target_path(target, aptsrc)
    if os.path.exists(orig):
        os.rename(orig, orig + ".curtin.old")

    rendered = util.render_string(tmpl, params)
    disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
    util.write_file(util.target_path(target, aptsrc), disabled, mode=0o644)

    # protect the just generated sources.list from cloud-init
    cloudfile = "/etc/cloud/cloud.cfg.d/curtin-preserve-sources.cfg"
    # this has to work with older cloud-init as well, so use old key
    cloudconf = yaml.dump({'apt_preserve_sources_list': True}, indent=1)
    try:
        util.write_file(util.target_path(target, cloudfile),
                        cloudconf,
                        mode=0o644)
    except IOError:
        LOG.exception("Failed to protect source.list from cloud-init in (%s)",
                      util.target_path(target, cloudfile))
        raise
Esempio n. 6
0
def sfdisk_info(devpath):
    '''Return the "partitiontable" dict from ``sfdisk --json`` for the
    disk containing devpath, or {} if sfdisk fails.

    gpt example:
    {
      "label": "gpt",
      "id": "877716F7-31D0-4D56-A1ED-4D566EFE418E",
      "device": "/dev/vda",
      "unit": "sectors",
      "firstlba": 34,
      "lastlba": 41943006,
      "partitions": [
         {"node": "/dev/vda1", "start": 227328, "size": 41715679,
          "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
          "uuid": "60541CAF-E2AC-48CD-BF89-AF16051C833F"},
      ]
    }

    dos example:
    {
      "label":"dos",
      "id":"0xb0dbdde1",
      "device":"/dev/vdb",
      "unit":"sectors",
      "partitions": [
         {"node":"/dev/vdb1", "start":2048, "size":8388608,
          "type":"83", "bootable":true},
         {"node":"/dev/vdb2", "start":8390656, "size":8388608, "type":"83"}
      ]
    }
    '''
    parent, _partnum = get_blockdev_for_partition(devpath)
    out = None
    try:
        out, _err = util.subp(['sfdisk', '--json', parent], capture=True)
    except util.ProcessExecutionError as e:
        LOG.exception(e)
    if out is None:
        return {}
    return util.load_json(out).get('partitiontable', {})
Esempio n. 7
0
def handle_apt(cfg, target=None):
    """Apply an "apt" configuration dict to the target system.

    Called by curthooks when a global apt config was provided, or by the
    standalone "apt" command.
    """
    release = distro.lsb_release(target=target)['codename']
    arch = distro.get_architecture(target)
    mirrors = find_apt_mirror_info(cfg, arch)
    LOG.debug("Apt Mirror info: %s", mirrors)

    apply_debconf_selections(cfg, target)

    preserve = config.value_as_boolean(cfg.get('preserve_sources_list', True))
    if not preserve:
        generate_sources_list(cfg, release, mirrors, target)
        apply_preserve_sources_list(target)
        rename_apt_lists(mirrors, target)

    try:
        apply_apt_proxy_config(cfg, target + APT_PROXY_FN,
                               target + APT_CONFIG_FN)
    except (IOError, OSError):
        # proxy config is best-effort; log and continue
        LOG.exception("Failed to apply proxy or apt config info:")

    # Process 'apt_source -> sources {dict}'
    if 'sources' in cfg:
        # note: intentionally aliases (and augments) the mirrors dict
        params = mirrors
        params['RELEASE'] = release
        params['MIRROR'] = mirrors["MIRROR"]

        matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
        matcher = re.compile(matchcfg).search if matchcfg else None

        add_apt_sources(cfg['sources'], target,
                        template_params=params,
                        aa_repo_match=matcher)
Esempio n. 8
0
def discover():
    """Probe local storage devices with probert and return the extracted
    curtin storage config; {} when probert is unavailable."""
    try:
        LOG.debug('Importing probert prober')
        from probert import prober
    except Exception:
        LOG.error('Failed to import probert, discover disabled')
        return {}

    storage_prober = prober.Prober()
    LOG.debug('Probing system for storage devices')
    storage_prober.probe_storage()
    results = storage_prober.get_results()
    if 'storage' not in results:
        raise ValueError('Probing storage failed')

    LOG.debug('Extracting storage config from discovered devices')
    try:
        return storage_config.extract_storage_config(results.get('storage'))
    except ImportError as e:
        LOG.exception(e)
        return {}
Esempio n. 9
0
def mdadm_examine(devpath, export=MDADM_USE_EXPORT):
    ''' exectute mdadm --examine, and optionally
        append --export.
        Parse and return dict of key=val from output

        :param devpath: block device path to examine
        :param export: when True, use --export and parse key=value lines
        :returns: dict of mdadm metadata, or {} if devpath is not a valid
                  md member device
    '''
    assert_valid_devpath(devpath)

    cmd = ["mdadm", "--examine"]
    if export:
        cmd.extend(["--export"])
    cmd.extend([devpath])

    try:
        (out, _err) = util.subp(cmd, capture=True)
    except util.ProcessExecutionError:
        # FIX: util.subp raises ProcessExecutionError (see its other
        # callers in this module), not subprocess.CalledProcessError;
        # catching the wrong type let the failure propagate instead of
        # returning {} as intended.
        LOG.exception('Error: not a valid md device: %s', devpath)
        return {}

    if export:
        data = __mdadm_export_to_dict(out)
    else:
        data = __upgrade_detail_dict(__mdadm_detail_to_dict(out))

    return data
Esempio n. 10
0
def exclusive_open(path, exclusive=True):
    """
    Obtain an exclusive file-handle to the file/device specified unless
    caller specifies exclusive=False.

    Generator that yields a read/write binary file object for *path*;
    presumably wrapped with contextlib.contextmanager at the decoration
    site — TODO confirm (decorator not visible in this chunk).

    :param path: path to an existing file or block device
    :param exclusive: when True, open with O_EXCL (per open(2), on a
        block device this fails with EBUSY if the device is in use)
    :raises ValueError: if path does not exist
    :raises OSError: if the open fails or the fd cannot be wrapped;
        diagnostic info (holders, mounts, fuser) is logged first
    """
    mode = 'rb+'  # read/write binary without truncating
    fd = None
    if not os.path.exists(path):
        raise ValueError("No such file at path: %s" % path)

    flags = os.O_RDWR
    if exclusive:
        flags += os.O_EXCL
    try:
        fd = os.open(path, flags)
        try:
            # track whether we still own the raw fd: once os.fdopen
            # succeeds, the file object owns (and closes) it
            fd_needs_closing = True
            with os.fdopen(fd, mode) as fo:
                yield fo
            fd_needs_closing = False
        except OSError:
            LOG.exception("Failed to create file-object from fd")
            raise
        finally:
            # python2 leaves fd open if there os.fdopen fails
            if fd_needs_closing and sys.version_info.major == 2:
                os.close(fd)
    except OSError:
        # open failed (likely EBUSY with O_EXCL) — log who holds the
        # device before re-raising, to aid debugging
        LOG.error("Failed to exclusively open path: %s", path)
        holders = get_holders(path)
        LOG.error('Device holders with exclusive access: %s', holders)
        mount_points = util.list_device_mounts(path)
        LOG.error('Device mounts: %s', mount_points)
        fusers = util.fuser_mount(path)
        LOG.error('Possible users of %s:\n%s', path, fusers)
        raise
Esempio n. 11
0
def sfdisk_info(devpath):
    '''Return a dict of ``sfdisk --dump`` info for the disk holding
    devpath, parsed via _sfdisk_parse. On sfdisk failure the parser
    receives no lines.

    Example:
    {
     "/dev/vda1": {
        "size": "20744159",
        "start": "227328",
        "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
        "uuid": "29983666-2A66-4F14-8533-7CE13B715462"
     },
     "device": "/dev/vda",
     "first-lba": "34",
     "label": "gpt",
     "label-id": "E94FCCFE-953D-4D4B-9511-451BBCC17A9A",
     "last-lba": "20971486",
     "unit": "sectors"
    }
    '''
    parent, _partnum = get_blockdev_for_partition(devpath)
    out = ""
    try:
        out, _err = util.subp(['sfdisk', '--dump', parent], capture=True)
    except util.ProcessExecutionError as e:
        LOG.exception(e)
    return _sfdisk_parse(out.splitlines())
Esempio n. 12
0
def add_apt_sources(srcdict,
                    target=None,
                    template_params=None,
                    aa_repo_match=None):
    """
    Write an /etc/apt/sources.list.d entry for every abbreviated
    sources.list entry in 'srcdict', rendering each source template
    with template_params, then force an apt update.
    """
    if template_params is None:
        template_params = {}

    if aa_repo_match is None:
        raise ValueError('did not get a valid repo matcher')

    if not isinstance(srcdict, dict):
        raise TypeError('unknown apt format: %s' % (srcdict))

    for fname, entry in srcdict.items():
        # default the output filename to the dict key (entry is mutated
        # in place, as callers expect)
        entry.setdefault('filename', fname)

        add_apt_key(entry['filename'], entry, target)

        if 'source' not in entry:
            continue

        src = entry['source']
        if src == 'proposed':
            src = APT_SOURCES_PROPOSED
        src = util.render_string(src, template_params)

        # normalize the filename to an absolute .list path
        if not entry['filename'].startswith("/"):
            entry['filename'] = os.path.join("/etc/apt/sources.list.d/",
                                             entry['filename'])
        if not entry['filename'].endswith(".list"):
            entry['filename'] += ".list"

        if aa_repo_match(src):
            # PPA-style source: delegate to add-apt-repository in-chroot
            with util.ChrootableTarget(target,
                                       sys_resolvconf=True) as in_chroot:
                try:
                    in_chroot.subp(["add-apt-repository", src],
                                   retries=(1, 2, 5, 10))
                except util.ProcessExecutionError:
                    LOG.exception("add-apt-repository failed.")
                    raise
            continue

        sourcefn = paths.target_path(target, entry['filename'])
        try:
            util.write_file(sourcefn, "%s\n" % (src), omode="a")
        except IOError as detail:
            LOG.exception("failed write to file %s: %s", sourcefn, detail)
            raise

    distro.apt_update(target=target,
                      force=True,
                      comment="apt-source changed config")
Esempio n. 13
0
def extract_storage_config(probe_data, strict=False):
    """ Examine a probert storage dictionary and extract a curtin
        storage configuration that would recreate all of the
        storage devices present in the provided data.

        :param probe_data: probert storage probe results dict
        :param strict: when True, raise RuntimeError if the extracted
            config fails validation instead of just warning
        :returns: a {'storage': {'version': 1, 'config': [...]}} dict
        :raises RuntimeError: if strict and validation errors were found
    """
    convert_map = {
        'bcache': BcacheParser,
        'blockdev': BlockdevParser,
        'dmcrypt': DmcryptParser,
        'filesystem': FilesystemParser,
        'lvm': LvmParser,
        'raid': RaidParser,
        'mount': MountParser,
        'zfs': ZfsParser,
    }
    configs = []
    errors = []
    LOG.debug('Extracting storage config from probe data')
    for ptype, pname in convert_map.items():
        parser = pname(probe_data)
        found_cfgs, found_errs = parser.parse()
        configs.extend(found_cfgs)
        errors.extend(found_errs)

    LOG.debug('Sorting extracted configurations')
    # group once by type (previously eleven list-comprehension passes,
    # one of which shadowed the builtin 'format'), then emit in
    # dependency-friendly type order; unknown types are dropped as before
    by_type = {}
    for cfg in configs:
        by_type.setdefault(cfg.get('type'), []).append(cfg)
    type_order = ('disk', 'partition', 'format', 'lvm_volgroup',
                  'lvm_partition', 'raid', 'dm_crypt', 'mount',
                  'bcache', 'zpool', 'zfs')
    ordered = [cfg for t in type_order for cfg in by_type.get(t, [])]

    final_config = {'storage': {'version': 1, 'config': ordered}}
    try:
        LOG.info('Validating extracted storage config components')
        validate_config(final_config['storage'])
    except ValueError as e:
        errors.append(e)

    for e in errors:
        LOG.exception('Validation error: %s\n' % e)
    if len(errors) > 0:
        errmsg = "Extract storage config does not validate."
        LOG.warning(errmsg)
        if strict:
            raise RuntimeError(errmsg)

    # build and merge probed data into a valid storage config by
    # generating a config tree for each item in the probed data
    # and then merging the trees, which resolves dependencies
    # and produced a dependency ordered storage config
    LOG.debug(
        "Extracted (unmerged) storage config:\n%s",
        yaml.dump({'storage': ordered}, indent=4, default_flow_style=False))

    LOG.debug("Generating storage config dependencies")
    ctrees = []
    for cfg in ordered:
        tree = get_config_tree(cfg.get('id'), final_config)
        ctrees.append(tree)

    LOG.debug("Merging storage config dependencies")
    merged_config = {
        'version': 1,
        'config': merge_config_trees_to_list(ctrees)
    }
    LOG.debug(
        "Merged storage config:\n%s",
        yaml.dump({'storage': merged_config},
                  indent=4,
                  default_flow_style=False))
    return {'storage': merged_config}