Example #1
def start_build(cfg,
                dnflock,
                gitlock,
                branch,
                recipe_name,
                compose_type,
                test_mode=0):
    """ Start the build

    :param cfg: Configuration object
    :type cfg: ComposerConfig
    :param dnflock: Lock and YumBase for depsolving
    :type dnflock: YumLock
    :param recipe: The recipe to build
    :type recipe: str
    :param compose_type: The type of output to create from the recipe
    :type compose_type: str
    :returns: Unique ID for the build that can be used to track its status
    :rtype: str
    """
    share_dir = cfg.get("composer", "share_dir")
    lib_dir = cfg.get("composer", "lib_dir")

    # Make sure compose_type is valid
    if compose_type not in compose_types(share_dir):
        raise RuntimeError("Invalid compose type (%s), must be one of %s" %
                           (compose_type, compose_types(share_dir)))

    with gitlock.lock:
        (commit_id, recipe) = read_recipe_and_id(gitlock.repo, branch,
                                                 recipe_name)

    # Combine modules and packages and depsolve the list
    # TODO include the version/glob in the depsolving
    module_nver = recipe.module_nver
    package_nver = recipe.package_nver
    projects = sorted(set(module_nver + package_nver),
                      key=lambda p: p[0].lower())
    deps = []
    try:
        # This can possibly update repodata and reset the YumBase object.
        with dnflock.lock_check:
            (installed_size,
             deps) = projects_depsolve_with_size(dnflock.dbo,
                                                 projects,
                                                 recipe.group_names,
                                                 with_core=False)
    except ProjectsError as e:
        log.error("start_build depsolve: %s", str(e))
        raise RuntimeError("Problem depsolving %s: %s" %
                           (recipe["name"], str(e)))

    # Read the kickstart template for this type
    ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks"
    with open(ks_template_path, "r") as f:
        ks_template = f.read()

    # How much space will the packages in the default template take?
    ks_version = makeVersion()
    ks = KickstartParser(ks_version,
                         errorsAreFatal=False,
                         missingIncludeIsFatal=False)
    # The template ends with an open %packages section, so close it for parsing
    ks.readKickstartFromString(ks_template + "\n%end\n")
    pkgs = [(name, "*") for name in ks.handler.packages.packageList]
    grps = [grp.name for grp in ks.handler.packages.groupList]
    try:
        with dnflock.lock:
            (template_size, _) = projects_depsolve_with_size(
                dnflock.dbo,
                pkgs,
                grps,
                with_core=not ks.handler.packages.nocore)
    except ProjectsError as e:
        log.error("start_build depsolve: %s", str(e))
        raise RuntimeError("Problem depsolving %s: %s" %
                           (recipe["name"], str(e)))
    log.debug("installed_size = %d, template_size=%d", installed_size,
              template_size)

    # Minimum LMC disk size is 1GiB, and anaconda bumps the estimated size up by 10% (which doesn't always work).
    # XXX BUT Anaconda has a bug, it won't execute a kickstart on a disk smaller than 3000 MB
    # XXX There is an upstream patch pending, but until then, use that as the minimum
    installed_size = max(3e9, int(installed_size + template_size) * 1.2)
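    # e.g. installed_size=2.0e9 and template_size=0.4e9 give 2.4e9 * 1.2 = 2.88e9,
    # which is below the 3e9 Anaconda minimum, so 3e9 is used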
    log.debug("/ partition size = %d", installed_size)

    # Create the results directory
    build_id = str(uuid4())
    results_dir = joinpaths(lib_dir, "results", build_id)
    os.makedirs(results_dir)

    # Write the recipe commit hash
    commit_path = joinpaths(results_dir, "COMMIT")
    with open(commit_path, "w") as f:
        f.write(commit_id)

    # Write the original recipe
    recipe_path = joinpaths(results_dir, "blueprint.toml")
    with open(recipe_path, "w") as f:
        f.write(recipe.toml())

    # Write the frozen recipe
    frozen_recipe = recipe.freeze(deps)
    recipe_path = joinpaths(results_dir, "frozen.toml")
    with open(recipe_path, "w") as f:
        f.write(frozen_recipe.toml())

    # Write out the dependencies to the results dir
    deps_path = joinpaths(results_dir, "deps.toml")
    with open(deps_path, "w") as f:
        f.write(toml.dumps({"packages": deps}))

    # Save a copy of the original kickstart
    shutil.copy(ks_template_path, results_dir)

    with dnflock.lock:
        repos = list(dnflock.dbo.repos.iter_enabled())
    if not repos:
        raise RuntimeError("No enabled repos, canceling build.")

    # Create the final kickstart with repos and package list
    ks_path = joinpaths(results_dir, "final-kickstart.ks")
    with open(ks_path, "w") as f:
        ks_url = repo_to_ks(repos[0], "url")
        log.debug("url = %s", ks_url)
        f.write('url %s\n' % ks_url)
        for idx, r in enumerate(repos[1:]):
            ks_repo = repo_to_ks(r, "baseurl")
            log.debug("repo composer-%s = %s", idx, ks_repo)
            f.write('repo --name="composer-%s" %s\n' % (idx, ks_repo))

        # Setup the disk for booting
        # TODO Add GPT and UEFI boot support
        f.write('clearpart --all --initlabel\n')

        # Write the root partition and its size in MiB (rounded up)
        f.write('part / --size=%d\n' % ceil(installed_size / 1024**2))

        f.write(ks_template)

        for d in deps:
            f.write(dep_nevra(d) + "\n")
        f.write("%end\n")

        add_customizations(f, recipe)

    # Setup the config to pass to novirt_install
    log_dir = joinpaths(results_dir, "logs/")
    cfg_args = compose_args(compose_type)

    # Get the title, project, and release version from the host
    if not os.path.exists("/etc/os-release"):
        log.error(
            "/etc/os-release is missing, cannot determine product or release version"
        )
    os_release = flatconfig("/etc/os-release")

    log.debug("os_release = %s", dict(os_release.items()))

    cfg_args["title"] = os_release.get("PRETTY_NAME", "")
    cfg_args["project"] = os_release.get("NAME", "")
    cfg_args["releasever"] = os_release.get("VERSION_ID", "")
    cfg_args["volid"] = ""

    cfg_args.update({
        "compression": "xz",
        "compress_args": [],
        "ks": [ks_path],
        "logfile": log_dir,
        "timeout": 60,  # 60 minute timeout
    })
    with open(joinpaths(results_dir, "config.toml"), "w") as f:
        f.write(toml.dumps(cfg_args))

    # Set the initial status
    open(joinpaths(results_dir, "STATUS"), "w").write("WAITING")

    # Set the test mode, if requested
    if test_mode > 0:
        with open(joinpaths(results_dir, "TEST"), "w") as f:
            f.write("%s" % test_mode)

    write_timestamp(results_dir, TS_CREATED)
    log.info("Adding %s (%s %s) to compose queue", build_id, recipe["name"],
             compose_type)
    os.symlink(results_dir, joinpaths(lib_dir, "queue/new/", build_id))

    return build_id
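
A minimal usage sketch for start_build; cfg, dnflock, and gitlock are assumed stand-ins for the ComposerConfig and lock wrappers the composer server creates elsewhere, and the branch, recipe name, and compose type are placeholder values:

# Hypothetical call -- every object and value here is a stand-in.
build_id = start_build(cfg, dnflock, gitlock,
                       branch="master",
                       recipe_name="example-http-server",
                       compose_type="qcow2")
log.info("compose %s queued", build_id)
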
Example #2
def get_dnf_base_object(installroot,
                        sources,
                        mirrorlists=None,
                        repos=None,
                        enablerepos=None,
                        disablerepos=None,
                        tempdir="/var/tmp",
                        proxy=None,
                        releasever="34",
                        cachedir=None,
                        logdir=None,
                        sslverify=True,
                        dnfplugins=None):
    """ Create a dnf Base object and setup the repositories and installroot

        :param string installroot: Full path to the installroot
        :param list sources: List of source repo urls to use for the installation
        :param list enablerepos: List of repo names to enable
        :param list disablerepos: List of repo names to disable
        :param list mirrorlist: List of mirrors to use
        :param string tempdir: Path of temporary directory
        :param string proxy: http proxy to use when fetching packages
        :param string releasever: Release version to pass to dnf
        :param string cachedir: Directory to use for caching packages
        :param bool noverifyssl: Set to True to ignore the CA of ssl certs. eg. use self-signed ssl for https repos.

        If tempdir is not set /var/tmp is used.
        If cachedir is None a dnf.cache directory is created inside tmpdir
    """
    def sanitize_repo(repo):
        """Convert bare paths to file:/// URIs, and silently reject protocols unhandled by yum"""
        if repo.startswith("/"):
            return "file://{0}".format(repo)
        elif any(
                repo.startswith(p)
                for p in ('http://', 'https://', 'ftp://', 'file://')):
            return repo
        else:
            return None
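    # e.g. sanitize_repo("/srv/repo")          -> "file:///srv/repo"
    #      sanitize_repo("https://host/repo")  -> "https://host/repo"
    #      sanitize_repo("nfs://host/repo")    -> None (unsupported protocol)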

    mirrorlists = mirrorlists or []
    enablerepos = enablerepos or []
    disablerepos = disablerepos or []

    # sanitize the repositories
    sources = [sanitize_repo(r) for r in sources]
    mirrorlists = [sanitize_repo(r) for r in mirrorlists]

    # remove invalid repositories
    sources = [r for r in sources if r]
    mirrorlists = [r for r in mirrorlists if r]

    if not cachedir:
        cachedir = os.path.join(tempdir, "dnf.cache")
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)

    if not logdir:
        logdir = os.path.join(tempdir, "dnf.logs")
    if not os.path.isdir(logdir):
        os.mkdir(logdir)

    dnfbase = dnf.Base()
    # Enable DNF plugins
    # NOTE: These come from the HOST system's environment
    if dnfplugins:
        if dnfplugins[0] == "*":
            # Enable them all
            dnfbase.init_plugins()
        else:
            # Only enable the listed plugins
            dnfbase.init_plugins(disabled_glob=["*"],
                                 enable_plugins=dnfplugins)
    conf = dnfbase.conf
    conf.logdir = logdir
    conf.cachedir = cachedir

    conf.install_weak_deps = False
    conf.releasever = releasever
    conf.installroot = installroot
    conf.prepend_installroot('persistdir')
    # tsflags is an 'AppendOption': assigning to it appends 'nodocs' to the
    # existing list of values in libdnf instead of replacing them.
    conf.tsflags = ['nodocs']
    # Log details about the solver
    conf.debug_solver = True

    if proxy:
        conf.proxy = proxy

    if not sslverify:
        conf.sslverify = False

    # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly
    if not os.path.exists("/etc/os-release"):
        log.warning(
            "/etc/os-release is missing, cannot determine platform id, falling back to %s",
            DEFAULT_PLATFORM_ID)
        platform_id = DEFAULT_PLATFORM_ID
    else:
        os_release = flatconfig("/etc/os-release")
        platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID)
    log.info("Using %s for module_platform_id", platform_id)
    conf.module_platform_id = platform_id

    # Add .repo files
    if repos:
        reposdir = os.path.join(tempdir, "dnf.repos")
        if not os.path.isdir(reposdir):
            os.mkdir(reposdir)
        for r in repos:
            shutil.copy2(r, reposdir)
        conf.reposdir = [reposdir]
        dnfbase.read_all_repos()

    # add the sources
    for i, r in enumerate(sources):
        if "SRPM" in r or "srpm" in r:
            log.info("Skipping source repo: %s", r)
            continue
        repo_name = "lorax-repo-%d" % i
        repo = dnf.repo.Repo(repo_name, conf)
        repo.baseurl = [r]
        if proxy:
            repo.proxy = proxy
        repo.enable()
        dnfbase.repos.add(repo)
        log.info("Added '%s': %s", repo_name, r)
        log.info("Fetching metadata...")
        try:
            repo.load()
        except dnf.exceptions.RepoError as e:
            log.error("Error fetching metadata for %s: %s", repo_name, e)
            return None

    # add the mirrorlists
    for i, r in enumerate(mirrorlists):
        if "SRPM" in r or "srpm" in r:
            log.info("Skipping source repo: %s", r)
            continue
        repo_name = "lorax-mirrorlist-%d" % i
        repo = dnf.repo.Repo(repo_name, conf)
        repo.mirrorlist = r
        if proxy:
            repo.proxy = proxy
        repo.enable()
        dnfbase.repos.add(repo)
        log.info("Added '%s': %s", repo_name, r)
        log.info("Fetching metadata...")
        try:
            repo.load()
        except dnf.exceptions.RepoError as e:
            log.error("Error fetching metadata for %s: %s", repo_name, e)
            return None

    # Enable repos listed on the cmdline
    for r in enablerepos:
        repolist = dnfbase.repos.get_matching(r)
        if not repolist:
            log.warning("%s is an unknown repo, not enabling it", r)
        else:
            repolist.enable()
            log.info("Enabled repo %s", r)

    # Disable repos listed on the cmdline
    for r in disablerepos:
        repolist = dnfbase.repos.get_matching(r)
        if not repolist:
            log.warning("%s is an unknown repo, not disabling it", r)
        else:
            repolist.disable()
            log.info("Disabled repo %s", r)

    dnfbase.fill_sack(load_system_repo=False)
    dnfbase.read_comps()

    return dnfbase
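
A minimal usage sketch; the installroot path and repo url are placeholders, and the None check matches the error path above when repo metadata cannot be fetched:

# Hypothetical call -- the path and url are stand-ins.
dbo = get_dnf_base_object("/var/tmp/test-installroot",
                          ["https://example.com/fedora/34/x86_64/os/"])
if dbo is None:
    raise SystemExit("Fetching repo metadata failed")
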
Example #3
def get_base_object(conf):
    """Get the DNF object with settings from the config file

    :param conf: configuration object
    :type conf: ComposerParser
    :returns: A DNF Base object
    :rtype: dnf.Base
    """
    cachedir = os.path.abspath(conf.get("composer", "cache_dir"))
    dnfconf = os.path.abspath(conf.get("composer", "dnf_conf"))
    dnfroot = os.path.abspath(conf.get("composer", "dnf_root"))
    repodir = os.path.abspath(conf.get("composer", "repo_dir"))

    # Setup the config for the DNF Base object
    dbo = dnf.Base()
    dbc = dbo.conf
    # TODO - Handle this
    #    dbc.logdir = logdir
    dbc.installroot = dnfroot
    if not os.path.isdir(dnfroot):
        os.makedirs(dnfroot)
    if not os.path.isdir(repodir):
        os.makedirs(repodir)

    dbc.cachedir = cachedir
    dbc.reposdir = [repodir]
    dbc.install_weak_deps = False
    dbc.prepend_installroot('persistdir')
    # tsflags is an 'AppendOption': assigning to it appends 'nodocs' to the
    # existing list of values in libdnf instead of replacing them.
    dbc.tsflags = ['nodocs']

    if conf.get_default("dnf", "proxy", None):
        dbc.proxy = conf.get("dnf", "proxy")

    if conf.has_option("dnf", "sslverify") and not conf.getboolean("dnf", "sslverify"):
        dbc.sslverify = False

    _releasever = conf.get_default("composer", "releasever", None)
    if not _releasever:
        # Use the releasever of the host system
        _releasever = dnf.rpm.detect_releasever("/")
    log.info("releasever = %s", _releasever)
    dbc.releasever = _releasever

    # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly
    if not os.path.exists("/etc/os-release"):
        log.warning(
            "/etc/os-release is missing, cannot determine platform id, falling back to %s",
            DEFAULT_PLATFORM_ID)
        platform_id = DEFAULT_PLATFORM_ID
    else:
        os_release = flatconfig("/etc/os-release")
        platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID)
    log.info("Using %s for module_platform_id", platform_id)
    dbc.module_platform_id = platform_id

    # Make sure metadata is always current
    dbc.metadata_expire = 0
    dbc.metadata_expire_filter = "never"

    # write the dnf configuration file
    with open(dnfconf, "w") as f:
        f.write(dbc.dump())

    # dnf needs the repos all in one directory; composer uses repodir for this.
    # If system repos are supposed to be used, copy them into repodir, overwriting any previous copies.
    if not conf.has_option("repos", "use_system_repos") or conf.getboolean(
            "repos", "use_system_repos"):
        for repo_file in glob("/etc/yum.repos.d/*.repo"):
            shutil.copy2(repo_file, repodir)
    dbo.read_all_repos()

    # Update the metadata from the enabled repos to speed up later operations
    log.info("Updating repository metadata")
    try:
        dbo.fill_sack(load_system_repo=False)
        dbo.read_comps()
        dbo.update_cache()
    except dnf.exceptions.Error as e:
        log.error("Failed to update metadata: %s", str(e))
        raise RuntimeError("Fetching metadata failed: %s" % str(e))

    return dbo
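
A minimal usage sketch with a configparser-based stand-in for ComposerParser; the class name, the get_default() helper, and every path below are assumptions for illustration only:

import configparser
import os

class FakeComposerParser(configparser.ConfigParser):
    """Hypothetical stand-in adding the get_default() helper used above."""
    def get_default(self, section, option, default):
        return self.get(section, option, fallback=default)

os.makedirs("/var/tmp/composer", exist_ok=True)  # parent dir for the sketch paths
conf = FakeComposerParser()
conf.read_dict({"composer": {"cache_dir": "/var/tmp/composer/cache",
                             "dnf_conf": "/var/tmp/composer/dnf.conf",
                             "dnf_root": "/var/tmp/composer/dnf/root",
                             "repo_dir": "/var/tmp/composer/repos.d"},
                "dnf": {}})
dbo = get_base_object(conf)
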
Example #4
def get_base_object(conf):
    """Get the DNF object with settings from the config file

    :param conf: configuration object
    :type conf: ComposerParser
    :returns: A DNF Base object
    :rtype: dnf.Base
    """
    cachedir = os.path.abspath(conf.get("composer", "cache_dir"))
    dnfconf = os.path.abspath(conf.get("composer", "dnf_conf"))
    dnfroot = os.path.abspath(conf.get("composer", "dnf_root"))
    repodir = os.path.abspath(conf.get("composer", "repo_dir"))

    # Setup the config for the DNF Base object
    dbo = dnf.Base()
    dbc = dbo.conf
    # TODO - Handle this
    #    dbc.logdir = logdir
    dbc.installroot = dnfroot
    if not os.path.isdir(dnfroot):
        os.makedirs(dnfroot)
    if not os.path.isdir(repodir):
        os.makedirs(repodir)

    dbc.cachedir = cachedir
    dbc.reposdir = [repodir]
    dbc.install_weak_deps = False
    dbc.prepend_installroot('persistdir')
    # tsflags is an 'AppendOption': assigning to it appends 'nodocs' to the
    # existing list of values in libdnf instead of replacing them.
    dbc.tsflags = ['nodocs']

    if conf.get_default("dnf", "proxy", None):
        dbc.proxy = conf.get("dnf", "proxy")

    if conf.has_option("dnf", "sslverify") and not conf.getboolean("dnf", "sslverify"):
        dbc.sslverify = False

    # If the system repos are enabled read the dnf vars from /etc/dnf/vars/
    if not conf.has_option("repos", "use_system_repos") or conf.getboolean(
            "repos", "use_system_repos"):
        dbc.substitutions.update_from_etc("/")
        log.info("dnf vars: %s", dbc.substitutions)

    _releasever = conf.get_default("composer", "releasever", None)
    if not _releasever:
        # Use the releasever of the host system
        _releasever = dnf.rpm.detect_releasever("/")
    log.info("releasever = %s", _releasever)
    dbc.releasever = _releasever

    # DNF 3.2 needs to have module_platform_id set, otherwise depsolve won't work correctly
    if not os.path.exists("/etc/os-release"):
        log.warning(
            "/etc/os-release is missing, cannot determine platform id, falling back to %s",
            DEFAULT_PLATFORM_ID)
        platform_id = DEFAULT_PLATFORM_ID
    else:
        os_release = flatconfig("/etc/os-release")
        platform_id = os_release.get("PLATFORM_ID", DEFAULT_PLATFORM_ID)
    log.info("Using %s for module_platform_id", platform_id)
    dbc.module_platform_id = platform_id

    # Make sure metadata is always current
    dbc.metadata_expire = 0
    dbc.metadata_expire_filter = "never"

    # write the dnf configuration file
    with open(dnfconf, "w") as f:
        f.write(dbc.dump())

    # dnf needs the repos all in one directory; composer uses repodir for this.
    # If system repos are supposed to be used, copy them into repodir, overwriting any previous copies.
    if not conf.has_option("repos", "use_system_repos") or conf.getboolean(
            "repos", "use_system_repos"):
        for repo_file in glob("/etc/yum.repos.d/*.repo"):
            shutil.copy2(repo_file, repodir)
    dbo.read_all_repos()

    # Remove any duplicate repo entries. Duplicates can cause problems with Anaconda,
    # which can fail with disk space errors.
    repos = sorted(r.id for r in dbo.repos.iter_enabled())
    seen = {"baseurl": [], "mirrorlist": [], "metalink": []}
    for source_name in repos:
        remove = False
        repo = dbo.repos.get(source_name, None)
        if repo is None:
            log.warning("repo %s vanished while removing duplicates",
                        source_name)
            continue
        if repo.baseurl:
            if repo.baseurl[0] in seen["baseurl"]:
                log.info("Removing duplicate repo: %s baseurl=%s", source_name,
                         repo.baseurl[0])
                remove = True
            else:
                seen["baseurl"].append(repo.baseurl[0])
        elif repo.mirrorlist:
            if repo.mirrorlist in seen["mirrorlist"]:
                log.info("Removing duplicate repo: %s mirrorlist=%s",
                         source_name, repo.mirrorlist)
                remove = True
            else:
                seen["mirrorlist"].append(repo.mirrorlist)
        elif repo.metalink:
            if repo.metalink in seen["metalink"]:
                log.info("Removing duplicate repo: %s metalink=%s",
                         source_name, repo.metalink)
                remove = True
            else:
                seen["metalink"].append(repo.metalink)

        if remove:
            del dbo.repos[source_name]

    # Update the metadata from the enabled repos to speed up later operations
    log.info("Updating repository metadata")
    try:
        dbo.fill_sack(load_system_repo=False)
        dbo.read_comps()
        dbo.update_cache()
    except dnf.exceptions.Error as e:
        log.error("Failed to update metadata: %s", str(e))
        raise RuntimeError("Fetching metadata failed: %s" % str(e))

    return dbo
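
The duplicate check above keys each enabled repo on its first baseurl, then its mirrorlist, then its metalink, in that order. A standalone sketch of that rule (the helper name is an assumption, not part of the original source):

def repo_source_key(repo):
    """Identity used for duplicate detection, mirroring the checks above."""
    if repo.baseurl:
        return ("baseurl", repo.baseurl[0])
    if repo.mirrorlist:
        return ("mirrorlist", repo.mirrorlist)
    if repo.metalink:
        return ("metalink", repo.metalink)
    return None  # a repo with no source is never treated as a duplicate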