    def test_good_ks_virt(self):
        """Test a good kickstart with virt"""
        opts = DataHolder(no_virt=False, make_fsimage=False, make_pxe_live=False)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("url --url=http://dl.fedoraproject.com\n"
                                   "network --bootproto=dhcp --activate\n"
                                   "repo --name=other --baseurl=http://dl.fedoraproject.com\n"
                                   "part / --size=4096\n"
                                   "shutdown\n")
        self.assertEqual(check_kickstart(ks, opts), [])

    def test_disk_size_align(self):
        """Test aligning the disk size"""
        opts = DataHolder(no_virt=True, make_fsimage=False, make_iso=False, make_pxe_live=False,
                          image_size_align=1024)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("url --url=http://dl.fedoraproject.com\n"
                                   "network --bootproto=dhcp --activate\n"
                                   "repo --name=other --baseurl=http://dl.fedoraproject.com\n"
                                   "part / --size=4096\n"
                                   "shutdown\n")
        self.assertEqual(calculate_disk_size(opts, ks), 5120)

def make_runtime(opts, mount_dir, work_dir, size=None):
    """
    Make the squashfs image from a directory

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str mount_dir: Directory tree to compress
    :param str work_dir: Output compressed image to work_dir+images/install.img
    :param int size: Size of disk image, in GiB
    :returns: rc of squashfs creation
    :rtype: int
    """
    kernel_arch = get_arch(mount_dir)

    # Fake dnf object
    fake_dbo = FakeDNF(conf=DataHolder(installroot=mount_dir))
    # Fake arch with only basearch set
    arch = ArchData(kernel_arch)
    # TODO: Need to get release info from someplace...
    product = DataHolder(name=opts.project, version=opts.releasever, release="",
                         variant="", bugurl="", isfinal=False)

    rb = RuntimeBuilder(product, arch, fake_dbo)
    compression, compressargs = squashfs_args(opts)

    if opts.squashfs_only:
        log.info("Creating a squashfs only runtime")
        return rb.create_squashfs_runtime(joinpaths(work_dir, RUNTIME), size=size,
                                          compression=compression, compressargs=compressargs)
    else:
        log.info("Creating a squashfs+ext4 runtime")
        return rb.create_ext4_runtime(joinpaths(work_dir, RUNTIME), size=size,
                                      compression=compression, compressargs=compressargs)

    def test_squashfs_args(self):
        """Test squashfs_args results"""
        test_arches = {"x86_64": ("xz", ["-Xbcj", "x86"]),
                       "ppc64le": ("xz", ["-Xbcj", "powerpc"]),
                       "s390x": ("xz", []),
                       "ia64": ("xz", []),
                       "aarch64": ("xz", [])
                      }

        for arch in test_arches:
            opts = DataHolder(compression=None, compress_args=[], arch=arch)
            self.assertEqual(squashfs_args(opts), test_arches[arch], (opts, squashfs_args(opts)))

        opts = DataHolder(compression="lzma", compress_args=[], arch="x86_64")
        self.assertEqual(squashfs_args(opts), ("lzma", []), (opts, squashfs_args(opts)))

        opts = DataHolder(compression="xz", compress_args=["-X32767"], arch="x86_64")
        self.assertEqual(squashfs_args(opts), ("xz", ["-X32767"]), (opts, squashfs_args(opts)))

        opts = DataHolder(compression="xz", compress_args=["-X32767", "-Xbcj x86"], arch="x86_64")
        self.assertEqual(squashfs_args(opts), ("xz", ["-X32767", "-Xbcj", "x86"]),
                         (opts, squashfs_args(opts)))

    def test_shutdown_virt(self):
        """Test a kickstart with reboot instead of shutdown"""
        opts = DataHolder(no_virt=False, make_fsimage=True, make_pxe_live=False)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("url --url=http://dl.fedoraproject.com\n"
                                   "network --bootproto=dhcp --activate\n"
                                   "repo --name=other --baseurl=http://dl.fedoraproject.com\n"
                                   "part / --size=4096\n"
                                   "reboot\n")
        errors = check_kickstart(ks, opts)
        self.assertTrue("must include shutdown when using virt" in errors[0])

    def test_autopart(self):
        """Test a kickstart with autopart"""
        opts = DataHolder(no_virt=True, make_fsimage=True, make_pxe_live=False)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("url --url=http://dl.fedoraproject.com\n"
                                   "network --bootproto=dhcp --activate\n"
                                   "repo --name=other --baseurl=http://dl.fedoraproject.com\n"
                                   "autopart\n"
                                   "shutdown\n")
        errors = check_kickstart(ks, opts)
        self.assertTrue("Filesystem images must use a single" in errors[0])

    def test_nomethod_novirt(self):
        """Test a kickstart with repo and no url"""
        opts = DataHolder(no_virt=True, make_fsimage=False, make_pxe_live=False)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("network --bootproto=dhcp --activate\n"
                                   "repo --name=other --baseurl=http://dl.fedoraproject.com\n"
                                   "part / --size=4096\n"
                                   "shutdown\n")
        errors = check_kickstart(ks, opts)
        self.assertTrue("Only url, nfs and ostreesetup" in errors[0])
        self.assertTrue("repo can only be used with the url" in errors[1])

    def test_dracut_args(self):
        """Test dracut_args results"""
        # Use default args
        opts = DataHolder(dracut_args=None, dracut_conf=None)
        self.assertEqual(dracut_args(opts), DRACUT_DEFAULT)

        # Use a config file from --dracut-conf
        opts = DataHolder(dracut_args=None, dracut_conf="/var/tmp/project/lmc-dracut.conf")
        self.assertEqual(dracut_args(opts), ["--conf", "/var/tmp/project/lmc-dracut.conf"])

        # Use --dracut-arg
        opts = DataHolder(dracut_args=["--xz", "--omit plymouth",
                                       "--add livenet dmsquash-live dmsquash-live-ntfs"],
                          dracut_conf=None)
        self.assertEqual(dracut_args(opts),
                         ["--xz", "--omit", "plymouth",
                          "--add", "livenet dmsquash-live dmsquash-live-ntfs"])

    def test_no_network(self):
        """Test a kickstart with missing network command"""
        opts = DataHolder(no_virt=True, make_fsimage=False, make_pxe_live=False)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("url --url=http://dl.fedoraproject.com\n"
                                   "part / --size=4096\n"
                                   "shutdown\n")
        errors = check_kickstart(ks, opts)
        self.assertTrue("The kickstart must activate networking" in errors[0])

    def get_branding(self, skip, product):
        """Select the branding from the available 'system-release' packages

        The *best* way to control this is to have a single package in the repo provide 'system-release'.
        When there is more than one package it will:
        - Make a list of the available packages
        - If variant is set, look for a package ending with lower(variant) and use that
        - If there are one or more non-generic packages, use the first one after sorting

        Returns the package names of the system-release and release logos packages
        """
        if skip:
            return DataHolder(release=None, logos=None)

        release = None
        q = self.dbo.sack.query()
        a = q.available()
        pkgs = sorted([p.name for p in a.filter(provides='system-release')
                       if not p.name.startswith("generic")])
        if not pkgs:
            logger.error("No system-release packages found, could not get the release")
            return DataHolder(release=None, logos=None)

        logger.debug("system-release packages: %s", pkgs)
        if product.variant:
            variant = [p for p in pkgs if p.endswith("-"+product.variant.lower())]
            if variant:
                release = variant[0]
        if not release:
            release = pkgs[0]

        # release
        logger.info('got release: %s', release)

        # logos uses the basename from release (fedora, redhat, centos, ...)
        logos, _suffix = release.split('-', 1)
        return DataHolder(release=release, logos=logos+"-logos")

    def __init__(self, product, arch, inroot, outroot, runtime, isolabel, domacboot=True, doupgrade=True,
                 templatedir=None, add_templates=None, add_template_vars=None, workdir=None):
        # NOTE: if you pass an arg named "runtime" to a mako template it'll
        # clobber some mako internal variables - hence "runtime_img".
        self.vars = DataHolder(arch=arch, product=product, runtime_img=runtime,
                               runtime_base=basename(runtime),
                               inroot=inroot, outroot=outroot,
                               basearch=arch.basearch, libdir=arch.libdir,
                               isolabel=isolabel, udev=udev_escape, domacboot=domacboot,
                               doupgrade=doupgrade, workdir=workdir, lower=string_lower)
        self._runner = LoraxTemplateRunner(inroot, outroot, templatedir=templatedir)
        self._runner.defaults = self.vars
        self.add_templates = add_templates or []
        self.add_template_vars = add_template_vars or {}
        self.templatedir = templatedir
        self.treeinfo_data = None

def make_appliance(disk_img, name, template, outfile, networks=None, ram=1024,
                   vcpus=1, arch=None, title="Linux", project="Linux", releasever="29"):
    """ Generate an appliance description file

    :param str disk_img: Full path of the disk image
    :param str name: Name of the appliance, passed to the template
    :param str template: Full path of Mako template
    :param str outfile: Full path of file to write, using template
    :param list networks: List of networks(str) from the kickstart
    :param int ram: Ram, in MiB, passed to template. Default is 1024
    :param int vcpus: CPUs, passed to template. Default is 1
    :param str arch: CPU architecture. Default is 'x86_64'
    :param str title: Title, passed to template. Default is 'Linux'
    :param str project: Project, passed to template. Default is 'Linux'
    :param str releasever: Release version, passed to template. Default is 29
    """
    if not (disk_img and template and outfile):
        return None

    log.info("Creating appliance definition using %s", template)

    if not arch:
        arch = "x86_64"

    log.info("Calculating SHA256 checksum of %s", disk_img)
    sha256 = hashlib.sha256()
    with open(disk_img, "rb") as f:
        while True:
            data = f.read(1024**2)
            if not data:
                break
            sha256.update(data)
    log.info("SHA256 of %s is %s", disk_img, sha256.hexdigest())
    disk_info = DataHolder(name=os.path.basename(disk_img), format="raw",
                           checksum_type="sha256", checksum=sha256.hexdigest())
    try:
        result = Template(filename=template).render(disks=[disk_info], name=name,
                                                    arch=arch, memory=ram, vcpus=vcpus,
                                                    networks=networks, title=title,
                                                    project=project, releasever=releasever)
    except Exception:
        log.error(text_error_template().render())
        raise

    with open(outfile, "w") as f:
        f.write(result)

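# A possible invocation, shown here only as a sketch; the disk image path,
# template path, and output file below are hypothetical examples, not values
# used elsewhere in this code:
#
#   make_appliance("/var/tmp/lmc-disk.img", "example-appliance",
#                  "/usr/share/lorax/appliance/libvirt.tmpl",
#                  "/var/tmp/example-appliance.xml",
#                  networks=["default"], ram=2048, vcpus=2, arch="x86_64",
#                  title="Example", project="Example", releasever="29")
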
    def test_displaymode(self):
        """Test a kickstart with displaymode set"""
        opts = DataHolder(no_virt=True, make_fsimage=False, make_pxe_live=False)
        ks_version = makeVersion()
        ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
        ks.readKickstartFromString("url --url=http://dl.fedoraproject.com\n"
                                   "network --bootproto=dhcp --activate\n"
                                   "repo --name=other --baseurl=http://dl.fedoraproject.com\n"
                                   "part / --size=4096\n"
                                   "shutdown\n"
                                   "graphical\n")
        errors = check_kickstart(ks, opts)
        self.assertTrue("must not set a display mode" in errors[0])

    @classmethod
    def setUpClass(self):
        self.maxDiff = None
        self.config = dict()
        repo_dir = tempfile.mkdtemp(prefix="lorax.test.repo.")
        self.config["REPO_DIR"] = repo_dir
        self.config["COMPOSER_CFG"] = configure(root_dir=repo_dir, test_config=True)
        lifted.config.configure(self.config["COMPOSER_CFG"])
        os.makedirs(joinpaths(self.config["COMPOSER_CFG"].get("composer", "share_dir"), "composer"))
        errors = make_queue_dirs(self.config["COMPOSER_CFG"], os.getgid())
        if errors:
            raise RuntimeError("\n".join(errors))

        lib_dir = self.config["COMPOSER_CFG"].get("composer", "lib_dir")
        share_dir = self.config["COMPOSER_CFG"].get("composer", "share_dir")
        tmp = self.config["COMPOSER_CFG"].get("composer", "tmp")
        self.monitor_cfg = DataHolder(composer_dir=lib_dir, share_dir=share_dir, uid=0, gid=0, tmp=tmp)

def start_queue_monitor(cfg, uid, gid):
    """Start the queue monitor as a mp process

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uid: User ID that owns the queue
    :type uid: int
    :param gid: Group ID that owns the queue
    :type gid: int
    :returns: None
    """
    lib_dir = cfg.get("composer", "lib_dir")
    share_dir = cfg.get("composer", "share_dir")
    tmp = cfg.get("composer", "tmp")
    monitor_cfg = DataHolder(cfg=cfg, composer_dir=lib_dir, share_dir=share_dir, uid=uid, gid=gid, tmp=tmp)
    p = mp.Process(target=monitor, args=(monitor_cfg,))
    p.daemon = True
    p.start()

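# A minimal usage sketch (illustrative only): cfg would be the ComposerConfig
# loaded by the composer server, and uid/gid are whatever account should own
# the queue directories.
#
#   start_queue_monitor(cfg, uid=0, gid=0)
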
    def __init__(self, product, arch, dbo, templatedir=None,
                 installpkgs=None, excludepkgs=None,
                 add_templates=None, add_template_vars=None):
        root = dbo.conf.installroot
        # use a copy of product so we can modify it locally
        product = product.copy()
        product.name = product.name.lower()
        self.vars = DataHolder(arch=arch, product=product, dbo=dbo, root=root,
                               basearch=arch.basearch, libdir=arch.libdir)
        self.dbo = dbo
        self._runner = LoraxTemplateRunner(inroot=root, outroot=root,
                                           dbo=dbo, templatedir=templatedir)
        self.add_templates = add_templates or []
        self.add_template_vars = add_template_vars or {}
        self._installpkgs = installpkgs or []
        self._excludepkgs = excludepkgs or []
        self._runner.defaults = self.vars
        self.dbo.reset()

    def test_make_runtime_squashfs_ext4(self):
        """Test making a squashfs+ext4 runtime image"""
        with tempfile.TemporaryDirectory(prefix="lorax.test.") as work_dir:
            with tempfile.TemporaryDirectory(prefix="lorax.test.root.") as mount_dir:
                # Make a fake kernel and initrd
                mkFakeBoot(mount_dir)
                opts = DataHolder(project="Fedora", releasever="devel", compression="xz",
                                  compress_args=[], arch="x86_64", squashfs_only=False)
                make_runtime(opts, mount_dir, work_dir)

                # Make sure it made an install.img
                self.assertTrue(os.path.exists(joinpaths(work_dir, "images/install.img")))

                # Make sure it looks like a squashfs filesystem
                file_details = get_file_magic(joinpaths(work_dir, "images/install.img"))
                self.assertTrue("Squashfs" in file_details)

                # Make sure there is a rootfs.img inside the squashfs
                cmd = ["unsquashfs", "-n", "-l", joinpaths(work_dir, "images/install.img")]
                results = runcmd_output(cmd)
                self.assertTrue("rootfs.img" in results)

    def test_make_squashfs(self):
        """Test making a squashfs image"""
        with tempfile.TemporaryDirectory(prefix="lorax.test.") as work_dir:
            with tempfile.NamedTemporaryFile(prefix="lorax.test.disk.") as disk_img:
                # Make a small ext4 disk image
                mksparse(disk_img.name, 42 * 1024**2)
                runcmd(["mkfs.ext4", "-L", "Anaconda", "-b", "4096", "-m", "0", disk_img.name])

                opts = DataHolder(compression="xz", arch="x86_64")
                make_squashfs(opts, disk_img.name, work_dir)

                # Make sure it made an install.img
                self.assertTrue(os.path.exists(joinpaths(work_dir, "images/install.img")))

                # Make sure it looks like a squashfs filesystem
                file_details = get_file_magic(joinpaths(work_dir, "images/install.img"))
                self.assertTrue("Squashfs" in file_details)

def make_compose(cfg, results_dir):
    """Run anaconda with the final-kickstart.ks from results_dir

    :param cfg: Configuration settings
    :type cfg: DataHolder
    :param results_dir: The directory containing the metadata and results for the build
    :type results_dir: str
    :returns: Nothing
    :raises: May raise various exceptions

    This takes the final-kickstart.ks, and the settings in config.toml and runs Anaconda
    in no-virt mode (directly on the host operating system). Exceptions should be caught
    at the higher level.

    If there is a failure, the build artifacts will be cleaned up, and any logs will be
    moved into logs/anaconda/ and their ownership will be set to the user from the cfg
    object.
    """
    # Check on the ks's presence
    ks_path = joinpaths(results_dir, "final-kickstart.ks")
    if not os.path.exists(ks_path):
        raise RuntimeError("Missing kickstart file at %s" % ks_path)

    # The anaconda logs are copied into ./anaconda/ in this directory
    log_dir = joinpaths(results_dir, "logs/")
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Load the compose configuration
    cfg_path = joinpaths(results_dir, "config.toml")
    if not os.path.exists(cfg_path):
        raise RuntimeError("Missing config.toml for %s" % results_dir)
    cfg_dict = toml.loads(open(cfg_path, "r").read())

    # The keys in cfg_dict correspond to the arguments setup in livemedia-creator
    # keys that define what to build should be setup in compose_args, and keys with
    # defaults should be setup here.

    # Make sure that image_name contains no path components
    cfg_dict["image_name"] = os.path.basename(cfg_dict["image_name"])

    # Only support novirt installation, set some other defaults
    cfg_dict["no_virt"] = True
    cfg_dict["disk_image"] = None
    cfg_dict["fs_image"] = None
    cfg_dict["keep_image"] = False
    cfg_dict["domacboot"] = False
    cfg_dict["anaconda_args"] = ""
    cfg_dict["proxy"] = ""
    cfg_dict["armplatform"] = ""
    cfg_dict["squashfs_args"] = None

    cfg_dict["lorax_templates"] = find_templates(cfg.share_dir)
    cfg_dict["tmp"] = cfg.tmp
    cfg_dict["dracut_args"] = None      # Use default args for dracut

    # TODO How to support other arches?
    cfg_dict["arch"] = None

    # Compose things in a temporary directory inside the results directory
    cfg_dict["result_dir"] = joinpaths(results_dir, "compose")
    os.makedirs(cfg_dict["result_dir"])

    install_cfg = DataHolder(**cfg_dict)

    # Some kludges for the 99-copy-logs %post, failure in it will crash the build
    for f in ["/tmp/NOSAVE_INPUT_KS", "/tmp/NOSAVE_LOGS"]:
        open(f, "w")

    # Placing a CANCEL file in the results directory will make execWithRedirect send anaconda a SIGTERM
    def cancel_build():
        return os.path.exists(joinpaths(results_dir, "CANCEL"))

    log.debug("cfg = %s", install_cfg)
    try:
        test_path = joinpaths(results_dir, "TEST")
        if os.path.exists(test_path):
            # Pretend to run the compose
            time.sleep(10)
            try:
                test_mode = int(open(test_path, "r").read())
            except Exception:
                test_mode = 1
            if test_mode == 1:
                raise RuntimeError("TESTING FAILED compose")
            else:
                open(joinpaths(results_dir, install_cfg.image_name), "w").write("TEST IMAGE")
        else:
            run_creator(install_cfg, callback_func=cancel_build)

            # Extract the results of the compose into results_dir and cleanup the compose directory
            move_compose_results(install_cfg, results_dir)
    finally:
        # Make sure any remaining temporary directories are removed (eg. if there was an exception)
        for d in glob(joinpaths(cfg.tmp, "lmc-*")):
            if os.path.isdir(d):
                shutil.rmtree(d)
            elif os.path.isfile(d):
                os.unlink(d)

        # Make sure that everything under the results directory is owned by the user
        user = pwd.getpwuid(cfg.uid).pw_name
        group = grp.getgrgid(cfg.gid).gr_name
        log.debug("Install finished, chowning results to %s:%s", user, group)
        subprocess.call(["chown", "-R", "%s:%s" % (user, group), results_dir])

def make_livecd(opts, mount_dir, work_dir):
    """
    Take the content from the disk image and make a livecd out of it

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str mount_dir: Directory tree to compress
    :param str work_dir: Output compressed image to work_dir+images/install.img

    This uses wwood's squashfs live initramfs method:
     * put the real / into LiveOS/rootfs.img
     * make a squashfs of the LiveOS/rootfs.img tree
     * This is loaded by dracut when the cmdline is passed to the kernel:
       root=live:CDLABEL=<volid> rd.live.image
    """
    kernel_arch = get_arch(mount_dir)
    arch = ArchData(kernel_arch)

    # TODO: Need to get release info from someplace...
    product = DataHolder(name=opts.project, version=opts.releasever, release="",
                         variant="", bugurl="", isfinal=False)

    # Link /images to work_dir/images to make the templates happy
    if os.path.islink(joinpaths(mount_dir, "images")):
        os.unlink(joinpaths(mount_dir, "images"))
    execWithRedirect("/bin/ln", ["-s", joinpaths(work_dir, "images"),
                                 joinpaths(mount_dir, "images")])

    # The templates expect the config files to be in /tmp/config_files
    # I think these should be release specific, not from lorax, but for now
    configdir = joinpaths(opts.lorax_templates, "live/config_files/")
    configdir_path = "tmp/config_files"
    fullpath = joinpaths(mount_dir, configdir_path)
    if os.path.exists(fullpath):
        remove(fullpath)
    copytree(configdir, fullpath)

    isolabel = opts.volid or "{0.name}-{0.version}-{1.basearch}".format(product, arch)
    if len(isolabel) > 32:
        isolabel = isolabel[:32]
        log.warning("Truncating isolabel to 32 chars: %s", isolabel)

    tb = TreeBuilder(product=product, arch=arch, domacboot=opts.domacboot,
                     inroot=mount_dir, outroot=work_dir,
                     runtime=RUNTIME, isolabel=isolabel,
                     templatedir=joinpaths(opts.lorax_templates, "live/"))
    log.info("Rebuilding initrds")
    if not opts.dracut_args:
        dracut_args = DRACUT_DEFAULT
    else:
        dracut_args = []
        for arg in opts.dracut_args:
            dracut_args += arg.split(" ", 1)
    log.info("dracut args = %s", dracut_args)
    tb.rebuild_initrds(add_args=dracut_args)

    log.info("Building boot.iso")
    tb.build()

    return work_dir

    def test_fakednf(self):
        """Test FakeDNF class"""
        fake_dbo = FakeDNF(conf=DataHolder(installroot="/a/fake/install/root/"))
        self.assertEqual(fake_dbo.conf.installroot, "/a/fake/install/root/")

    def run(self, dbo, product, version, release, variant="", bugurl="",
            isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None,
            domacboot=True, doupgrade=True, remove_temp=False,
            installpkgs=None, excludepkgs=None,
            size=2,
            add_templates=None, add_template_vars=None,
            add_arch_templates=None, add_arch_template_vars=None,
            verify=True, user_dracut_args=None, squashfs_only=False):

        assert self._configured

        installpkgs = installpkgs or []
        excludepkgs = excludepkgs or []

        if domacboot:
            try:
                runcmd(["rpm", "-q", "hfsplus-tools"])
            except CalledProcessError:
                logger.critical("you need to install hfsplus-tools to create mac images")
                sys.exit(1)

        # set up work directory
        self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.")
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)

        # set up log directory
        logdir = self.conf.get("lorax", "logdir")
        if not os.path.isdir(logdir):
            os.makedirs(logdir)

        self.init_stream_logging()
        self.init_file_logging(logdir)

        logger.debug("version is %s", vernum)
        log_selinux_state()

        logger.debug("using work directory %s", self.workdir)
        logger.debug("using log directory %s", logdir)

        # set up output directory
        self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.")
        if not os.path.isdir(self.outputdir):
            os.makedirs(self.outputdir)
        logger.debug("using output directory %s", self.outputdir)

        # do we have root privileges?
        logger.info("checking for root privileges")
        if not os.geteuid() == 0:
            logger.critical("no root privileges")
            sys.exit(1)

        # do we have a proper dnf base object?
        logger.info("checking dnf base object")
        if not isinstance(dbo, dnf.Base):
            logger.critical("no dnf base object")
            sys.exit(1)
        self.inroot = dbo.conf.installroot
        logger.debug("using install root: %s", self.inroot)

        if not buildarch:
            buildarch = get_buildarch(dbo)

        logger.info("setting up build architecture")
        self.arch = ArchData(buildarch)
        for attr in ('buildarch', 'basearch', 'libdir'):
            logger.debug("self.arch.%s = %s", attr, getattr(self.arch, attr))

        logger.info("setting up build parameters")
        self.product = DataHolder(name=product, version=version, release=release,
                                  variant=variant, bugurl=bugurl, isfinal=isfinal)
        logger.debug("product data: %s", self.product)

        # NOTE: if you change isolabel, you need to change pungi to match, or
        # the pungi images won't boot.
        isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version, self.arch.basearch)

        if len(isolabel) > 32:
            logger.fatal("the volume id cannot be longer than 32 characters")
            sys.exit(1)

        # NOTE: rb.root = dbo.conf.installroot (== self.inroot)
        rb = RuntimeBuilder(product=self.product, arch=self.arch,
                            dbo=dbo, templatedir=self.templatedir,
                            installpkgs=installpkgs, excludepkgs=excludepkgs,
                            add_templates=add_templates,
                            add_template_vars=add_template_vars)

        logger.info("installing runtime packages")
        rb.install()

        # write .buildstamp
        buildstamp = BuildStamp(self.product.name, self.product.version, self.product.bugurl,
                                self.product.isfinal, self.arch.buildarch, self.product.variant)
        buildstamp.write(joinpaths(self.inroot, ".buildstamp"))

        if self.debug:
            rb.writepkglists(joinpaths(logdir, "pkglists"))
            rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt"))

        logger.info("doing post-install configuration")
        rb.postinstall()

        # write .discinfo
        discinfo = DiscInfo(self.product.release, self.arch.basearch)
        discinfo.write(joinpaths(self.outputdir, ".discinfo"))

        logger.info("backing up installroot")
        installroot = joinpaths(self.workdir, "installroot")
        linktree(self.inroot, installroot)

        logger.info("generating kernel module metadata")
        rb.generate_module_data()

        logger.info("cleaning unneeded files")
        rb.cleanup()

        if verify:
            logger.info("verifying the installroot")
            if not rb.verify():
                sys.exit(1)
        else:
            logger.info("Skipping verify")

        if self.debug:
            rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt"))

        logger.info("creating the runtime image")
        runtime = "images/install.img"
        compression = self.conf.get("compression", "type")
        compressargs = self.conf.get("compression", "args").split()     # pylint: disable=no-member
        if self.conf.getboolean("compression", "bcj"):
            if self.arch.bcj:
                compressargs += ["-Xbcj", self.arch.bcj]
            else:
                logger.info("no BCJ filter for arch %s", self.arch.basearch)
        if squashfs_only:
            # Create a squashfs-only runtime
            rb.create_squashfs_runtime(joinpaths(installroot, runtime),
                                       compression=compression, compressargs=compressargs,
                                       size=size)
        else:
            # Create an ext4 rootfs.img and compress it with squashfs
            rb.create_ext4_runtime(joinpaths(installroot, runtime),
                                   compression=compression, compressargs=compressargs,
                                   size=size)
        rb.finished()

        logger.info("preparing to build output tree and boot images")
        treebuilder = TreeBuilder(product=self.product, arch=self.arch,
                                  inroot=installroot, outroot=self.outputdir,
                                  runtime=runtime, isolabel=isolabel,
                                  domacboot=domacboot, doupgrade=doupgrade,
                                  templatedir=self.templatedir,
                                  add_templates=add_arch_templates,
                                  add_template_vars=add_arch_template_vars,
                                  workdir=self.workdir)

        logger.info("rebuilding initramfs images")
        if not user_dracut_args:
            dracut_args = DRACUT_DEFAULT
        else:
            dracut_args = []
            for arg in user_dracut_args:
                dracut_args += arg.split(" ", 1)

        anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"]

        # ppc64 cannot boot an initrd > 32MiB so remove some drivers
        if self.arch.basearch in ("ppc64", "ppc64le"):
            dracut_args.extend(["--omit-drivers", REMOVE_PPC64_DRIVERS])

            # Only omit dracut modules from the initrd so that they're kept for
            # upgrade.img
            anaconda_args.extend(["--omit", REMOVE_PPC64_MODULES])

        logger.info("dracut args = %s", dracut_args)
        logger.info("anaconda args = %s", anaconda_args)

        treebuilder.rebuild_initrds(add_args=anaconda_args)

        logger.info("populating output tree and building boot images")
        treebuilder.build()

        # write .treeinfo file and we're done
        treeinfo = TreeInfo(self.product.name, self.product.version,
                            self.product.variant, self.arch.basearch)
        for section, data in treebuilder.treeinfo_data.items():
            treeinfo.add_section(section, data)
        treeinfo.write(joinpaths(self.outputdir, ".treeinfo"))

        # cleanup
        if remove_temp:
            remove(self.workdir)

    def run(self, dbo, product, version, release, variant="", bugurl="",
            isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None,
            domacboot=True, doupgrade=True, remove_temp=False,
            installpkgs=None, excludepkgs=None,
            size=2,
            add_templates=None, add_template_vars=None,
            add_arch_templates=None, add_arch_template_vars=None,
            verify=True):

        assert self._configured

        installpkgs = installpkgs or []
        excludepkgs = excludepkgs or []

        if domacboot:
            try:
                runcmd(["rpm", "-q", "hfsplus-tools"])
            except CalledProcessError:
                logger.critical("you need to install hfsplus-tools to create mac images")
                sys.exit(1)

        # set up work directory
        self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.")
        if not os.path.isdir(self.workdir):
            os.makedirs(self.workdir)

        # set up log directory
        logdir = self.conf.get("lorax", "logdir")
        if not os.path.isdir(logdir):
            os.makedirs(logdir)

        self.init_stream_logging()
        self.init_file_logging(logdir)

        logger.debug("version is %s", vernum)
        logger.debug("using work directory %s", self.workdir)
        logger.debug("using log directory %s", logdir)

        # set up output directory
        self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.")
        if not os.path.isdir(self.outputdir):
            os.makedirs(self.outputdir)
        logger.debug("using output directory %s", self.outputdir)

        # do we have root privileges?
        logger.info("checking for root privileges")
        if not os.geteuid() == 0:
            logger.critical("no root privileges")
            sys.exit(1)

        # is selinux disabled?
        # With selinux in enforcing mode the rpcbind package required for the
        # dracut nfs module, which is in turn required by the anaconda module,
        # will not get installed, because its preinstall scriptlet fails,
        # resulting in an incomplete initial ramdisk image.
        # The reason is that the scriptlet runs tools from the shadow-utils
        # package in chroot, particularly groupadd and useradd to add the
        # required rpc group and rpc user. This operation fails, because
        # the selinux context on files in the chroot, that the shadow-utils
        # tools need to access (/etc/group, /etc/passwd, /etc/shadow etc.),
        # is wrong and selinux therefore disallows access to these files.
        logger.info("checking the selinux mode")
        if selinux.is_selinux_enabled() and selinux.security_getenforce():
            logger.critical("selinux must be disabled or in Permissive mode")
            sys.exit(1)

        # do we have a proper dnf base object?
        logger.info("checking dnf base object")
        if not isinstance(dbo, dnf.Base):
            logger.critical("no dnf base object")
            sys.exit(1)
        self.inroot = dbo.conf.installroot
        logger.debug("using install root: %s", self.inroot)

        if not buildarch:
            buildarch = get_buildarch(dbo)

        logger.info("setting up build architecture")
        self.arch = ArchData(buildarch)
        for attr in ('buildarch', 'basearch', 'libdir'):
            logger.debug("self.arch.%s = %s", attr, getattr(self.arch, attr))

        logger.info("setting up build parameters")
        self.product = DataHolder(name=product, version=version, release=release,
                                  variant=variant, bugurl=bugurl, isfinal=isfinal)
        logger.debug("product data: %s", self.product)

        # NOTE: if you change isolabel, you need to change pungi to match, or
        # the pungi images won't boot.
        isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version, self.arch.basearch)

        if len(isolabel) > 32:
            logger.fatal("the volume id cannot be longer than 32 characters")
            sys.exit(1)

        # NOTE: rb.root = dbo.conf.installroot (== self.inroot)
        rb = RuntimeBuilder(product=self.product, arch=self.arch,
                            dbo=dbo, templatedir=self.templatedir,
                            installpkgs=installpkgs, excludepkgs=excludepkgs,
                            add_templates=add_templates,
                            add_template_vars=add_template_vars)

        logger.info("installing runtime packages")
        rb.install()

        # write .buildstamp
        buildstamp = BuildStamp(self.product.name, self.product.version, self.product.bugurl,
                                self.product.isfinal, self.arch.buildarch, self.product.variant)
        buildstamp.write(joinpaths(self.inroot, ".buildstamp"))

        if self.debug:
            rb.writepkglists(joinpaths(logdir, "pkglists"))
            rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt"))

        logger.info("doing post-install configuration")
        rb.postinstall()

        # write .discinfo
        discinfo = DiscInfo(self.product.release, self.arch.basearch)
        discinfo.write(joinpaths(self.outputdir, ".discinfo"))

        logger.info("backing up installroot")
        installroot = joinpaths(self.workdir, "installroot")
        linktree(self.inroot, installroot)

        logger.info("generating kernel module metadata")
        rb.generate_module_data()

        logger.info("cleaning unneeded files")
        rb.cleanup()

        if verify:
            logger.info("verifying the installroot")
            if not rb.verify():
                sys.exit(1)
        else:
            logger.info("Skipping verify")

        if self.debug:
            rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt"))

        logger.info("creating the runtime image")
        runtime = "images/install.img"
        compression = self.conf.get("compression", "type")
        compressargs = self.conf.get("compression", "args").split()     # pylint: disable=no-member
        if self.conf.getboolean("compression", "bcj"):
            if self.arch.bcj:
                compressargs += ["-Xbcj", self.arch.bcj]
            else:
                logger.info("no BCJ filter for arch %s", self.arch.basearch)
        rb.create_runtime(joinpaths(installroot, runtime),
                          compression=compression, compressargs=compressargs,
                          size=size)
        rb.finished()

        logger.info("preparing to build output tree and boot images")
        treebuilder = TreeBuilder(product=self.product, arch=self.arch,
                                  inroot=installroot, outroot=self.outputdir,
                                  runtime=runtime, isolabel=isolabel,
                                  domacboot=domacboot, doupgrade=doupgrade,
                                  templatedir=self.templatedir,
                                  add_templates=add_arch_templates,
                                  add_template_vars=add_arch_template_vars,
                                  workdir=self.workdir)

        logger.info("rebuilding initramfs images")
        dracut_args = ["--xz", "--install", "/.buildstamp", "--no-early-microcode", "--add", "fips"]
        anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"]

        # ppc64 cannot boot an initrd > 32MiB so remove some drivers
        if self.arch.basearch in ("ppc64", "ppc64le"):
            dracut_args.extend(["--omit-drivers", REMOVE_PPC64_DRIVERS])

            # Only omit dracut modules from the initrd so that they're kept for
            # upgrade.img
            anaconda_args.extend(["--omit", REMOVE_PPC64_MODULES])

        treebuilder.rebuild_initrds(add_args=anaconda_args)

        logger.info("populating output tree and building boot images")
        treebuilder.build()

        # write .treeinfo file and we're done
        treeinfo = TreeInfo(self.product.name, self.product.version,
                            self.product.variant, self.arch.basearch)
        for section, data in treebuilder.treeinfo_data.items():
            treeinfo.add_section(section, data)
        treeinfo.write(joinpaths(self.outputdir, ".treeinfo"))

        # cleanup
        if remove_temp:
            remove(self.workdir)