def findkernels(root="/", kdir="boot"):
    """Find installed kernels under root/kdir.

    :param str root: Path to the root of the installed tree
    :param str kdir: Directory under root that holds the kernels
    :returns: list of DataHolder objects with path, version, arch, flavor
              (flavor may be None) and an attribute per matching image type
              (initrd, etc.) found next to the kernel
    """
    # To find possible flavors, awk '/BuildKernel/ { print $4 }' kernel.spec
    flavors = ('debug', 'PAE', 'PAEdebug', 'smp', 'xen', 'lpae')
    # NOTE: the dot before the flavor is escaped; the old pattern used a bare
    # "." which matched any single character between arch and flavor.
    kre = re.compile(r"vmlinuz-(?P<version>.+?\.(?P<arch>[a-z0-9_]+)"
                     r"(\.(?P<flavor>{0}))?)$".format("|".join(flavors)))
    kernels = []
    bootfiles = os.listdir(joinpaths(root, kdir))
    for f in bootfiles:
        match = kre.match(f)
        if match:
            kernel = DataHolder(path=joinpaths(kdir, f))
            kernel.update(match.groupdict())  # sets version, arch, flavor
            kernels.append(kernel)

    # look for associated initrd/initramfs/etc.
    for kernel in kernels:
        for f in bootfiles:
            if f.endswith('-'+kernel.version+'.img'):
                imgtype, _rest = f.split('-',1)
                # special backwards-compat case
                if imgtype == 'initramfs':
                    imgtype = 'initrd'
                kernel[imgtype] = DataHolder(path=joinpaths(kdir, f))

    logger.debug("kernels=%s", kernels)
    return kernels
def generate_module_data(self):
    """Run depmod and build a module-info file for every kernel in lib/modules."""
    sysroot = self.vars.root
    moddir = joinpaths(sysroot, "lib/modules/")
    for kernel_ver in os.listdir(moddir):
        logger.info("doing depmod and module-info for %s", kernel_ver)
        # depmod needs the matching System.map to resolve symbols
        symbol_map = joinpaths(sysroot, "boot/System.map-%s" % kernel_ver)
        runcmd(["depmod", "-a", "-F", symbol_map, "-b", sysroot, kernel_ver])
        generate_module_info(moddir + kernel_ver, outfile=moddir + "module-info")
def postinstall(self):
    '''Do some post-install setup work with runtime-postinstall.tmpl'''
    # Stage the template's config files inside the runtime root first so
    # the template can copy them into their final locations.
    src_configdir = joinpaths(self._runner.templatedir, "config_files")
    dest_rel = "tmp/config_files"
    dest_abs = joinpaths(self.vars.root, dest_rel)
    if os.path.exists(dest_abs):
        remove(dest_abs)
    copytree(src_configdir, dest_abs)
    self._runner.run("runtime-postinstall.tmpl", configdir=dest_rel)
def create_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2):
    """Create the live runtime image.

    :param str outfile: Path of the output squashfs image
    :param str compression: Compression type to pass to mksquashfs
    :param list compressargs: Extra compression arguments
    :param int size: Size of the rootfs image in GiB
    :returns: exit code from mksquashfs (for consistency with
              create_ext4_runtime, which already returns it)
    """
    # make live rootfs image - must be named "LiveOS/rootfs.img" for dracut
    compressargs = compressargs or []
    workdir = joinpaths(os.path.dirname(outfile), "runtime-workdir")
    os.makedirs(joinpaths(workdir, "LiveOS"))
    imgutils.mkrootfsimg(self.vars.root, joinpaths(workdir, "LiveOS/rootfs.img"),
                         "Anaconda", size=size)

    # squash the live rootfs and clean up workdir
    rc = imgutils.mksquashfs(workdir, outfile, compression, compressargs)
    remove(workdir)
    return rc
def hardlink(self, src, dest):
    ''' hardlink SRC DEST
        Create a hardlink at DEST which is linked to SRC.
    '''
    target = dest
    # If the destination is a directory, link inside it keeping src's basename
    if isdir(self._out(target)):
        target = joinpaths(target, basename(src))
    os.link(self._out(src), self._out(target))
def writepkgsizes(self, pkgsizefile):
    '''debugging data: write a big list of pkg sizes

    :param str pkgsizefile: path of the file to write "name.arch: bytes" lines to
    '''
    def getsize(f):
        # files may have been removed by templates; count missing ones as 0
        return os.lstat(f).st_size if os.path.exists(f) else 0

    q = self.dbo.sack.query()
    # use a context manager so the output file is closed (the old code leaked it)
    with open(pkgsizefile, "w") as fobj:
        for p in sorted(q.installed()):
            pkgsize = sum(getsize(joinpaths(self.vars.root, f)) for f in p.files)
            fobj.write("{0.name}.{0.arch}: {1}\n".format(p, pkgsize))
def rebuild_initrds(self, add_args=None, backup="", prefix=""):
    '''Rebuild all the initrds in the tree. If backup is specified, each
    initrd will be renamed with backup as a suffix before rebuilding.
    If backup is empty, the existing initrd files will be overwritten.
    If suffix is specified, the existing initrd is untouched and a new
    image is built with the filename "${prefix}-${kernel.version}.img"

    If the initrd doesn't exist its name will be created based on the
    name of the kernel.
    '''
    add_args = add_args or []
    dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + add_args
    if not backup:
        dracut.append("--force")

    if not self.kernels:
        raise Exception("No kernels found, cannot rebuild_initrds")

    # Hush some dracut warnings. TODO: bind-mount proc in place?
    # close() so we don't leak the file handle (we only need the empty file)
    open(joinpaths(self.vars.inroot, "/proc/modules"), "w").close()
    for kernel in self.kernels:
        if prefix:
            idir = os.path.dirname(kernel.path)
            outfile = joinpaths(idir, prefix + '-' + kernel.version + '.img')
        elif hasattr(kernel, "initrd"):
            # If there is an existing initrd, use that
            outfile = kernel.initrd.path
        else:
            # Construct an initrd from the kernel name
            outfile = kernel.path.replace("vmlinuz-", "initrd-") + ".img"
        logger.info("rebuilding %s", outfile)
        if backup:
            initrd = joinpaths(self.vars.inroot, outfile)
            if os.path.exists(initrd):
                os.rename(initrd, initrd + backup)
        cmd = dracut + [outfile, kernel.version]
        runcmd(cmd, root=self.vars.inroot)

        # ppc64 cannot boot images > 32MiB, check size and warn
        if self.vars.arch.basearch in ("ppc64", "ppc64le") and os.path.exists(outfile):
            st = os.stat(outfile)
            if st.st_size > 32 * 1024 * 1024:
                # use the module logger, not the root logger (was logging.warning)
                logger.warning("ppc64 initrd %s is > 32MiB", outfile)

    os.unlink(joinpaths(self.vars.inroot, "/proc/modules"))
def rglob(pathname, root="/", fatal=False):
    """Yield unique paths matching pathname under root, relative to root.

    :raises IOError: if fatal is True and nothing matched
    """
    matched = set()
    prefix_len = len(root) + 1
    for path in glob.iglob(joinpaths(root, pathname)):
        if path in matched:
            continue
        matched.add(path)
        yield path[prefix_len:]  # strip the root to produce a relative path
    if fatal and not matched:
        raise IOError("nothing matching %s in %s" % (pathname, root))
def find_templates(templatedir="/usr/share/lorax"):
    """ Find the templates to use.

    :param str templatedir: Top directory to search for templates
    :returns: Path to templates
    :rtype: str

    If there is a templates.d directory under templatedir the
    lowest numbered directory entry is returned.

    eg. /usr/share/lorax/templates.d/99-generic/
    """
    tdir = joinpaths(templatedir, "templates.d")
    if os.path.isdir(tdir):
        entries = sorted(glob(joinpaths(tdir, "*")))
        # fall back to templatedir itself when templates.d is empty
        if entries:
            templatedir = entries[0]
    return templatedir
def writepkglists(self, pkglistdir):
    '''debugging data: write out lists of package contents'''
    os.makedirs(pkglistdir, exist_ok=True)
    # one file per installed package, listing every file it owns
    for pkgobj in self.dbo.sack.query().installed():
        with open(joinpaths(pkglistdir, pkgobj.name), "w") as fobj:
            fobj.writelines("{0}\n".format(fname) for fname in pkgobj.files)
def dracut_hooks_path(self):
    """ Return the path to the lorax dracut hooks scripts

        Use the configured share dir if it is setup,
        otherwise default to /usr/share/lorax/dracut_hooks
    """
    if not self.templatedir:
        return "/usr/share/lorax/dracut_hooks"
    return joinpaths(self.templatedir, "dracut_hooks")
def copy_dracut_hooks(self, hooks):
    """ Copy the hook scripts in hooks into the installroot's /tmp/
    and return a list of commands to pass to dracut when creating the
    initramfs

    hooks is a list of tuples with the name of the hook script and the
    target dracut hook directory
    (eg. [("99anaconda-copy-ks.sh", "/lib/dracut/hooks/pre-pivot")])
    """
    commands = []
    for script, hook_dir in hooks:
        source = joinpaths(self.dracut_hooks_path, script)
        if not os.path.exists(source):
            # skip missing scripts rather than failing the whole build
            logger.error("Missing lorax dracut hook script %s", source)
            continue
        copy2(source, joinpaths(self.vars.inroot, "/tmp/", script))
        commands.extend(["--include", joinpaths("/tmp/", script), hook_dir])
    return commands
def create_ext4_runtime(self, outfile="/var/tmp/squashfs.img", compression="xz", compressargs=None, size=2):
    """Create a squashfs compressed ext4 runtime"""
    # make live rootfs image - must be named "LiveOS/rootfs.img" for dracut
    compressargs = compressargs or []
    workdir = joinpaths(os.path.dirname(outfile), "runtime-workdir")
    os.makedirs(joinpaths(workdir, "LiveOS"))

    # Catch problems with the rootfs being too small and clearly log them
    try:
        imgutils.mkrootfsimg(self.vars.root,
                             joinpaths(workdir, "LiveOS/rootfs.img"),
                             "Anaconda", size=size)
    except CalledProcessError as e:
        if e.stdout and "No space left on device" in e.stdout:
            logger.error("The rootfs ran out of space with size=%d", size)
        raise

    # squash the live rootfs and clean up workdir
    rc = imgutils.mksquashfs(workdir, outfile, compression, compressargs)
    remove(workdir)
    return rc
def make_runtime(opts, mount_dir, work_dir, size=None):
    """ Make the squashfs image from a directory

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str mount_dir: Directory tree to compress
    :param str work_dir: Output compressed image to work_dir+images/install.img
    :param int size: Size of disk image, in GiB
    :returns: rc of squashfs creation
    :rtype: int
    """
    kernel_arch = get_arch(mount_dir)

    # Fake dnf object
    fake_dbo = FakeDNF(conf=DataHolder(installroot=mount_dir))
    # Fake arch with only basearch set
    arch = ArchData(kernel_arch)
    # TODO: Need to get release info from someplace...
    product = DataHolder(name=opts.project, version=opts.releasever, release="",
                         variant="", bugurl="", isfinal=False)

    rb = RuntimeBuilder(product, arch, fake_dbo)
    compression, compressargs = squashfs_args(opts)

    # pick the runtime builder, then run it with the shared arguments
    if opts.squashfs_only:
        log.info("Creating a squashfs only runtime")
        builder = rb.create_squashfs_runtime
    else:
        log.info("Creating a squashfs+ext4 runtime")
        builder = rb.create_ext4_runtime
    return builder(joinpaths(work_dir, RUNTIME), size=size,
                   compression=compression, compressargs=compressargs)
def compose_detail(results_dir):
    """Return details about the build.

    :param results_dir: The directory containing the metadata and results for the build
    :type results_dir: str
    :returns: A dictionary with details about the compose
    :rtype: dict
    :raises: IOError if it cannot read the directory, STATUS, or blueprint file.

    The following details are included in the dict:

    * id - The uuid of the comoposition
    * queue_status - The final status of the composition (FINISHED or FAILED)
    * timestamp - The time of the last status change
    * compose_type - The type of output generated (tar, iso, etc.)
    * blueprint - Blueprint name
    * version - Blueprint version
    * image_size - Size of the image, if finished. 0 otherwise.
    """
    build_id = os.path.basename(os.path.abspath(results_dir))
    status_path = joinpaths(results_dir, "STATUS")
    # use a context manager so the STATUS handle is closed (old code leaked it)
    with open(status_path) as f:
        status = f.read().strip()
    mtime = os.stat(status_path).st_mtime
    blueprint = recipe_from_file(joinpaths(results_dir, "blueprint.toml"))

    compose_type = get_compose_type(results_dir)

    image_path = get_image_name(results_dir)[1]
    if status == "FINISHED" and os.path.exists(image_path):
        image_size = os.stat(image_path).st_size
    else:
        image_size = 0

    return {"id": build_id,
            "queue_status": status,
            "timestamp": mtime,
            "compose_type": compose_type,
            "blueprint": blueprint["name"],
            "version": blueprint["version"],
            "image_size": image_size}
def make_livecd(opts, mount_dir, work_dir):
    """ Take the content from the disk image and make a livecd out of it

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str mount_dir: Directory tree to compress
    :param str work_dir: Output compressed image to work_dir+images/install.img

    This uses wwood's squashfs live initramfs method:
     * put the real / into LiveOS/rootfs.img
     * make a squashfs of the LiveOS/rootfs.img tree
     * This is loaded by dracut when the cmdline is passed to the kernel:
       root=live:CDLABEL=<volid> rd.live.image
    """
    kernel_arch = get_arch(mount_dir)
    arch = ArchData(kernel_arch)
    # TODO: Need to get release info from someplace...
    product = DataHolder(name=opts.project, version=opts.releasever, release="", variant="", bugurl="", isfinal=False)

    # Link /images to work_dir/images to make the templates happy
    if os.path.islink(joinpaths(mount_dir, "images")):
        os.unlink(joinpaths(mount_dir, "images"))
    rc = execWithRedirect("/bin/ln", ["-s", joinpaths(work_dir, "images"), joinpaths(mount_dir, "images")])
    if rc:
        raise RuntimeError("Failed to symlink images from mount_dir to work_dir")

    # The templates expect the config files to be in /tmp/config_files
    # I think these should be release specific, not from lorax, but for now
    configdir = joinpaths(opts.lorax_templates,"live/config_files/")
    configdir_path = "tmp/config_files"
    fullpath = joinpaths(mount_dir, configdir_path)
    if os.path.exists(fullpath):
        remove(fullpath)
    copytree(configdir, fullpath)

    # ISO labels are limited to 32 characters; truncate and warn if needed
    isolabel = opts.volid or "{0.name}-{0.version}-{1.basearch}".format(product, arch)
    if len(isolabel) > 32:
        isolabel = isolabel[:32]
        log.warning("Truncating isolabel to 32 chars: %s", isolabel)

    tb = TreeBuilder(product=product, arch=arch, domacboot=opts.domacboot,
                     inroot=mount_dir, outroot=work_dir,
                     runtime=RUNTIME, isolabel=isolabel,
                     templatedir=joinpaths(opts.lorax_templates,"live/"),
                     extra_boot_args=opts.extra_boot_args)
    log.info("Rebuilding initrds")
    log.info("dracut args = %s", dracut_args(opts))
    tb.rebuild_initrds(add_args=dracut_args(opts))
    log.info("Building boot.iso")
    tb.build()

    return work_dir
def uuid_tar(cfg, uuid, metadata=False, image=False, logs=False):
    """Return a tar of the build data

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :param metadata: Set to true to include all the metadata needed to reproduce the build
    :type metadata: bool
    :param image: Set to true to include the output image
    :type image: bool
    :param logs: Set to true to include the logs from the build
    :type logs: bool
    :returns: A stream of bytes from tar
    :rtype: A generator
    :raises: RuntimeError if there was a problem (eg. missing config file)

    This yields an uncompressed tar's data to the caller. It includes
    the selected data to the caller by returning the Popen stdout from
    the tar process.
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    if not os.path.exists(uuid_dir):
        raise RuntimeError("%s is not a valid build_id" % uuid)

    # Load the compose configuration
    cfg_path = joinpaths(uuid_dir, "config.toml")
    if not os.path.exists(cfg_path):
        raise RuntimeError("Missing config.toml for %s" % uuid)
    # close the config file when done reading (old code leaked the handle)
    with open(cfg_path, "r") as f:
        cfg_dict = toml.loads(f.read())
    image_name = cfg_dict["image_name"]

    def include_file(f):
        if f.endswith("/logs"):
            return logs
        if f.endswith(image_name):
            return image
        return metadata
    filenames = [os.path.basename(f) for f in glob(joinpaths(uuid_dir, "*")) if include_file(f)]

    tar = Popen(["tar", "-C", uuid_dir, "-cf-"] + filenames, stdout=PIPE)
    return tar.stdout
def partition_mount_test(self):
    """Test PartitionMount context manager (requires loop)"""
    with tempfile.NamedTemporaryFile( prefix="lorax.test.disk.") as disk_img:
        self.assertTrue(mkfakediskimg(disk_img.name))
        # Make sure it can mount the / with /etc/passwd
        with PartitionMount(disk_img.name) as img_mount:
            self.assertTrue(img_mount is not None)
            self.assertTrue(os.path.isdir(img_mount.mount_dir))
            self.assertTrue( os.path.exists( joinpaths(img_mount.mount_dir, "/etc/passwd")))

        # Make sure submount works
        with PartitionMount(disk_img.name, submount="/a-sub-mount/") as img_mount:
            self.assertTrue(img_mount is not None)
            self.assertTrue(os.path.isdir(img_mount.mount_dir))
            self.assertTrue( os.path.exists( joinpaths(img_mount.mount_dir, "/etc/passwd")))

        # Make sure it can mount the /boot partition with a custom mount_ok function
        def mount_ok(mount_dir):
            # accept the mount only if it contains at least one kernel
            kernels = glob.glob(joinpaths(mount_dir, "vmlinuz-*"))
            return len(kernels) > 0
        with PartitionMount(disk_img.name, mount_ok=mount_ok) as img_mount:
            self.assertTrue(img_mount is not None)
            self.assertTrue(os.path.isdir(img_mount.mount_dir))
            # /boot holds kernels but no /etc/passwd
            self.assertFalse( os.path.exists( joinpaths(img_mount.mount_dir, "/etc/passwd")))
            self.assertTrue( os.path.exists( joinpaths(img_mount.mount_dir, "vmlinuz-4.18.13-200.fc28.x86_64")))
            self.assertTrue( os.path.exists( joinpaths(img_mount.mount_dir, "initramfs-4.18.13-200.fc28.x86_64.img")))
def generate_module_info(moddir, outfile=None):
    """Write a module-info file for block/networking modules found under moddir.

    :param str moddir: lib/modules/<kver> directory to scan
    :param outfile: Output file path; defaults to moddir/module-info
    """
    def module_desc(mod):
        # ask modinfo for the module's description string
        output = runcmd_output(["modinfo", "-F", "description", mod])
        return output.strip()

    def read_module_set(name):
        # close the modules.* list file when done (old code leaked the handle)
        with open(joinpaths(moddir, name)) as fobj:
            return set(l.strip() for l in fobj if ".ko" in l)

    modsets = {'scsi': read_module_set("modules.block"),
               'eth': read_module_set("modules.networking")}

    modinfo = list()
    for root, _dirs, files in os.walk(moddir):
        for modtype, modset in modsets.items():
            for mod in modset.intersection(files):  # modules in this dir
                (name, _ext) = os.path.splitext(mod)  # foo.ko -> (foo, .ko)
                desc = module_desc(joinpaths(root, mod)) or "%s driver" % name
                modinfo.append(dict(name=name, type=modtype, desc=desc))

    # close the output file when done (old code leaked the handle)
    with open(outfile or joinpaths(moddir, "module-info"), "w") as out:
        out.write("Version 0\n")
        for mod in sorted(modinfo, key=lambda m: m.get('name')):
            out.write('{name}\n\t{type}\n\t"{desc:.65}"\n'.format(**mod))
def get_image_name(uuid_dir):
    """Return the filename and full path of the build's image file

    :param uuid: The UUID of the build
    :type uuid: str
    :returns: The image filename and full path
    :rtype: tuple of strings
    :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file)
    """
    uuid = os.path.basename(os.path.abspath(uuid_dir))
    if not os.path.exists(uuid_dir):
        raise RuntimeError("%s is not a valid build_id" % uuid)

    # Load the compose configuration
    cfg_path = joinpaths(uuid_dir, "config.toml")
    if not os.path.exists(cfg_path):
        raise RuntimeError("Missing config.toml for %s" % uuid)
    # close the config file when done reading (old code leaked the handle)
    with open(cfg_path, "r") as f:
        cfg_dict = toml.loads(f.read())
    image_name = cfg_dict["image_name"]

    return (image_name, joinpaths(uuid_dir, image_name))
def api_docs(path=None):
    """Serve the HTML API docs, from the source tree or the installed package.

    :param path: File under the docs directory to serve; defaults to index.html
    """
    # This assumes it is running from the source tree
    docs_path = os.path.abspath(joinpaths(os.path.dirname(__file__), "../../../docs/html"))
    # NOTE: the old try/except IndexError here was dead code — abspath and
    # joinpaths never raise IndexError, so the fallback was unreachable.
    # Fall back to the installed docs when the source-tree path is missing.
    if not os.path.isdir(docs_path):
        docs_path = glob("/usr/share/doc/lorax-*/html/")[0]

    if not path:
        path = "index.html"
    return send_from_directory(docs_path, path)
def test_make_runtime_squashfs_ext4(self):
    """Test making a runtime squashfs+ext4 only image"""
    with tempfile.TemporaryDirectory(prefix="lorax.test.") as work_dir:
        with tempfile.TemporaryDirectory(prefix="lorax.test.root.") as mount_dir:
            # Make a fake kernel and initrd
            mkFakeBoot(mount_dir)
            # squashfs_only=False selects the squashfs+ext4 runtime path
            opts = DataHolder(project="Fedora", releasever="devel", compression="xz", compress_args=[], arch="x86_64", squashfs_only=False)
            make_runtime(opts, mount_dir, work_dir)

            # Make sure it made an install.img
            self.assertTrue(os.path.exists(joinpaths(work_dir, "images/install.img")))

            # Make sure it looks like a squashfs filesystem
            file_details = get_file_magic(joinpaths(work_dir, "images/install.img"))
            self.assertTrue("Squashfs" in file_details)

            # Make sure there is a rootfs.img inside the squashfs
            cmd = ["unsquashfs", "-n", "-l", joinpaths(work_dir, "images/install.img")]
            results = runcmd_output(cmd)
            self.assertTrue("rootfs.img" in results)
def test_get_profile_path(self):
    """Make sure that _get_profile_path strips path elements from the input"""
    path = _get_profile_path(self.config["upload"], "aws", "staging-settings", exists=False)
    self.assertEqual( path, os.path.abspath( joinpaths(self.config["upload"]["settings_dir"], "aws/staging-settings.toml")))

    # Directory-traversal attempts in provider/profile must be stripped,
    # resolving to the same safe path as above
    path = _get_profile_path(self.config["upload"], "../../../../foo/bar/aws", "/not/my/path/staging-settings", exists=False)
    self.assertEqual( path, os.path.abspath( joinpaths(self.config["upload"]["settings_dir"], "aws/staging-settings.toml")))
def monitor(cfg):
    """Monitor the queue for new compose requests

    :param cfg: Configuration settings
    :type cfg: DataHolder
    :returns: Does not return

    The queue has 2 subdirectories, new and run. When a compose is ready to be run
    a symlink to the uniquely named results directory should be placed in ./queue/new/

    When the it is ready to be run (it is checked every 30 seconds or after a previous
    compose is finished) the symlink will be moved into ./queue/run/ and a STATUS file
    will be created in the results directory.

    STATUS can contain one of: WAITING, RUNNING, FINISHED, FAILED

    If the system is restarted while a compose is running it will move any old symlinks
    from ./queue/run/ to ./queue/new/ and rerun them.
    """
    def queue_sort(uuid):
        """Sort the queue entries by their mtime, not their names"""
        return os.stat(joinpaths(cfg.composer_dir, "queue/new", uuid)).st_mtime

    def write_status(dst, status):
        """Write the STATUS file with a context manager (old code leaked handles)"""
        with open(joinpaths(dst, "STATUS"), "w") as f:
            f.write(status + "\n")

    check_queues(cfg)
    while True:
        uuids = sorted(os.listdir(joinpaths(cfg.composer_dir, "queue/new")), key=queue_sort)

        # Pick the oldest and move it into ./run/
        if not uuids:
            # No composes left to process, sleep for a bit
            time.sleep(5)
        else:
            src = joinpaths(cfg.composer_dir, "queue/new", uuids[0])
            dst = joinpaths(cfg.composer_dir, "queue/run", uuids[0])
            try:
                os.rename(src, dst)
            except OSError:
                # The symlink may vanish if uuid_cancel() has been called
                continue

            log.info("Starting new compose: %s", dst)
            write_status(dst, "RUNNING")

            try:
                make_compose(cfg, os.path.realpath(dst))
                log.info("Finished building %s, results are in %s", dst, os.path.realpath(dst))
                write_status(dst, "FINISHED")
                write_timestamp(dst, TS_FINISHED)
            except Exception:
                import traceback
                log.error("traceback: %s", traceback.format_exc())
                # TODO - Write the error message to an ERROR-LOG file to include with the status
                # log.error("Error running compose: %s", e)
                write_status(dst, "FAILED")
                write_timestamp(dst, TS_FINISHED)

            os.unlink(dst)
def rebuild_initrds(self, add_args=None, backup="", prefix=""):
    '''Rebuild all the initrds in the tree. If backup is specified, each
    initrd will be renamed with backup as a suffix before rebuilding.
    If backup is empty, the existing initrd files will be overwritten.
    If suffix is specified, the existing initrd is untouched and a new
    image is built with the filename "${prefix}-${kernel.version}.img"

    If the initrd doesn't exist its name will be created based on the
    name of the kernel.
    '''
    add_args = add_args or []
    dracut = ["dracut", "--nomdadmconf", "--nolvmconf"] + add_args
    if not backup:
        dracut.append("--force")

    if not self.kernels:
        raise Exception("No kernels found, cannot rebuild_initrds")

    # Hush some dracut warnings. TODO: bind-mount proc in place?
    # close() so we don't leak the file handle (we only need the empty file)
    open(joinpaths(self.vars.inroot, "/proc/modules"), "w").close()
    for kernel in self.kernels:
        if prefix:
            idir = os.path.dirname(kernel.path)
            outfile = joinpaths(idir, prefix + '-' + kernel.version + '.img')
        elif hasattr(kernel, "initrd"):
            # If there is an existing initrd, use that
            outfile = kernel.initrd.path
        else:
            # Construct an initrd from the kernel name
            outfile = kernel.path.replace("vmlinuz-", "initrd-") + ".img"
        logger.info("rebuilding %s", outfile)
        if backup:
            initrd = joinpaths(self.vars.inroot, outfile)
            if os.path.exists(initrd):
                os.rename(initrd, initrd + backup)
        cmd = dracut + [outfile, kernel.version]
        runcmd(cmd, root=self.vars.inroot)

    os.unlink(joinpaths(self.vars.inroot, "/proc/modules"))
def make_squashfs_test(self):
    """Test making a squashfs image"""
    with tempfile.TemporaryDirectory(prefix="lorax.test.") as work_dir:
        with tempfile.NamedTemporaryFile( prefix="lorax.test.disk.") as disk_img:
            # Make a small ext4 disk image
            mksparse(disk_img.name, 42 * 1024**2)
            runcmd([ "mkfs.ext4", "-L", "Anaconda", "-b", "4096", "-m", "0", disk_img.name ])
            opts = DataHolder(compression="xz", arch="x86_64")
            make_squashfs(opts, disk_img.name, work_dir)

            # Make sure it made an install.img
            self.assertTrue( os.path.exists(joinpaths(work_dir, "images/install.img")))

            # Make sure it looks like a squashfs filesystem
            file_details = get_file_magic( joinpaths(work_dir, "images/install.img"))
            self.assertTrue("Squashfs" in file_details)
def test_pxe_config(self):
    """Test creation of a PXE config file"""
    with tempfile.TemporaryDirectory(prefix="lorax.test.") as work_dir:
        live_image_name = "live-rootfs.squashfs.img"
        add_pxe_args = ["ostree=/mnt/sysimage/"]
        lorax_templates = find_templates("./share/")
        template = joinpaths(lorax_templates, "pxe-live/pxe-config.tmpl")

        # Make a fake kernel and initrd
        with open(joinpaths(work_dir, "vmlinuz-4.18.13-200.fc28.x86_64"), "w") as f:
            f.write("I AM A FAKE KERNEL")
        with open(joinpaths(work_dir, "initramfs-4.18.13-200.fc28.x86_64.img"), "w") as f:
            f.write("I AM A FAKE INITRD")

        # Create the PXE_CONFIG in work_dir
        create_pxe_config(template, work_dir, live_image_name, add_pxe_args)
        with open(joinpaths(work_dir, "PXE_CONFIG")) as f:
            pxe_config = f.read()
        print(pxe_config)
        # The generated config must reference the kernel, initrd, image and extra args
        self.assertTrue("vmlinuz-4.18.13-200.fc28.x86_64" in pxe_config)
        self.assertTrue("initramfs-4.18.13-200.fc28.x86_64.img" in pxe_config)
        self.assertTrue("/live-rootfs.squashfs.img ostree=/mnt/sysimage/" in pxe_config)
def uuid_image(cfg, uuid):
    """Return the filename and full path of the build's image file

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: The image filename and full path
    :rtype: tuple of strings
    :raises: RuntimeError if there was a problem (eg. invalid uuid, missing config file)
    """
    results_base = joinpaths(cfg.get("composer", "lib_dir"), "results")
    return get_image_name(joinpaths(results_base, uuid))
def workspace_dir(repo, branch):
    """Create the workspace's path from a Repository and branch

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :returns: The path to the branch's workspace directory
    :rtype: str
    """
    # the workspace lives next to the repo, one subdirectory per branch
    return joinpaths(repo.get_location().get_path(), "workspace", branch)
def move_compose_results(cfg, results_dir):
    """Move the final image to the results_dir and cleanup the unneeded compose files

    :param cfg: Build configuration
    :type cfg: DataHolder
    :param results_dir: Directory to put the results into
    :type results_dir: str
    """
    compose_dir = cfg["result_dir"]
    if cfg["make_tar"]:
        shutil.move(joinpaths(compose_dir, cfg["image_name"]), results_dir)
    elif cfg["make_iso"]:
        # Output from live iso is always a boot.iso under images/, move and rename it
        shutil.move(joinpaths(compose_dir, cfg["iso_name"]),
                    joinpaths(results_dir, cfg["image_name"]))
    elif cfg["make_disk"] or cfg["make_fsimage"]:
        shutil.move(joinpaths(compose_dir, cfg["image_name"]),
                    joinpaths(results_dir, cfg["image_name"]))

    # Cleanup the compose directory, but only if it looks like a compose directory
    if os.path.basename(compose_dir) == "compose":
        shutil.rmtree(compose_dir)
    else:
        log.error("Incorrect compose directory, not cleaning up")
def make_appliance_test(self):
    """Test creating the appliance description XML file"""
    lorax_templates = find_templates("./share/")
    appliance_template = joinpaths(lorax_templates, "appliance/libvirt.tmpl")
    self.assertTrue(os.path.exists(appliance_template))

    # A fake disk image
    with tempfile.NamedTemporaryFile( prefix="lorax.test.disk.") as disk_img:
        open(disk_img.name, "wb").write(b"THIS IS A FAKE DISK IMAGE FILE")
        with tempfile.NamedTemporaryFile( prefix="lorax.test.appliance.") as output_xml:
            make_appliance(disk_img.name, "test-appliance", appliance_template, output_xml.name, ["eth0", "eth1"], ram=4096, vcpus=8, arch="x86_64", title="Lorax Test", project="Fedora", releasever="30")
            print(open(output_xml.name).read())

            # Parse the XML and check that the fields passed above made it through
            tree = ET.parse(output_xml.name)
            image = tree.getroot()
            self.assertEqual(image.find("name").text, "test-appliance")
            boot = image.find("./domain/boot")
            self.assertEqual(boot.get("type"), "hvm")
            self.assertEqual(boot.find("./guest/arch").text, "x86_64")
            self.assertEqual(boot.find("./os/loader").get("dev"), "hd")
            # drive entries reference the tempfile-named fake disk
            self.assertTrue( boot.find("drive").get("disk").startswith( "lorax.test.disk."))
            self.assertEqual(boot.find("drive").get("target"), "hda")
            devices = image.find("./domain/devices")
            self.assertEqual(devices.find("vcpu").text, "8")
            self.assertEqual(devices.find("memory").text, "4096")
            self.assertTrue(len(devices.findall("interface")), 2)
            storage = image.find("storage")
            self.assertTrue( storage.find("disk").get("file").startswith( "lorax.test.disk."))
            # checksum of the fixed fake disk contents is stable
            self.assertEqual( storage.find("./disk/checksum").get("type"), "sha256")
            self.assertEqual( storage.find("./disk/checksum").text, "90611458b33009998f73e25ccc3766b31a8b548cc6c2d84f78ae0e84d64e10a5" )
def configure(conf_file="/etc/lorax/composer.conf", root_dir="/", test_config=False):
    """lorax-composer configuration

    :param conf_file: Path to the config file overriding the default settings
    :type conf_file: str
    :param root_dir: Directory to prepend to paths, defaults to /
    :type root_dir: str
    :param test_config: Set to True to skip reading conf_file
    :type test_config: bool
    :returns: Configuration
    :rtype: ComposerConfig
    """
    conf = ComposerConfig()

    # set defaults; every path is rooted under root_dir
    conf.add_section("composer")
    path_defaults = {
        "share_dir": "/usr/share/lorax/",
        "lib_dir": "/var/lib/lorax/composer/",
        "repo_dir": "/var/lib/lorax/composer/repos.d/",
        "dnf_conf": "/var/tmp/composer/dnf.conf",
        "dnf_root": "/var/tmp/composer/dnf/root/",
        "cache_dir": "/var/tmp/composer/cache/",
        "tmp": "/var/tmp/",
    }
    for key, path in path_defaults.items():
        conf.set("composer", key, os.path.realpath(joinpaths(root_dir, path)))

    conf.add_section("users")
    conf.set("users", "root", "1")

    # Enable all available repo files by default
    conf.add_section("repos")
    conf.set("repos", "use_system_repos", "1")
    conf.set("repos", "enabled", "*")

    conf.add_section("dnf")

    if not test_config:
        # read the config file
        if os.path.isfile(conf_file):
            conf.read(conf_file)

    return conf
def queue_status(cfg, api=1):
    """Return details about what is in the queue.

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param api: Select which api version of the dict to return (default 1)
    :type api: int
    :returns: A list of the new composes, and a list of the running composes
    :rtype: dict

    This returns a dict with 2 lists. "new" is the list of uuids that are waiting to be built,
    and "run" has the uuids that are being built (currently limited to 1 at a time).
    """
    queue_dir = joinpaths(cfg.get("composer", "lib_dir"), "queue")

    def state_details(state):
        # resolve the queue symlinks and collect details, skipping entries
        # whose results can no longer be read
        details = []
        for link in glob(joinpaths(queue_dir, state, "*")):
            try:
                details.append(compose_detail(cfg, os.path.realpath(link), api))
            except IOError:
                continue
        return details

    return {"new": state_details("new"), "run": state_details("run")}
def workspace_filename(repo, branch, recipe_name):
    """Return the path and filename of the workspace recipe

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: The name of the recipe
    :type recipe_name: str
    :returns: workspace recipe path and filename
    :rtype: str
    """
    return joinpaths(workspace_dir(repo, branch), recipe_filename(recipe_name))
def get_compose_type(results_dir):
    """Return the type of composition.

    :param results_dir: The directory containing the metadata and results for the build
    :type results_dir: str
    :returns: The type of compose (eg. 'tar')
    :rtype: str
    :raises: RuntimeError if no kickstart template can be found.
    """
    # Should only be 2 kickstarts, the final-kickstart.ks and the template
    candidates = [os.path.basename(ks)[:-3]
                  for ks in glob(joinpaths(results_dir, "*.ks"))
                  if "final-kickstart" not in ks]
    if len(candidates) != 1:
        raise RuntimeError("Cannot find ks template for build %s" % os.path.basename(results_dir))
    return candidates[0]
def uuid_delete(cfg, uuid):
    """Delete all of the results from a compose

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: True if it was deleted
    :rtype: bool
    :raises: This will raise an error if the delete failed
    """
    uuid_dir = joinpaths(cfg.get("composer", "lib_dir"), "results", uuid)
    # sanity check: refuse to rmtree a suspiciously short path
    if not uuid_dir or len(uuid_dir) < 10:
        raise RuntimeError("Directory length is too short: %s" % uuid_dir)
    shutil.rmtree(uuid_dir)
    return True
def workspace_delete(repo, branch, recipe_name):
    """Delete the recipe from the workspace

    :param repo: Open repository
    :type repo: Git.Repository
    :param branch: Branch name
    :type branch: str
    :param recipe_name: The name of the recipe
    :type recipe_name: str
    :returns: None
    :raises: IO related errors
    """
    recipe_path = joinpaths(workspace_dir(repo, branch), recipe_filename(recipe_name))
    # a missing workspace copy is not an error
    if os.path.exists(recipe_path):
        os.unlink(recipe_path)
def make_queue_dirs(conf, gid):
    """Make any missing queue directories

    :param conf: The configuration to use
    :type conf: ComposerConfig
    :param gid: Group ID that has access to the queue directories
    :type gid: int
    :returns: list of errors
    :rtype: list of str
    """
    errors = []
    lib_dir = conf.get("composer", "lib_dir")
    for subdir in ("queue/run", "queue/new", "results"):
        errors.extend(make_owned_dir(joinpaths(lib_dir, subdir), 0, gid))
    return errors
def rebuild_initrds_for_live(opts, sys_root_dir, results_dir):
    """ Rebuild initrds for pxe live image (root=live:http://)

    :param opts: options passed to livemedia-creator
    :type opts: argparse options
    :param str sys_root_dir: Path to root of the system
    :param str results_dir: Path of directory for storing results
    :raises Exception: if no kernels (and therefore no initrds) are found
    """
    # cmdline dracut args override the defaults, but need to be parsed
    log.info("dracut args = %s", dracut_args(opts))
    args = ["--nomdadmconf", "--nolvmconf"] + dracut_args(opts)

    kdir = "boot"
    if opts.ostree:
        # ostree keeps its kernels under boot/ostree/<deployment>/
        kernels_dir = glob.glob(joinpaths(sys_root_dir, "boot/ostree/*"))
        if kernels_dir:
            kdir = os.path.relpath(kernels_dir[0], sys_root_dir)

    # findkernels already returns a list; the old [k for k in ...] wrapper was a no-op
    kernels = findkernels(sys_root_dir, kdir)
    if not kernels:
        raise Exception("No initrds found, cannot rebuild_initrds")

    if opts.ostree:
        # Dracut assumes these dirs exist in the disk image:
        # /var/tmp for temp files, /root and /tmp (the latter two maybe not fatal)
        for subdir in ("var/tmp", "var/roothome", "sysroot/tmp"):
            dirpath = joinpaths(sys_root_dir, subdir)
            if not os.path.isdir(dirpath):
                os.mkdir(dirpath)

    # Write the new initramfs directly to the results directory
    os.mkdir(joinpaths(sys_root_dir, "results"))
    with DracutChroot(sys_root_dir, bind=[(results_dir, "/results")]) as dracut:
        for kernel in kernels:
            if hasattr(kernel, "initrd"):
                outfile = os.path.basename(kernel.initrd.path)
            else:
                # Construct an initrd name from the kernel name
                outfile = os.path.basename(kernel.path.replace("vmlinuz-", "initrd-") + ".img")
            log.info("rebuilding %s", outfile)
            dracut.Run(args + ["/results/" + outfile, kernel.version])
            # Keep a copy of the kernel next to the rebuilt initrd
            shutil.copy2(joinpaths(sys_root_dir, kernel.path), results_dir)
def test_stale_run_symlink(self):
    """Put a valid symlink in run, make sure it is set to FAILED and removed"""
    build_id = str(uuid4())
    results_dir = joinpaths(self.monitor_cfg.composer_dir, "results", build_id)
    run_link = joinpaths(self.monitor_cfg.composer_dir, "queue/run", build_id)

    # Simulate a build that was running when the server died: a results
    # directory with a still-present symlink in queue/run
    os.makedirs(results_dir)
    os.symlink(results_dir, run_link)
    self.assertTrue(os.path.islink(run_link))

    check_queues(self.monitor_cfg)

    # The stale link must be gone and the build marked FAILED
    self.assertFalse(os.path.islink(run_link))
    with open(joinpaths(results_dir, "STATUS")) as status_file:
        status = status_file.read().strip()
    self.assertEqual(status, "FAILED")
def uuid_status(cfg, uuid):
    """Return the details of a specific UUID compose

    :param cfg: Configuration settings
    :type cfg: ComposerConfig
    :param uuid: The UUID of the build
    :type uuid: str
    :returns: Details about the build
    :rtype: dict or None

    Returns the same dict as `compose_details()`
    """
    # A missing/unreadable results directory simply means "no such build"
    try:
        return compose_detail(joinpaths(cfg.get("composer", "lib_dir"), "results", uuid))
    except IOError:
        return None
def _in(self, path):
    """Map *path* to its location under the install root (self.inroot)."""
    inpath = joinpaths(self.inroot, path)
    return inpath
def _out(self, path):
    """Map *path* to its location under the output root (self.outroot)."""
    outpath = joinpaths(self.outroot, path)
    return outpath
def implantisomd5(self):
    """Embed an md5 checksum into every boot.iso listed in the treeinfo data."""
    for data in self.treeinfo_data.values():
        if 'boot.iso' not in data:
            continue
        iso_path = joinpaths(self.vars.outroot, data['boot.iso'])
        runcmd(["implantisomd5", iso_path])
def init_file_logging(self, logdir, logname="pylorax.log"):
    """Attach a DEBUG-level file handler writing to logdir/logname.

    The file is opened in "w" mode, so any previous log is truncated.
    """
    handler = logging.FileHandler(filename=joinpaths(logdir, logname), mode="w")
    handler.setLevel(logging.DEBUG)
    logger.addHandler(handler)
def run(self, dbo, product, version, release, variant="", bugurl="",
        isfinal=False, workdir=None, outputdir=None, buildarch=None, volid=None,
        domacboot=True, doupgrade=True, remove_temp=False,
        installpkgs=None, excludepkgs=None,
        size=2,
        add_templates=None,
        add_template_vars=None,
        add_arch_templates=None,
        add_arch_template_vars=None,
        verify=True):
    """Install the runtime, build the output tree and the boot images.

    Drives the whole build: sanity checks (root, selinux, dnf object),
    runtime install via RuntimeBuilder, squashfs runtime image creation,
    then TreeBuilder for initrds, boot images and .treeinfo.

    :param dbo: dnf.Base object used to install the runtime packages
    :param product: product name (used in .buildstamp, .treeinfo, volume id)
    :param version: product version string
    :param release: product release string
    :param variant: optional product variant
    :param bugurl: bug reporting URL written into .buildstamp
    :param isfinal: whether this is a final build (recorded in .buildstamp)
    :param workdir: working directory; a temp dir is created when None
    :param outputdir: output directory; a temp dir is created when None
    :param buildarch: build architecture; auto-detected from dbo when None
    :param volid: iso volume id; derived from product/version/arch when None
    :param domacboot: require hfsplus-tools and build mac boot support
    :param doupgrade: passed through to TreeBuilder
    :param remove_temp: remove self.workdir when the run completes
    :param installpkgs: extra packages to install into the runtime
    :param excludepkgs: packages to exclude from the runtime
    :param size: size (in GiB, presumably — TODO confirm) passed to create_runtime
    :param add_templates: extra runtime templates for RuntimeBuilder
    :param add_template_vars: variables for the extra runtime templates
    :param add_arch_templates: extra arch templates for TreeBuilder
    :param add_arch_template_vars: variables for the extra arch templates
    :param verify: run rb.verify() on the installroot; exit on failure
    """
    assert self._configured

    installpkgs = installpkgs or []
    excludepkgs = excludepkgs or []

    # mac boot images need hfsplus-tools; bail out early if it is missing
    if domacboot:
        try:
            runcmd(["rpm", "-q", "hfsplus-tools"])
        except CalledProcessError:
            logger.critical("you need to install hfsplus-tools to create mac images")
            sys.exit(1)

    # set up work directory
    self.workdir = workdir or tempfile.mkdtemp(prefix="pylorax.work.")
    if not os.path.isdir(self.workdir):
        os.makedirs(self.workdir)

    # set up log directory
    logdir = self.conf.get("lorax", "logdir")
    if not os.path.isdir(logdir):
        os.makedirs(logdir)

    self.init_stream_logging()
    self.init_file_logging(logdir)

    logger.debug("version is %s", vernum)
    logger.debug("using work directory %s", self.workdir)
    logger.debug("using log directory %s", logdir)

    # set up output directory
    self.outputdir = outputdir or tempfile.mkdtemp(prefix="pylorax.out.")
    if not os.path.isdir(self.outputdir):
        os.makedirs(self.outputdir)
    logger.debug("using output directory %s", self.outputdir)

    # do we have root privileges?
    logger.info("checking for root privileges")
    if not os.geteuid() == 0:
        logger.critical("no root privileges")
        sys.exit(1)

    # is selinux disabled?
    # With selinux in enforcing mode the rpcbind package required for
    # dracut nfs module, which is in turn required by anaconda module,
    # will not get installed, because it's preinstall scriptlet fails,
    # resulting in an incomplete initial ramdisk image.
    # The reason is that the scriptlet runs tools from the shadow-utils
    # package in chroot, particularly groupadd and useradd to add the
    # required rpc group and rpc user. This operation fails, because
    # the selinux context on files in the chroot, that the shadow-utils
    # tools need to access (/etc/group, /etc/passwd, /etc/shadow etc.),
    # is wrong and selinux therefore disallows access to these files.
    logger.info("checking the selinux mode")
    if selinux.is_selinux_enabled() and selinux.security_getenforce():
        logger.critical("selinux must be disabled or in Permissive mode")
        sys.exit(1)

    # do we have a proper dnf base object?
    logger.info("checking dnf base object")
    if not isinstance(dbo, dnf.Base):
        logger.critical("no dnf base object")
        sys.exit(1)
    self.inroot = dbo.conf.installroot
    logger.debug("using install root: %s", self.inroot)

    if not buildarch:
        buildarch = get_buildarch(dbo)

    logger.info("setting up build architecture")
    self.arch = ArchData(buildarch)
    for attr in ('buildarch', 'basearch', 'libdir'):
        logger.debug("self.arch.%s = %s", attr, getattr(self.arch,attr))

    logger.info("setting up build parameters")
    self.product = DataHolder(name=product, version=version, release=release,
                              variant=variant, bugurl=bugurl, isfinal=isfinal)
    logger.debug("product data: %s", self.product)

    # NOTE: if you change isolabel, you need to change pungi to match, or
    # the pungi images won't boot.
    isolabel = volid or "%s-%s-%s" % (self.product.name, self.product.version,
                                      self.arch.basearch)

    # ISO 9660 volume ids are limited to 32 characters
    if len(isolabel) > 32:
        logger.fatal("the volume id cannot be longer than 32 characters")
        sys.exit(1)

    # NOTE: rb.root = dbo.conf.installroot (== self.inroot)
    rb = RuntimeBuilder(product=self.product, arch=self.arch, dbo=dbo,
                        templatedir=self.templatedir,
                        installpkgs=installpkgs,
                        excludepkgs=excludepkgs,
                        add_templates=add_templates,
                        add_template_vars=add_template_vars)

    logger.info("installing runtime packages")
    rb.install()

    # write .buildstamp
    buildstamp = BuildStamp(self.product.name, self.product.version,
                            self.product.bugurl, self.product.isfinal,
                            self.arch.buildarch)
    buildstamp.write(joinpaths(self.inroot, ".buildstamp"))

    # record package lists/sizes before cleanup so they can be compared later
    if self.debug:
        rb.writepkglists(joinpaths(logdir, "pkglists"))
        rb.writepkgsizes(joinpaths(logdir, "original-pkgsizes.txt"))

    logger.info("doing post-install configuration")
    rb.postinstall()

    # write .discinfo
    discinfo = DiscInfo(self.product.release, self.arch.basearch)
    discinfo.write(joinpaths(self.outputdir, ".discinfo"))

    # hardlink-copy the installroot into the workdir before cleanup
    logger.info("backing up installroot")
    installroot = joinpaths(self.workdir, "installroot")
    linktree(self.inroot, installroot)

    logger.info("generating kernel module metadata")
    rb.generate_module_data()

    logger.info("cleaning unneeded files")
    rb.cleanup()

    if verify:
        logger.info("verifying the installroot")
        if not rb.verify():
            sys.exit(1)
    else:
        logger.info("Skipping verify")

    if self.debug:
        rb.writepkgsizes(joinpaths(logdir, "final-pkgsizes.txt"))

    logger.info("creating the runtime image")
    runtime = "images/install.img"
    compression = self.conf.get("compression", "type")
    compressargs = self.conf.get("compression", "args").split()     # pylint: disable=no-member
    # optionally add the arch-specific BCJ filter to the compressor args
    if self.conf.getboolean("compression", "bcj"):
        if self.arch.bcj:
            compressargs += ["-Xbcj", self.arch.bcj]
        else:
            logger.info("no BCJ filter for arch %s", self.arch.basearch)
    rb.create_runtime(joinpaths(installroot,runtime),
                      compression=compression, compressargs=compressargs,
                      size=size)
    rb.finished()

    logger.info("preparing to build output tree and boot images")
    treebuilder = TreeBuilder(product=self.product, arch=self.arch,
                              inroot=installroot, outroot=self.outputdir,
                              runtime=runtime, isolabel=isolabel,
                              domacboot=domacboot, doupgrade=doupgrade,
                              templatedir=self.templatedir,
                              add_templates=add_arch_templates,
                              add_template_vars=add_arch_template_vars,
                              workdir=self.workdir)

    logger.info("rebuilding initramfs images")
    dracut_args = ["--xz", "--install", "/.buildstamp", "--no-early-microcode", "--add", "fips"]
    anaconda_args = dracut_args + ["--add", "anaconda pollcdrom qemu qemu-net"]

    # ppc64 cannot boot an initrd > 32MiB so remove some drivers
    if self.arch.basearch in ("ppc64", "ppc64le"):
        dracut_args.extend(["--omit-drivers", REMOVE_PPC64_DRIVERS])

        # Only omit dracut modules from the initrd so that they're kept for
        # upgrade.img
        anaconda_args.extend(["--omit", REMOVE_PPC64_MODULES])

    treebuilder.rebuild_initrds(add_args=anaconda_args)

    logger.info("populating output tree and building boot images")
    treebuilder.build()

    # write .treeinfo file and we're done
    treeinfo = TreeInfo(self.product.name, self.product.version,
                        self.product.variant, self.arch.basearch)
    for section, data in treebuilder.treeinfo_data.items():
        treeinfo.add_section(section, data)
    treeinfo.write(joinpaths(self.outputdir, ".treeinfo"))

    # cleanup
    if remove_temp:
        remove(self.workdir)
def read_module_set(name):
    """Return the set of stripped lines mentioning ".ko" from moddir/name.

    ``moddir`` is a free variable from the enclosing scope — presumably the
    kernel modules directory being processed; TODO confirm against caller.

    :param name: filename (relative to ``moddir``) of the module list to read
    :returns: stripped lines that contain ".ko"
    :rtype: set of str
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original open() relied on garbage collection to release it.
    with open(joinpaths(moddir, name)) as module_file:
        return {line.strip() for line in module_file if ".ko" in line}