def comment_test(self):
    """Values containing '#' survive a read, and the file round-trips."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        config = SimpleConfigFile(cfg_file.name)
        config.read()

        # '#' inside a value must not be treated as a comment start.
        for key, expected in (("ESSID", "Example Network #1"),
                              ("ESSID2", "Network #2"),
                              ("COMMENT", "Save this string")):
            self.assertEqual(config.get(key), expected)
        # Re-serializing the config reproduces the input exactly.
        self.assertEqual(str(config), self.TEST_CONFIG)
def comment_test(self):
    """Values containing '#' survive a read, and the file round-trips.

    Fix: NamedTemporaryFile defaults to binary mode ("w+b"), so writing
    the str TEST_CONFIG raised TypeError. Open the file in text mode.
    """
    with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
        testconfig.write(self.TEST_CONFIG)
        testconfig.flush()
        config = SimpleConfigFile(testconfig.name)
        config.read()
        # '#' inside a value must not be treated as a comment start.
        self.assertEqual(config.get("ESSID"), "Example Network #1")
        self.assertEqual(config.get("ESSID2"), "Network #2")
        self.assertEqual(config.get("COMMENT"), "Save this string")
        # Re-serializing the config reproduces the input exactly.
        self.assertEqual(str(config), self.TEST_CONFIG)
def set_and_get_test(self):
    """Setting and getting values"""
    scf = SimpleConfigFile()
    # Each case: the key used to set, the key used to look it up, and the
    # value — lookups are expected to succeed regardless of key case.
    cases = (('key1', 'key1', 'value1'),
             ('KEY2', 'key2', 'value2'),
             ('KEY3', 'KEY3', 'value3'),
             ('key4', 'KEY4', 'value4'))
    for set_key, get_key, value in cases:
        scf.set((set_key, value))
        self.assertEqual(scf.get(get_key), value)
def test_set_and_get(self):
    """Setting and getting values"""
    scf = SimpleConfigFile()
    # Each case: the key used to set, the key used to look it up, and the
    # value — lookups are expected to succeed regardless of key case.
    for set_key, get_key, value in (('key1', 'key1', 'value1'),
                                    ('KEY2', 'key2', 'value2'),
                                    ('KEY3', 'KEY3', 'value3'),
                                    ('key4', 'KEY4', 'value4')):
        scf.set((set_key, value))
        assert scf.get(get_key) == value
def test_comment(self):
    """Values containing '#' survive a read, and the file round-trips."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        config = SimpleConfigFile(cfg_file.name)
        config.read()

        # '#' inside a value must not be treated as a comment start.
        for key, expected in (("ESSID", "Example Network #1"),
                              ("ESSID2", "Network #2"),
                              ("COMMENT", "Save this string")):
            assert config.get(key) == expected
        # Re-serializing the config reproduces the input exactly.
        assert str(config) == self.TEST_CONFIG
def no_append_test(self):
    """simple_replace with add=False rewrites existing keys but adds none."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        replacements = [("BOOT", "BOOT=sometimes"),
                        ("NEWKEY", "NEWKEY=froboz")]
        simple_replace(cfg_file.name, replacements, add=False)

        config = SimpleConfigFile(cfg_file.name)
        config.read()
        # The existing key was replaced; the missing one was not appended.
        self.assertEqual(config.get("BOOT"), "sometimes")
        self.assertEqual(config.get("NEWKEY"), "")
def test_remove_key(self):
    """unset() removes a key; it reads back as '' after a reload."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        scf = SimpleConfigFile()
        scf.read(cfg_file.name)
        assert scf.get("BOOT") == "always"

        # Drop the key, persist the result, then reload from scratch.
        scf.unset("BOOT")
        scf.write(cfg_file.name)
        cfg_file.flush()
        scf.reset()
        scf.read(cfg_file.name)
        assert scf.get("BOOT") == ""
def remove_key_test(self):
    """unset() removes a key; it reads back as '' after a reload."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        scf = SimpleConfigFile()
        scf.read(cfg_file.name)
        self.assertEqual(scf.get("BOOT"), "always")

        # Drop the key, persist the result, then reload from scratch.
        scf.unset("BOOT")
        scf.write(cfg_file.name)
        cfg_file.flush()
        scf.reset()
        scf.read(cfg_file.name)
        self.assertEqual(scf.get("BOOT"), "")
def remove_key_test(self):
    """unset() removes a key; it reads back as '' after a reload.

    Fix: NamedTemporaryFile defaults to binary mode ("w+b"), so writing
    the str TEST_CONFIG raised TypeError. Open the file in text mode.
    """
    with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
        testconfig.write(self.TEST_CONFIG)
        testconfig.flush()
        scf = SimpleConfigFile()
        scf.read(testconfig.name)
        self.assertEqual(scf.get("BOOT"), "always")
        # Drop the key, persist the result, then reload from scratch.
        scf.unset("BOOT")
        scf.write(testconfig.name)
        testconfig.flush()
        scf.reset()
        scf.read(testconfig.name)
        self.assertEqual(scf.get("BOOT"), "")
def execute(self, storage, ksdata, instClass, users, payload):
    """ Execute the addon

    :param storage: Blivet storage object
    :param ksdata: Kickstart data object
    :param instClass: Anaconda installclass object
    :param users: Anaconda users object
    :param payload: object managing packages and environment groups for the installation

    Runs a temporary docker daemon against the installed system's docker
    directories (bind-mounted from the sysroot), executes the addon's
    %docker section content as a kickstart script, then writes the
    storage configs and updates OPTIONS in /etc/sysconfig/docker.
    Logs from both steps are copied into /var/log/anaconda/.
    """
    # Addon is a no-op unless it was enabled in the kickstart.
    if not self.enabled:
        return
    log.info("Executing docker addon")
    # This gets called after installation, before initramfs regeneration and kickstart %post scripts.
    # Bind-mount the target system's docker state/config dirs so the
    # daemon started below operates on the installed system's data.
    execWithRedirect("mount", ["-o", "bind", getSysroot()+"/var/lib/docker", "/var/lib/docker"])
    execWithRedirect("mount", ["-o", "bind", getSysroot()+"/etc/docker", "/etc/docker"])
    # Start up the docker daemon
    log.debug("Starting docker daemon")
    docker_cmd = ["docker", "daemon"]
    if ksdata.selinux.selinux:
        docker_cmd += ["--selinux-enabled"]
    # Add storage specific arguments to the command
    docker_cmd += self.storage.docker_cmd(storage, ksdata, instClass, users)
    # Networking is disabled for the temporary daemon.
    docker_cmd += ["--ip-forward=false", "--iptables=false"]
    docker_cmd += self.extra_args
    docker_proc = startProgram(docker_cmd, stdout=open("/tmp/docker-daemon.log", "w"), reset_lang=True)
    log.debug("Running docker commands")
    # Run the addon section's content as a kickstart script outside the chroot.
    script = AnacondaKSScript(self.content, inChroot=False, logfile="/tmp/docker-addon.log")
    script.run("/")
    # Kill the docker process
    log.debug("Shutting down docker daemon")
    docker_proc.kill()
    log.debug("Writing docker configs")
    # Delegate storage-backend-specific config files to the storage helper.
    self.storage.write_configs(storage, ksdata, instClass, users)
    # Rewrite the OPTIONS entry with the extra args and/or storage specific changes
    try:
        docker_cfg = SimpleConfigFile(getSysroot()+"/etc/sysconfig/docker")
        docker_cfg.read()
        options = self.storage.options(docker_cfg.get("OPTIONS"))
        if self.save_args:
            log.info("Adding extra args to docker OPTIONS")
            options += " " + " ".join(self.extra_args)
        docker_cfg.set(("OPTIONS", options))
        docker_cfg.write()
    except IOError as e:
        # Best-effort: a failed OPTIONS update is logged but does not
        # abort the installation.
        log.error("Error updating OPTIONS in /etc/sysconfig/docker: %s", e)
    # Copy the log files to the system
    dstdir = "/var/log/anaconda/"
    os.makedirs(dstdir, exist_ok=True)
    for l in ["docker-daemon.log", "docker-addon.log"]:
        shutil.copy2("/tmp/"+l, dstdir+l)
def read_test(self):
    """read() loads keys from a file; lookups are case-insensitive.

    Fix: the original wrote via a bare ``open(...).write(...)``, leaking
    the file handle (closed only by the GC). Use a context manager so the
    data is flushed and the handle closed deterministically.
    """
    with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
        scf = SimpleConfigFile()
        with open(testconfig.name, 'w') as f:
            f.write('KEY1="value1"\n')
        testconfig.flush()
        scf.read(testconfig.name)
        # Key was written upper-case but is retrievable lower-case.
        self.assertEqual(scf.get('key1'), 'value1')
def read_test(self):
    """read() loads keys from a file; lookups are case-insensitive.

    Fixes: the original wrote via a bare ``open(...).write(...)``,
    leaking the file handle (closed only by the GC) — use a context
    manager. Also open the tempfile in text mode for consistency with
    the sibling tests (it is only used for its name here, so the mode
    was harmless but inconsistent).
    """
    with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
        scf = SimpleConfigFile()
        with open(testconfig.name, 'w') as f:
            f.write('KEY1="value1"\n')
        testconfig.flush()
        scf.read(testconfig.name)
        # Key was written upper-case but is retrievable lower-case.
        self.assertEqual(scf.get('key1'), 'value1')
def test_read(self):
    """read() loads keys from a file; lookups are case-insensitive.

    Fix: the original wrote via a bare ``open(...).write(...)``, leaking
    the file handle (closed only by the GC). Use a context manager so the
    data is flushed and the handle closed deterministically.
    """
    with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
        scf = SimpleConfigFile()
        with open(testconfig.name, 'w') as f:
            f.write('KEY1="value1"\n')
        testconfig.flush()
        scf.read(testconfig.name)
        # Key was written upper-case but is retrievable lower-case.
        assert scf.get('key1') == 'value1'
def execute(self, storage, ksdata, instClass, users):
    """ Execute the addon

    :param storage: Blivet storage object
    :param ksdata: Kickstart data object
    :param instClass: Anaconda installclass object
    :param users: Anaconda users object

    Runs a temporary docker daemon (devicemapper storage driver backed by
    the addon's thin pool) against the installed system's docker
    directories, executes the addon's content as a kickstart script, then
    writes docker-storage configs. Logs are copied to /var/log/anaconda/.
    """
    log.info("Executing docker addon")
    # This gets called after installation, before initramfs regeneration and kickstart %post scripts.
    # Bind-mount the target system's docker state/config dirs so the
    # daemon started below operates on the installed system's data.
    execWithRedirect("mount", ["-o", "bind", getSysroot()+"/var/lib/docker", "/var/lib/docker"])
    execWithRedirect("mount", ["-o", "bind", getSysroot()+"/etc/docker", "/etc/docker"])
    # Start up the docker daemon
    log.debug("Starting docker daemon")
    # devicemapper storage options: filesystem type and the thin pool
    # device derived from the addon's volume group name.
    dm_fs = "dm.fs=%s" % self.fstype
    pool_name = "dm.thinpooldev=/dev/mapper/%s-docker--pool" % self.vgname
    docker_cmd = ["docker", "daemon"]
    if ksdata.selinux.selinux:
        docker_cmd += ["--selinux-enabled"]
    # Networking is disabled for the temporary daemon.
    docker_cmd += ["--storage-driver", "devicemapper", "--storage-opt", dm_fs,
                   "--storage-opt", pool_name, "--ip-forward=false", "--iptables=false"]
    docker_cmd += self.extra_args
    docker_proc = startProgram(docker_cmd, stdout=open("/tmp/docker-daemon.log", "w"), reset_lang=True)
    log.debug("Running docker commands")
    # Run the addon section's content as a kickstart script outside the chroot.
    script = AnacondaKSScript(self.content, inChroot=False, logfile="/tmp/docker-addon.log")
    script.run("/")
    # Kill the docker process
    log.debug("Shutting down docker daemon")
    docker_proc.kill()
    log.debug("Writing docker configs")
    # Persist the same storage options used above so the installed
    # system's docker daemon matches what was set up here.
    with open(getSysroot()+"/etc/sysconfig/docker-storage", "w") as fp:
        fp.write('DOCKER_STORAGE_OPTIONS="--storage-driver devicemapper '
                 '--storage-opt %s --storage-opt %s"\n' % (dm_fs, pool_name))
    with open(getSysroot()+"/etc/sysconfig/docker-storage-setup", "a") as fp:
        fp.write("VG=%s\n" % self.vgname)
    # Rewrite the OPTIONS entry with the extra args, if requested.
    if self.extra_args and self.save_args:
        try:
            docker_cfg = SimpleConfigFile(getSysroot()+"/etc/sysconfig/docker")
            docker_cfg.read()
            options = docker_cfg.get("OPTIONS")+" " + " ".join(self.extra_args)
            docker_cfg.set(("OPTIONS", options))
            docker_cfg.write()
        except IOError as e:
            # Best-effort: a failed OPTIONS update is logged but does not
            # abort the installation.
            log.error("Error updating OPTIONS in /etc/sysconfig/docker: %s", e)
    # Copy the log files to the system
    dstdir = "/var/log/anaconda/"
    os.makedirs(dstdir, exist_ok=True)
    for l in ["docker-daemon.log", "docker-addon.log"]:
        shutil.copy2("/tmp/"+l, dstdir+l)
def remove_key_test(self):
    """unset() removes a key; it reads back as '' after a reload.

    Fix: the original persisted to a hard-coded, world-shared "/tmp/file"
    path — race-prone and insecure on a multi-user system. Write to a
    private NamedTemporaryFile instead.
    """
    import tempfile
    from pyanaconda.simpleconfig import SimpleConfigFile
    scf = SimpleConfigFile()
    scf.read(self.PATH)
    scf.unset("BOOT")
    with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
        scf.write(testconfig.name)
        # Reload from scratch to prove the key is gone from the file.
        scf.reset()
        scf.read(testconfig.name)
        self.assertEqual(scf.get("BOOT"), "")
def replace_test(self):
    """simple_replace rewrites the value of an existing key."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        simple_replace(cfg_file.name, [("BOOT", "BOOT=never")])

        config = SimpleConfigFile(cfg_file.name)
        config.read()
        self.assertEqual(config.get("BOOT"), "never")
def append_test(self):
    """simple_replace (default add=True) appends a key that is missing."""
    with tempfile.NamedTemporaryFile(mode="wt") as cfg_file:
        cfg_file.write(self.TEST_CONFIG)
        cfg_file.flush()

        simple_replace(cfg_file.name, [("NEWKEY", "NEWKEY=froboz")])

        config = SimpleConfigFile(cfg_file.name)
        config.read()
        self.assertEqual(config.get("NEWKEY"), "froboz")
def unset_test(self):
    """A key removed with unset() reads back as the empty string."""
    scf = SimpleConfigFile()
    scf.set(('key1', 'value1'))
    scf.unset('key1')
    self.assertEqual(scf.get('key1'), '')
def test_unset(self):
    """A key removed with unset() reads back as the empty string."""
    scf = SimpleConfigFile()
    scf.set(('key1', 'value1'))
    scf.unset('key1')
    assert scf.get('key1') == ''
def start_build(cfg, dnflock, gitlock, branch, recipe_name, compose_type, test_mode=0):
    """ Start the build

    :param cfg: Configuration object
    :type cfg: ComposerConfig
    :param dnflock: Lock and YumBase for depsolving
    :type dnflock: YumLock
    :param recipe: The recipe to build
    :type recipe: str
    :param compose_type: The type of output to create from the recipe
    :type compose_type: str
    :returns: Unique ID for the build that can be used to track its status
    :rtype: str

    Depsolves the recipe, estimates the disk size, writes the recipe,
    frozen recipe, dependency list, and final kickstart into a new
    results directory, then symlinks it into queue/new/ to be picked up
    by the compose queue monitor.

    :raises RuntimeError: for an invalid compose type, a depsolve
        failure, or when no repos are enabled
    """
    share_dir = cfg.get("composer", "share_dir")
    lib_dir = cfg.get("composer", "lib_dir")

    # Make sure compose_type is valid
    if compose_type not in compose_types(share_dir):
        raise RuntimeError("Invalid compose type (%s), must be one of %s" % (compose_type, compose_types(share_dir)))

    with gitlock.lock:
        (commit_id, recipe) = read_recipe_and_id(gitlock.repo, branch, recipe_name)

    # Combine modules and packages and depsolve the list
    # TODO include the version/glob in the depsolving
    module_nver = recipe.module_nver
    package_nver = recipe.package_nver
    projects = sorted(set(module_nver + package_nver), key=lambda p: p[0].lower())
    deps = []
    try:
        with dnflock.lock:
            (installed_size, deps) = projects_depsolve_with_size(dnflock.dbo, projects, recipe.group_names, with_core=False)
    except ProjectsError as e:
        log.error("start_build depsolve: %s", str(e))
        raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e)))

    # Read the kickstart template for this type
    ks_template_path = joinpaths(share_dir, "composer", compose_type) + ".ks"
    ks_template = open(ks_template_path, "r").read()

    # How much space will the packages in the default template take?
    ks_version = makeVersion()
    ks = KickstartParser(ks_version, errorsAreFatal=False, missingIncludeIsFatal=False)
    # The template lacks a closing %end for its %packages section; append one
    # so the parser accepts it.
    ks.readKickstartFromString(ks_template + "\n%end\n")
    pkgs = [(name, "*") for name in ks.handler.packages.packageList]
    grps = [grp.name for grp in ks.handler.packages.groupList]
    try:
        with dnflock.lock:
            (template_size, _) = projects_depsolve_with_size(dnflock.dbo, pkgs, grps, with_core=not ks.handler.packages.nocore)
    except ProjectsError as e:
        log.error("start_build depsolve: %s", str(e))
        raise RuntimeError("Problem depsolving %s: %s" % (recipe["name"], str(e)))
    log.debug("installed_size = %d, template_size=%d", installed_size, template_size)

    # Minimum LMC disk size is 1GiB, and anaconda bumps the estimated size up by 10% (which doesn't always work).
    # XXX BUT Anaconda has a bug, it won't execute a kickstart on a disk smaller than 3000 MB
    # XXX There is an upstream patch pending, but until then, use that as the minimum
    # NOTE(review): the 1.2 multiplier (20% headroom) is applied AFTER the
    # 3000 MB floor, so the effective minimum is 3.6e9 bytes — confirm this
    # ordering is intentional.
    installed_size = max(3e9, int((installed_size + template_size))) * 1.2
    log.debug("/ partition size = %d", installed_size)

    # Create the results directory
    build_id = str(uuid4())
    results_dir = joinpaths(lib_dir, "results", build_id)
    os.makedirs(results_dir)

    # Write the recipe commit hash
    commit_path = joinpaths(results_dir, "COMMIT")
    with open(commit_path, "w") as f:
        f.write(commit_id)

    # Write the original recipe
    recipe_path = joinpaths(results_dir, "blueprint.toml")
    with open(recipe_path, "w") as f:
        f.write(recipe.toml())

    # Write the frozen recipe
    frozen_recipe = recipe.freeze(deps)
    recipe_path = joinpaths(results_dir, "frozen.toml")
    with open(recipe_path, "w") as f:
        f.write(frozen_recipe.toml())

    # Write out the dependencies to the results dir
    deps_path = joinpaths(results_dir, "deps.toml")
    with open(deps_path, "w") as f:
        f.write(toml.dumps({"packages": deps}))

    # Save a copy of the original kickstart
    shutil.copy(ks_template_path, results_dir)

    with dnflock.lock:
        repos = list(dnflock.dbo.repos.iter_enabled())
    if not repos:
        raise RuntimeError("No enabled repos, canceling build.")

    # Create the final kickstart with repos and package list
    ks_path = joinpaths(results_dir, "final-kickstart.ks")
    with open(ks_path, "w") as f:
        # First enabled repo becomes the install URL; the rest become
        # additional repo lines.
        ks_url = repo_to_ks(repos[0], "url")
        log.debug("url = %s", ks_url)
        f.write('url %s\n' % ks_url)
        for idx, r in enumerate(repos[1:]):
            ks_repo = repo_to_ks(r, "baseurl")
            log.debug("repo composer-%s = %s", idx, ks_repo)
            f.write('repo --name="composer-%s" %s\n' % (idx, ks_repo))

        # Setup the disk for booting
        # TODO Add GPT and UEFI boot support
        f.write('clearpart --all --initlabel\n')

        # Write the root partition and it's size in MB (rounded up)
        f.write('part / --fstype="ext4" --size=%d\n' % ceil(installed_size / 1024**2))

        f.write(ks_template)

        # Pin the exact depsolved NEVRAs in the %packages section, then
        # close it (the template leaves the section open).
        for d in deps:
            f.write(dep_nevra(d) + "\n")
        f.write("%end\n")

        add_customizations(f, recipe)

    # Setup the config to pass to novirt_install
    log_dir = joinpaths(results_dir, "logs/")
    cfg_args = compose_args(compose_type)

    # Get the title, project, and release version from the host
    if not os.path.exists("/etc/os-release"):
        log.error("/etc/os-release is missing, cannot determine product or release version")
    os_release = SimpleConfigFile("/etc/os-release")
    os_release.read()
    log.debug("os_release = %s", os_release)

    cfg_args["title"] = os_release.get("PRETTY_NAME")
    cfg_args["project"] = os_release.get("NAME")
    cfg_args["releasever"] = os_release.get("VERSION_ID")
    cfg_args["volid"] = ""

    cfg_args.update({
        "compression": "xz",
        "compress_args": [],
        "ks": [ks_path],
        "logfile": log_dir,
        "timeout": 60,  # 60 minute timeout
    })
    with open(joinpaths(results_dir, "config.toml"), "w") as f:
        f.write(toml.dumps(cfg_args))

    # Set the initial status
    open(joinpaths(results_dir, "STATUS"), "w").write("WAITING")

    # Set the test mode, if requested
    if test_mode > 0:
        open(joinpaths(results_dir, "TEST"), "w").write("%s" % test_mode)

    write_timestamp(results_dir, TS_CREATED)
    log.info("Adding %s (%s %s) to compose queue", build_id, recipe["name"], compose_type)
    # The queue monitor watches queue/new/ for symlinks to new builds.
    os.symlink(results_dir, joinpaths(lib_dir, "queue/new/", build_id))

    return build_id