def write_autoinstall_template(self, file_path: str, data: str) -> bool:
    """
    Write an automatic OS installation template.

    :param file_path: automatic installation template relative file path
    :param data: automatic installation template content
    :return: True if the template was written.
    """
    # Validation normalizes the relative path (and guards the base dir).
    file_path = self.validate_autoinstall_template_file_path(
        file_path, for_item=False, new_autoinstall=True)

    file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
    try:
        utils.mkdir(os.path.dirname(file_full_path))
    except Exception:
        # Narrowed from a bare except; utils.die logs and aborts.
        utils.die(
            self.logger,
            "unable to create directory for automatic OS installation template at %s" % file_path)

    # Context manager guarantees the handle is closed even if the write fails.
    with open(file_full_path, "w+") as template_fh:
        template_fh.write(data)

    return True
def copy_single_distro_files(self, d, dirtree, symlink_ok):
    """
    Copy (or link) the kernel and initrd of a single distro into the
    images tree below ``dirtree``.

    :param d: distro object; its ``kernel``/``initrd`` paths are resolved
        through ``utils.find_kernel`` / ``utils.find_initrd``.
    :param dirtree: base directory; files land in ``<dirtree>/images/<d.name>``.
    :param symlink_ok: whether a symlink is acceptable instead of a copy
        for local (non-remote) files.
    :raises CX: if the kernel or initrd cannot be located.
    """
    distros = os.path.join(dirtree, "images")
    distro_dir = os.path.join(distros, d.name)
    utils.mkdir(distro_dir)
    kernel = utils.find_kernel(d.kernel)  # full path
    initrd = utils.find_initrd(d.initrd)  # full path

    if kernel is None:
        raise CX("kernel not found: %(file)s, distro: %(distro)s" % {"file": d.kernel, "distro": d.name})

    if initrd is None:
        raise CX("initrd not found: %(file)s, distro: %(distro)s" % {"file": d.initrd, "distro": d.name})

    # Koan manages remote kernel itself, but for consistent PXE
    # configurations the synchronization is still necessary
    if not utils.file_is_remote(kernel):
        b_kernel = os.path.basename(kernel)
        dst1 = os.path.join(distro_dir, b_kernel)
        utils.linkfile(kernel, dst1, symlink_ok=symlink_ok, api=self.api, logger=self.logger)
    else:
        b_kernel = os.path.basename(kernel)
        dst1 = os.path.join(distro_dir, b_kernel)
        # NOTE(review): remote copies pass api=None while local links pass
        # self.api — confirm this asymmetry is intentional.
        utils.copyremotefile(kernel, dst1, api=None, logger=self.logger)

    if not utils.file_is_remote(initrd):
        b_initrd = os.path.basename(initrd)
        dst2 = os.path.join(distro_dir, b_initrd)
        utils.linkfile(initrd, dst2, symlink_ok=symlink_ok, api=self.api, logger=self.logger)
    else:
        b_initrd = os.path.basename(initrd)
        dst1 = os.path.join(distro_dir, b_initrd)
        utils.copyremotefile(initrd, dst1, api=None, logger=self.logger)
def write_autoinstall_snippet(self, file_path, data):
    """
    Write an automatic OS installation snippet.

    :param file_path: snippet relative file path below the snippets base dir
    :param data: snippet content
    """
    file_path = self.validate_autoinstall_snippet_file_path(file_path, new_snippet=True)
    file_full_path = "%s/%s" % (self.snippets_base_dir, file_path)
    try:
        utils.mkdir(os.path.dirname(file_full_path))
    except Exception:
        # Narrowed from a bare except; utils.die logs and aborts.
        utils.die(self.logger, "unable to create directory for automatic OS installation snippet at %s" % file_path)

    # Context manager guarantees the handle is closed even if the write fails.
    with open(file_full_path, "w+") as snippet_fh:
        snippet_fh.write(data)
def write_autoinstall_snippet(self, file_path, data):
    """
    Write an automatic OS installation snippet.

    :param file_path: snippet relative file path below the snippets base dir
    :param data: snippet content
    """
    file_path = self.validate_autoinstall_snippet_file_path(
        file_path, new_snippet=True)
    file_full_path = "%s/%s" % (self.snippets_base_dir, file_path)
    try:
        utils.mkdir(os.path.dirname(file_full_path))
    except Exception:
        # Narrowed from a bare except; utils.die logs and aborts.
        utils.die(
            self.logger,
            "unable to create directory for automatic OS installation snippet at %s" % file_path)

    # Context manager guarantees the handle is closed even if the write fails.
    with open(file_full_path, "w+") as snippet_fh:
        snippet_fh.write(data)
def write_boot_files_distro(self, distro):
    """
    Template and copy the ``boot_files`` entries of a distro into place.

    :param distro: the distro object whose blended ``boot_files`` dict
        (template source -> target path) is processed.
    :return: 0 always; per-file failures are logged, not raised.
    """
    # Collapse the object down to a rendered datastructure.
    # The second argument set to False means we don't collapse dicts/arrays into a flat string.
    target = utils.blender(self.api, False, distro)

    # Create metadata for the templar function.
    # Right now, just using local_img_path, but adding more Cobbler variables here would probably be good.
    metadata = {}
    metadata["local_img_path"] = os.path.join(self.bootloc, "images", distro.name)
    metadata["web_img_path"] = os.path.join(self.webdir, "distro_mirror", distro.name)
    # Create the templar instance. Used to template the target directory
    templater = templar.Templar(self.collection_mgr)

    # Loop through the dict of boot files, executing a cp for each one
    self.logger.info("processing boot_files for distro: %s" % distro.name)
    for boot_file in list(target["boot_files"].keys()):
        rendered_target_file = templater.render(boot_file, metadata, None)
        rendered_source_file = templater.render(target["boot_files"][boot_file], metadata, None)
        # Pre-bind so the error log below never references unbound names
        # (the original could fail before `file`/`filedst` were assigned).
        src_file = rendered_source_file
        filedst = rendered_target_file
        try:
            for src_file in glob.glob(rendered_source_file):
                if src_file == rendered_source_file:
                    # this wasn't really a glob, so just copy it as is
                    filedst = rendered_target_file
                else:
                    # this was a glob, so figure out what the destination file path/name should be
                    tgt_path, tgt_file = os.path.split(src_file)
                    rnd_path, rnd_file = os.path.split(rendered_target_file)
                    filedst = os.path.join(rnd_path, tgt_file)

                    if not os.path.isdir(rnd_path):
                        utils.mkdir(rnd_path)
                if not os.path.isfile(filedst):
                    shutil.copyfile(src_file, filedst)
                    self.collection_mgr.api.log("copied file %s to %s for %s" % (src_file, filedst, distro.name))
        except Exception:
            # Narrowed from a bare except; keep best-effort behavior.
            self.logger.error("failed to copy file %s to %s for %s", src_file, filedst, distro.name)

    return 0
def test_mkdir():
    """Verify utils.mkdir creates a previously absent directory."""
    # TODO: Check how an already existing folder is handled.

    # Arrange: pick a tmpfs-backed path and make sure it does not exist yet.
    folder = "/dev/shm/testfoldercreation"
    mode = 0o600
    try:
        shutil.rmtree(folder)
    except OSError:
        pass

    # Sanity check so the assertion below actually proves creation happened.
    assert not os.path.exists(folder)

    # Act
    utils.mkdir(folder, mode)

    # Assert
    assert os.path.exists(folder)
def write_autoinstall_snippet(self, file_path, data):
    """
    Writes a snippet with the given content to the relative path under the snippet root directory.

    :param file_path: The relative path under the configured snippet base dir.
    :type file_path: str
    :param data: The snippet code.
    :type data: str
    """
    file_path = self.validate_autoinstall_snippet_file_path(file_path, new_snippet=True)
    file_full_path = "%s/%s" % (self.snippets_base_dir, file_path)
    try:
        utils.mkdir(os.path.dirname(file_full_path))
    except Exception:
        # Narrowed from a bare except; utils.die logs and aborts.
        utils.die(self.logger, "unable to create directory for automatic OS installation snippet at %s" % file_path)

    # Context manager guarantees the handle is closed even if the write fails.
    with open(file_full_path, "w+") as snippet_fh:
        snippet_fh.write(data)
def write_autoinstall_template(self, file_path, data):
    """
    Write an automatic OS installation template.

    :param file_path: automatic installation template relative file path
    :param data: automatic installation template content
    :return: True if the template was written.
    """
    file_path = self.validate_autoinstall_template_file_path(file_path, for_item=False, new_autoinstall=True)
    file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
    try:
        utils.mkdir(os.path.dirname(file_full_path))
    except Exception:
        # Narrowed from a bare except; utils.die logs and aborts.
        utils.die(self.logger, "unable to create directory for automatic OS installation template at %s" % file_path)

    # Context manager guarantees the handle is closed even if the write fails.
    with open(file_full_path, "w+") as template_fh:
        template_fh.write(data)

    return True
def make_tftpboot(self):
    """
    Make directories for tftpboot images.

    Creates every missing directory below the tftpboot root and the
    ``grub/images`` symlink pointing at the shared images directory.
    """
    # Bootloader directories must exist before the images symlink is created.
    for directory in (self.pxelinux_dir, self.grub_dir):
        if not os.path.exists(directory):
            utils.mkdir(directory, logger=self.logger)

    # grub expects an "images" entry inside its own directory; link it to
    # the shared ../images tree instead of duplicating files.
    grub_images_link = os.path.join(self.grub_dir, "images")
    if not os.path.exists(grub_images_link):
        os.symlink("../images", grub_images_link)

    # Remaining directories (order does not matter among these).
    for directory in (self.images_dir, self.rendered_dir,
                      self.yaboot_bin_dir, self.yaboot_cfg_dir):
        if not os.path.exists(directory):
            utils.mkdir(directory, logger=self.logger)
def import_tree(self, mirror_url, mirror_name, network_root=None, autoinstall_file=None, rsync_flags=None, arch=None, breed=None, os_version=None, logger=None):
    """
    Automatically import a directory tree full of distribution files.

    :param mirror_url: Can be a string that represents a path, a user@host
        syntax for SSH, or an rsync:// address. If mirror_url is a
        filesystem path and mirroring is not desired, set network_root to
        something like "nfs://path/to/mirror_url/root".
    :param mirror_name: name under which the mirror is imported
    :param network_root: remote (nfs/ftp/http) root marking where the files are served from
    :param autoinstall_file: user-specified response file overriding the default
    :param rsync_flags: extra flags appended to the rsync command
    :param arch: user-specified architecture
    :param breed: user-specified breed
    :param os_version: user-specified OS version
    :param logger: logger handed to the import manager
    :return: False on argument/validation errors, otherwise the result of
        the signature-based import manager run.
    """
    self.log("import_tree", [mirror_url, mirror_name, network_root, autoinstall_file, rsync_flags])

    # both --path and --name are required arguments
    if mirror_url is None:
        self.log("import failed. no --path specified")
        return False
    if mirror_name is None:
        self.log("import failed. no --name specified")
        return False

    path = os.path.normpath("%s/distro_mirror/%s" % (self.settings().webdir, mirror_name))
    if arch is not None:
        arch = arch.lower()
        if arch == "x86":
            # be consistent
            arch = "i386"
        if path.split("-")[-1] != arch:
            path += ("-%s" % arch)

    # we need to mirror (copy) the files
    self.log("importing from a network location, running rsync to fetch the files first")

    utils.mkdir(path)

    # prevent rsync from creating the directory name twice
    # if we are copying via rsync
    if not mirror_url.endswith("/"):
        mirror_url = "%s/" % mirror_url

    if mirror_url.startswith("http://") or mirror_url.startswith("ftp://") or mirror_url.startswith("nfs://"):
        # http mirrors are kind of primative. rsync is better.
        # that's why this isn't documented in the manpage and we don't support them.
        # TODO: how about adding recursive FTP as an option?
        self.log("unsupported protocol")
        return False
    else:
        # good, we're going to use rsync..
        # we don't use SSH for public mirrors and local files.
        # presence of user@host syntax means use SSH
        spacer = ""
        if not mirror_url.startswith("rsync://") and not mirror_url.startswith("/"):
            spacer = ' -e "ssh" '
        rsync_cmd = RSYNC_CMD
        if rsync_flags:
            rsync_cmd += " " + rsync_flags

        # if --available-as was specified, limit the files we
        # pull down via rsync to just those that are critical
        # to detecting what the distro is
        if network_root is not None:
            rsync_cmd += " --include-from=/etc/cobbler/import_rsync_whitelist"

        # kick off the rsync now
        utils.run_this(rsync_cmd, (spacer, mirror_url, path), self.logger)

    if network_root is not None:
        # in addition to mirroring, we're going to assume the path is available
        # over http, ftp, and nfs, perhaps on an external filer. scanning still requires
        # --mirror is a filesystem path, but --available-as marks the network path.
        # this allows users to point the path at a directory containing just the network
        # boot files while the rest of the distro files are available somewhere else.

        # find the filesystem part of the path, after the server bits, as each distro
        # URL needs to be calculated relative to this.
        if not network_root.endswith("/"):
            network_root += "/"
        valid_roots = ["nfs://", "ftp://", "http://"]
        for valid_root in valid_roots:
            if network_root.startswith(valid_root):
                break
        else:
            self.log("Network root given to --available-as must be nfs://, ftp://, or http://")
            return False

        if network_root.startswith("nfs://"):
            try:
                (a, b, rest) = network_root.split(":", 3)
            except ValueError:
                # Narrowed from a bare except: only the unpacking can fail here.
                self.log("Network root given to --available-as is missing a colon, please see the manpage example.")
                return False

    # NOTE: a large block of commented-out "manage/import" module-discovery
    # code was removed here; signature-based import is the only mechanism.
    import_module = self.get_module_by_name("manage_import_signatures").get_import_manager(self._collection_mgr, logger)
    import_module.run(path, mirror_name, network_root, autoinstall_file, arch, breed, os_version)
def create_local_file(self, dest_path, repo, output=True):
    """
    Creates Yum config files for use by reposync

    Two uses:
    (A) output=True, Create local files that can be used with yum on provisioned clients to make use of this mirror.
    (B) output=False, Create a temporary file for yum to feed into yum for mirroring

    :param dest_path: directory the ``.repo`` file is written into
    :param repo: repo object (name, mirror, mirror_locally, yumopts, proxy, priority)
    :param output: selects use case (A) when True, (B) when False
    :return: the path of the file that was written
    """
    # the output case will generate repo configuration files which are usable
    # for the installed systems. They need to be made compatible with --server-override
    # which means they are actually templates, which need to be rendered by a cobbler-sync
    # on per profile/system basis.
    if output:
        fname = os.path.join(dest_path, "config.repo")
    else:
        fname = os.path.join(dest_path, "%s.repo" % repo.name)
    self.logger.debug("creating: %s" % fname)
    if not os.path.exists(dest_path):
        utils.mkdir(dest_path)
    config_file = open(fname, "w+")
    if not output:
        # reposdir=/dev/null keeps yum from mixing in the system's own repos.
        config_file.write("[main]\nreposdir=/dev/null\n")
    config_file.write("[%s]\n" % repo.name)
    config_file.write("name=%s\n" % repo.name)
    optenabled = False
    optgpgcheck = False
    if output:
        if repo.mirror_locally:
            # ${http_server} is a template placeholder rendered by cobbler-sync.
            line = "baseurl=http://${http_server}/cobbler/repo_mirror/%s\n" % (repo.name)
        else:
            mstr = repo.mirror
            if mstr.startswith("/"):
                mstr = "file://%s" % mstr
            line = "baseurl=%s\n" % mstr
        config_file.write(line)
        # user may have options specific to certain yum plugins
        # add them to the file
        for x in repo.yumopts:
            config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
            if x == "enabled":
                optenabled = True
            if x == "gpgcheck":
                optgpgcheck = True
    else:
        mstr = repo.mirror
        if mstr.startswith("/"):
            mstr = "file://%s" % mstr
        line = "baseurl=%s\n" % mstr
        # Resolve @@server@@ now: the mirroring file is consumed directly, not templated.
        if self.settings.http_port not in (80, '80'):
            http_server = "%s:%s" % (self.settings.server, self.settings.http_port)
        else:
            http_server = self.settings.server
        line = line.replace("@@server@@", http_server)
        config_file.write(line)
        config_proxy = None
        if repo.proxy == '<<inherit>>':
            config_proxy = self.settings.proxy_url_ext
        elif repo.proxy != '' and repo.proxy != '<<None>>':
            config_proxy = repo.proxy
        if config_proxy is not None:
            config_file.write("proxy=%s\n" % config_proxy)
        if 'exclude' in list(repo.yumopts.keys()):
            self.logger.debug("excluding: %s" % repo.yumopts['exclude'])
            config_file.write("exclude=%s\n" % repo.yumopts['exclude'])
        if not optenabled:
            config_file.write("enabled=1\n")
        config_file.write("priority=%s\n" % repo.priority)
        # FIXME: potentially might want a way to turn this on/off on a per-repo basis
        if not optgpgcheck:
            config_file.write("gpgcheck=0\n")
        # user may have options specific to certain yum plugins
        # add them to the file
        # NOTE(review): indentation reconstructed from flattened source —
        # this second options loop is taken to belong to the output=False
        # branch; confirm against upstream.
        for x in repo.yumopts:
            config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
            if x == "enabled":
                optenabled = True
            if x == "gpgcheck":
                optgpgcheck = True
    config_file.close()
    return fname
def import_tree(self, mirror_url, mirror_name, network_root=None, autoinstall_file=None, rsync_flags=None, arch=None, breed=None, os_version=None, logger=None):
    """
    Automatically import a directory tree full of distribution files.

    :param mirror_url: Can be a string that represents a path, a user@host
        syntax for SSH, or an rsync:// address. If mirror_url is a
        filesystem path and mirroring is not desired, set network_root to
        something like "nfs://path/to/mirror_url/root".
    :param mirror_name: name under which the mirror is imported
    :param network_root: remote (nfs/ftp/http/https) root marking where the files are served from
    :param autoinstall_file: user-specified response file overriding the default
    :param rsync_flags: extra flags appended to the rsync command
    :param arch: user-specified architecture
    :param breed: user-specified breed
    :param os_version: user-specified OS version
    :param logger: logger handed to the import manager
    :return: False on argument/validation errors, otherwise the result of
        the signature-based import manager run.
    """
    self.log("import_tree", [mirror_url, mirror_name, network_root, autoinstall_file, rsync_flags])

    # both --path and --name are required arguments
    # ("not mirror_url" already covers None and the empty string)
    if not mirror_url:
        self.log("import failed. no --path specified")
        return False
    if not mirror_name:
        self.log("import failed. no --name specified")
        return False

    path = os.path.normpath("%s/distro_mirror/%s" % (self.settings().webdir, mirror_name))
    if arch is not None:
        arch = arch.lower()
        if arch == "x86":
            # be consistent
            arch = "i386"
        if path.split("-")[-1] != arch:
            path += ("-%s" % arch)

    # we need to mirror (copy) the files
    self.log("importing from a network location, running rsync to fetch the files first")

    utils.mkdir(path)

    # prevent rsync from creating the directory name twice
    # if we are copying via rsync
    if not mirror_url.endswith("/"):
        mirror_url = "%s/" % mirror_url

    if mirror_url.startswith("http://") or mirror_url.startswith("https://") or mirror_url.startswith("ftp://") or mirror_url.startswith("nfs://"):
        # http mirrors are kind of primative. rsync is better.
        # that's why this isn't documented in the manpage and we don't support them.
        # TODO: how about adding recursive FTP as an option?
        self.log("unsupported protocol")
        return False
    else:
        # good, we're going to use rsync..
        # we don't use SSH for public mirrors and local files.
        # presence of user@host syntax means use SSH
        spacer = ""
        if not mirror_url.startswith("rsync://") and not mirror_url.startswith("/"):
            spacer = ' -e "ssh" '
        rsync_cmd = RSYNC_CMD
        if rsync_flags:
            rsync_cmd += " " + rsync_flags

        # if --available-as was specified, limit the files we
        # pull down via rsync to just those that are critical
        # to detecting what the distro is
        if network_root is not None:
            rsync_cmd += " --include-from=/etc/cobbler/import_rsync_whitelist"

        # kick off the rsync now
        utils.run_this(rsync_cmd, (spacer, mirror_url, path), self.logger)

    if network_root is not None:
        # in addition to mirroring, we're going to assume the path is available
        # over http, ftp, and nfs, perhaps on an external filer. scanning still requires
        # --mirror is a filesystem path, but --available-as marks the network path.
        # this allows users to point the path at a directory containing just the network
        # boot files while the rest of the distro files are available somewhere else.

        # find the filesystem part of the path, after the server bits, as each distro
        # URL needs to be calculated relative to this.
        if not network_root.endswith("/"):
            network_root += "/"
        valid_roots = ["nfs://", "ftp://", "http://", "https://"]
        for valid_root in valid_roots:
            if network_root.startswith(valid_root):
                break
        else:
            self.log("Network root given to --available-as must be nfs://, ftp://, http://, or https://")
            return False

        if network_root.startswith("nfs://"):
            try:
                (a, b, rest) = network_root.split(":", 3)
            except ValueError:
                # Narrowed from a bare except: only the unpacking can fail here.
                self.log("Network root given to --available-as is missing a colon, please see the manpage example.")
                return False

    import_module = self.get_module_by_name("manage_import_signatures").get_import_manager(self._collection_mgr, logger)
    import_module.run(path, mirror_name, network_root, autoinstall_file, arch, breed, os_version)
def gen_win_files(distro, meta):
    """
    Generate the Windows boot files for one distro: patched boot loaders,
    BCD store, WinPE image, answer file and post-install script.

    :param distro: distro object; kernel, initrd and os_version are consulted.
    :param meta: autoinstall metadata dict; recognized keys include
        "kernel", "bootmgr", "bcd", "winpe", "answerfile",
        "post_install_script".
    :return: 1 on validation/file errors, otherwise None.
    """
    (kernel_path, kernel_name) = os.path.split(distro.kernel)
    distro_path = utils.find_distro_path(settings, distro)
    distro_dir = wim_file_name = os.path.join(settings.tftpboot_location, "images", distro.name)
    web_dir = os.path.join(settings.webdir, "images", distro.name)
    is_winpe = "winpe" in meta and meta['winpe'] != ""
    is_bcd = "bcd" in meta and meta['bcd'] != ""

    # An explicit "kernel" in the metadata overrides the distro kernel name.
    if "kernel" in meta:
        kernel_name = meta["kernel"]
    kernel_name = os.path.basename(kernel_name)
    is_wimboot = "wimboot" in kernel_name

    if is_wimboot:
        # wimboot serves its payload from the web mirror rather than TFTP.
        distro_path = os.path.join(settings.webdir, "distro_mirror", distro.name)
        kernel_path = os.path.join(distro_path, "Boot")
        if "kernel" in meta and "wimboot" not in distro.kernel:
            tgen.copy_single_distro_file(os.path.join(settings.tftpboot_location, kernel_name), distro_dir, False)
            tgen.copy_single_distro_file(os.path.join(distro_dir, kernel_name), web_dir, True)

    if "post_install_script" in meta:
        post_install_dir = distro_path
        if distro.os_version not in ("XP", "2003"):
            post_install_dir = os.path.join(post_install_dir, "sources")
        # $OEM$\$1 content is copied onto the target system drive by Windows setup.
        post_install_dir = os.path.join(post_install_dir, "$OEM$", "$1")
        if not os.path.exists(post_install_dir):
            utils.mkdir(post_install_dir)
        data = templ.render(post_tmpl_data, meta, None)
        post_install_script = os.path.join(post_install_dir, meta["post_install_script"])
        logger.info('Build post install script: ' + post_install_script)
        with open(post_install_script, "w+") as pi_file:
            pi_file.write(data)

    if "answerfile" in meta:
        data = templ.render(tmpl_data, meta, None)
        answerfile_name = os.path.join(distro_dir, meta["answerfile"])
        logger.info('Build answer file: ' + answerfile_name)
        with open(answerfile_name, "w+") as answerfile:
            answerfile.write(data)
        tgen.copy_single_distro_file(answerfile_name, distro_path, False)
        tgen.copy_single_distro_file(answerfile_name, web_dir, True)

    if "kernel" in meta and "bootmgr" in meta:
        wk_file_name = os.path.join(distro_dir, kernel_name)
        wl_file_name = os.path.join(distro_dir, meta["bootmgr"])
        tl_file_name = os.path.join(kernel_path, "bootmgr.exe")
        if distro.os_version in ("XP", "2003") and not is_winpe:
            # Legacy NTLDR-style boot chain (setupldr.exe instead of bootmgr.exe).
            tl_file_name = os.path.join(kernel_path, "setupldr.exe")
            # Replacement names must match the patched string lengths exactly,
            # since the binaries are patched in place.
            if len(meta["bootmgr"]) != 5:
                logger.error("The loader name should be EXACTLY 5 character")
                return 1
            pat1 = re.compile(br'NTLDR', re.IGNORECASE)
            pat2 = re.compile(br'winnt\.sif', re.IGNORECASE)
            with open(tl_file_name, 'rb') as file:
                out = data = file.read()
            if "answerfile" in meta:
                if len(meta["answerfile"]) != 9:
                    logger.error("The response file name should be EXACTLY 9 character")
                    return 1
                out = pat2.sub(bytes(meta["answerfile"], 'utf-8'), data)
        else:
            if len(meta["bootmgr"]) != 11:
                logger.error("The Boot manager file name should be EXACTLY 11 character")
                return 1
            bcd_name = "bcd"
            if is_bcd:
                bcd_name = meta["bcd"]
                if len(bcd_name) != 3:
                    logger.error("The BCD file name should be EXACTLY 3 character")
                    return 1
            if not os.path.isfile(tl_file_name):
                logger.error("File not found: %s" % tl_file_name)
                return 1
            pat1 = re.compile(br'bootmgr\.exe', re.IGNORECASE)
            # NOTE(review): the '.' wildcards presumably match the NUL bytes of
            # the UTF-16LE encoded "\Boot\BCD" string inside bootmgr — confirm.
            pat2 = re.compile(br'(\\.B.o.o.t.\\.)(B)(.)(C)(.)(D)', re.IGNORECASE)
            bcd_name = bytes("\\g<1>" + bcd_name[0] + "\\g<3>" + bcd_name[1] + "\\g<5>" + bcd_name[2], 'utf-8')
            with open(tl_file_name, 'rb') as file:
                out = file.read()
            if not is_wimboot:
                logger.info('Patching build Loader: %s' % wl_file_name)
                out = pat2.sub(bcd_name, out)
        if tl_file_name != wl_file_name:
            logger.info('Build Loader: %s from %s' % (wl_file_name, tl_file_name))
            with open(wl_file_name, 'wb+') as file:
                file.write(out)
            tgen.copy_single_distro_file(wl_file_name, web_dir, True)
        if not is_wimboot:
            if distro.os_version not in ("XP", "2003") or is_winpe:
                # Recompute the PE checksum of the patched loader.
                pe = pefile.PE(wl_file_name, fast_load=True)
                pe.OPTIONAL_HEADER.CheckSum = pe.generate_checksum()
                pe.write(filename=wl_file_name)
            with open(distro.kernel, 'rb') as file:
                data = file.read()
            # Point the PXE boot program at the renamed boot manager.
            out = pat1.sub(bytes(meta["bootmgr"], 'utf-8'), data)
            if wk_file_name != distro.kernel:
                logger.info("Build PXEBoot: %s from %s" % (wk_file_name, distro.kernel))
                with open(wk_file_name, 'wb+') as file:
                    file.write(out)
                tgen.copy_single_distro_file(wk_file_name, web_dir, True)

    if is_bcd:
        obcd_file_name = os.path.join(kernel_path, "bcd")
        bcd_file_name = os.path.join(distro_dir, meta["bcd"])
        wim_file_name = 'winpe.wim'
        if not os.path.isfile(obcd_file_name):
            logger.error("File not found: %s" % obcd_file_name)
            return 1
        if is_winpe:
            wim_file_name = meta["winpe"]
        if is_wimboot:
            # wimboot resolves Windows-style paths inside the image.
            wim_file_name = '\\Boot\\' + wim_file_name
            sdi_file_name = '\\Boot\\' + 'boot.sdi'
        else:
            wim_file_name = os.path.join("/images", distro.name, wim_file_name)
            sdi_file_name = os.path.join("/images", distro.name, os.path.basename(distro.initrd))
        logger.info('Build BCD: %s from %s for %s' % (bcd_file_name, obcd_file_name, wim_file_name))
        bcdedit(obcd_file_name, bcd_file_name, wim_file_name, sdi_file_name)
        tgen.copy_single_distro_file(bcd_file_name, web_dir, True)

    if is_winpe:
        ps_file_name = os.path.join(distro_dir, meta["winpe"])
        wim_pl_name = os.path.join(kernel_path, "winpe.wim")
        # --reflink=auto gives a cheap copy-on-write clone where supported.
        cmd = ["/usr/bin/cp", "--reflink=auto", wim_pl_name, ps_file_name]
        utils.subprocess_call(logger, cmd, shell=False)
        tgen.copy_single_distro_file(ps_file_name, web_dir, True)
        if os.path.exists(wimupdate):
            # Inject a rendered startnet.cmd into the WinPE image.
            data = templ.render(tmplstart_data, meta, None)
            pi_file = tempfile.NamedTemporaryFile()
            pi_file.write(bytes(data, 'utf-8'))
            pi_file.flush()
            # NOTE(review): this call omits the logger argument used by the
            # subprocess_call above — confirm the intended signature.
            cmd = [wimupdate, ps_file_name, "--command=add " + pi_file.name + " /Windows/System32/startnet.cmd"]
            utils.subprocess_call(cmd, shell=False)
            pi_file.close()
def __create_tftpboot_dirs(self):
    """
    Create directories for tftpboot images.

    Creates every missing directory below the tftpboot root and the
    ``grub/images`` symlink pointing at the shared images directory.
    """
    # Bootloader directories must exist before the images symlink is created.
    for directory in (self.pxelinux_dir, self.grub_dir):
        if not os.path.exists(directory):
            utils.mkdir(directory)

    # grub expects an "images" entry inside its own directory; link it to
    # the shared ../images tree instead of duplicating files.
    grub_images_link = os.path.join(self.grub_dir, "images")
    if not os.path.exists(grub_images_link):
        os.symlink("../images", grub_images_link)

    # Remaining directories (order does not matter among these).
    for directory in (self.images_dir, self.rendered_dir, self.yaboot_bin_dir,
                      self.yaboot_cfg_dir, self.ipxe_dir):
        if not os.path.exists(directory):
            utils.mkdir(directory)
def render(self, data_input: Union[TextIO, str], search_table: dict, out_path: Optional[str], template_type="default") -> str:
    """
    Render data_input back into a file.

    :param data_input: is either a str or a TextIO object.
    :param search_table: is a dict of metadata keys and values.
    :param out_path: Optional parameter which (if present), represents the target path to write the result into.
    :param template_type: May currently be "cheetah" or "jinja2". "default" looks in the settings.
    :return: The rendered template.
    :raises ValueError: if template_type is None.
    :raises TypeError: if template_type is not a str.
    """
    if not isinstance(data_input, str):
        raw_data = data_input.read()
    else:
        raw_data = data_input

    lines = raw_data.split('\n')

    if template_type is None:
        raise ValueError('"template_type" can\'t be "None"!')
    if not isinstance(template_type, str):
        raise TypeError('"template_type" must be of type "str"!')
    if template_type not in ("default", "jinja2", "cheetah"):
        return "# ERROR: Unsupported template type selected!"

    if template_type == "default":
        if self.settings and self.settings.default_template_type:
            template_type = self.settings.default_template_type
        else:
            template_type = "cheetah"

    if len(lines) > 0 and lines[0].find("#template=") == 0:
        # Pull the template type out of the first line and then drop it and rejoin them to pass to the template
        # language
        template_type = lines[0].split("=")[1].strip().lower()
        del lines[0]
        raw_data = "\n".join(lines)

    if template_type == "cheetah":
        data_out = self.render_cheetah(raw_data, search_table)
    elif template_type == "jinja2":
        if jinja2_available:
            data_out = self.render_jinja2(raw_data, search_table)
        else:
            return "# ERROR: JINJA2 NOT AVAILABLE. Maybe you need to install python-jinja2?\n"
    else:
        return "# ERROR: UNSUPPORTED TEMPLATE TYPE (%s)" % str(template_type)

    # Now apply some magic post-filtering that is used by "cobbler import" and some other places. Forcing folks to
    # double escape things would be very unwelcome.
    hp = search_table.get("http_port", "80")
    server = search_table.get("server", self.settings.server)
    if hp not in (80, '80'):
        repstr = "%s:%s" % (server, hp)
    else:
        repstr = server
    search_table["http_server"] = repstr

    # string replacements for @@xyz@@ in data_out with prior regex lookups of keys
    # (set comprehension replaces the old list-plus-unused-enumerate construct
    # and deduplicates so each placeholder is substituted once)
    regex = r"@@[\S]*?@@"
    matches = {match.group() for match in re.finditer(regex, data_out, re.MULTILINE)}
    for match in matches:
        data_out = data_out.replace(match, search_table[match.strip("@@")])

    # remove leading newlines which apparently breaks AutoYAST ?
    if data_out.startswith("\n"):
        data_out = data_out.lstrip()

    # if requested, write the data out to a file
    if out_path is not None:
        utils.mkdir(os.path.dirname(out_path))
        with open(out_path, "w+") as file_descriptor:
            file_descriptor.write(data_out)

    return data_out
def run(self, path: str, name: str, network_root=None, autoinstall_file=None, arch: Optional[str] = None, breed=None, os_version=None):
    """
    This is the main entry point in a manager. It is a required function for import modules.

    :param path: the directory we are scanning for files
    :param name: the base name of the distro
    :param network_root: the remote path (nfs/http/ftp) for the distro files
    :param autoinstall_file: user-specified response file, which will override the default
    :param arch: user-specified architecture
    :param breed: user-specified breed
    :param os_version: user-specified OS version
    :raises CX
    """
    self.name = name
    self.network_root = network_root
    self.autoinstall_file = autoinstall_file
    self.arch = arch
    self.breed = breed
    self.os_version = os_version

    self.path = path
    self.rootdir = path
    self.pkgdir = path

    # some fixups for the XMLRPC interface, which does not use "None"
    if self.arch == "":
        self.arch = None

    if self.name == "":
        self.name = None

    if self.autoinstall_file == "":
        self.autoinstall_file = None

    if self.os_version == "":
        self.os_version = None

    if self.network_root == "":
        self.network_root = None

    if self.os_version and not self.breed:
        utils.die("OS version can only be specified when a specific breed is selected")

    self.signature = self.scan_signatures()
    if not self.signature:
        error_msg = "No signature matched in %s" % path
        self.logger.error(error_msg)
        raise CX(error_msg)

    # now walk the filesystem looking for distributions that match certain patterns
    self.logger.info("Adding distros from path %s:" % self.path)
    distros_added = []
    import_walker(self.path, self.distro_adder, distros_added)

    if len(distros_added) == 0:
        if self.breed == "windows":
            # For Windows, the WinPE image and PXE boot files must first be
            # extracted from sources/boot.wim before distros can be found.
            cmd_path = "/usr/bin/wimexport"
            bootwim_path = os.path.join(self.path, "sources", "boot.wim")
            dest_path = os.path.join(self.path, "boot")
            if os.path.exists(cmd_path) and os.path.exists(bootwim_path):
                winpe_path = os.path.join(dest_path, "winpe.wim")
                if not os.path.exists(dest_path):
                    utils.mkdir(dest_path)
                # Export image 1 (the bootable WinPE image) out of boot.wim.
                rc = utils.subprocess_call([cmd_path, bootwim_path, "1", winpe_path, "--boot"], shell=False)
                if rc == 0:
                    # Locate the PXE directory and the SOFTWARE hive inside the image.
                    cmd = ["/usr/bin/wimdir %s 1 | /usr/bin/grep -i '^/Windows/Boot/PXE$'" % winpe_path]
                    pxe_path = utils.subprocess_get(cmd, shell=True)[0:-1]  # [0:-1] strips the trailing newline
                    cmd = ["/usr/bin/wimdir %s 1 | /usr/bin/grep -i '^/Windows/System32/config/SOFTWARE$'" % winpe_path]
                    config_path = utils.subprocess_get(cmd, shell=True)[0:-1]
                    cmd_path = "/usr/bin/wimextract"
                    rc = utils.subprocess_call([cmd_path, bootwim_path, "1", "%s/pxeboot.n12" % pxe_path,
                                                "%s/bootmgr.exe" % pxe_path, config_path,
                                                "--dest-dir=%s" % dest_path, "--no-acls", "--no-attributes"],
                                               shell=False)
                    if rc == 0:
                        if HAS_HIVEX:
                            # Patch the extracted SOFTWARE registry hive so WinPE
                            # boots with SystemRoot=x:\Windows and without InstRoot.
                            software = os.path.join(dest_path, os.path.basename(config_path))
                            h = hivex.Hivex(software, write=True)
                            root = h.root()
                            node = h.node_get_child(root, "Microsoft")
                            node = h.node_get_child(node, "Windows NT")
                            node = h.node_get_child(node, "CurrentVersion")
                            h.node_set_value(node, {"key": "SystemRoot", "t": REG_SZ,
                                                    "value": "x:\\Windows\0".encode(encoding="utf-16le")})
                            node = h.node_get_child(node, "WinPE")

                            # remove the key InstRoot from the registry
                            values = h.node_values(node)
                            new_values = []
                            for value in values:
                                keyname = h.value_key(value)
                                if keyname == "InstRoot":
                                    continue
                                val = h.node_get_value(node, keyname)
                                valtype = h.value_type(val)[0]
                                value2 = h.value_value(val)[1]
                                valobject = {"key": keyname, "t": int(valtype), "value": value2}
                                new_values.append(valobject)
                            h.node_set_values(node, new_values)
                            h.commit(software)

                            # Write the patched hive back into the WinPE image.
                            cmd_path = "/usr/bin/wimupdate"
                            rc = utils.subprocess_call([cmd_path, winpe_path,
                                                        "--command=add %s %s" % (software, config_path)],
                                                       shell=False)
                            os.remove(software)
                        else:
                            self.logger.info("python3-hivex not found. If you need Automatic Windows "
                                             "Installation support, please install.")
                        # NOTE(review): nesting reconstructed from flattened
                        # source — this re-walk is taken to run after the
                        # extraction succeeded; confirm against upstream.
                        import_walker(self.path, self.distro_adder, distros_added)

    if len(distros_added) == 0:
        self.logger.warning("No distros imported, bailing out")
        return

    # find out if we can auto-create any repository records from the install tree
    if self.network_root is None:
        self.logger.info("associating repos")
        # FIXME: this automagic is not possible (yet) without mirroring
        self.repo_finder(distros_added)
def write_templates(self, obj, write_file=False, path=None):
    """
    A semi-generic function that will take an object with a template_files dict {source:destination}, and generate a
    rendered file. The write_file option allows for generating of the rendered output without actually creating any
    files.

    :param obj: object (distro/profile/system) carrying a template_files dict
    :param write_file: if True, rendered output is also written to disk
    :param path: if given, only the template whose destination equals path is processed
    :return: A dict of the destination file names (after variable substitution is done) and the data in the file.
    """
    self.logger.info("Writing template files for %s" % obj.name)

    results = {}

    try:
        templates = obj.template_files
    except AttributeError:
        # Narrowed from a bare except: object simply has no template_files.
        return results

    blended = utils.blender(self.api, False, obj)

    if obj.COLLECTION_TYPE == "distro":
        if re.search("esxi[56]", obj.os_version) is not None:
            realbootcfg = open(os.path.join(os.path.dirname(obj.kernel), 'boot.cfg')).read()
            bootmodules = re.findall(r'modules=(.*)', realbootcfg)
            for modules in bootmodules:
                blended['esx_modules'] = modules.replace('/', '')

    autoinstall_meta = blended.get("autoinstall_meta", {})
    blended.pop("autoinstall_meta", None)
    blended.update(autoinstall_meta)    # make available at top level

    templates = blended.get("template_files", {})
    blended.pop("template_files", None)
    blended.update(templates)           # make available at top level

    (success, templates) = utils.input_string_or_dict(templates)
    if not success:
        return results

    # FIXME: img_path and local_img_path should probably be moved
    # up into the blender function to ensure they're consistently
    # available to templates across the board
    if blended["distro_name"]:
        blended['img_path'] = os.path.join("/images", blended["distro_name"])
        blended['local_img_path'] = os.path.join(self.bootloc, "images", blended["distro_name"])

    for template in list(templates.keys()):
        dest = templates[template]
        if dest is None:
            continue

        # Run the source and destination files through
        # templar first to allow for variables in the path
        template = self.templar.render(template, blended, None).strip()
        dest = os.path.normpath(self.templar.render(dest, blended, None).strip())
        # Get the path for the destination output
        dest_dir = os.path.normpath(os.path.dirname(dest))

        # If we're looking for a single template, skip if this ones
        # destination is not it.
        if path is not None and path != dest:
            continue

        # If we are writing output to a file, we allow files to be
        # written into the tftpboot directory, otherwise force all
        # templated configs into the rendered directory to ensure that
        # a user granted cobbler privileges via sudo can't overwrite
        # arbitrary system files (This also makes cleanup easier).
        if os.path.isabs(dest_dir) and write_file:
            if dest_dir.find(self.bootloc) != 0:
                raise CX(" warning: template destination (%s) is outside %s, skipping." % (dest_dir, self.bootloc))
        elif write_file:
            dest_dir = os.path.join(self.settings.webdir, "rendered", dest_dir)
            dest = os.path.join(dest_dir, os.path.basename(dest))
            if not os.path.exists(dest_dir):
                utils.mkdir(dest_dir)

        # Check for problems. (The unreachable `continue` statements that
        # followed each raise in the original were removed.)
        if not os.path.exists(template):
            raise CX("template source %s does not exist" % template)
        elif write_file and not os.path.isdir(dest_dir):
            raise CX("template destination (%s) is invalid" % dest_dir)
        elif write_file and os.path.exists(dest):
            raise CX("template destination (%s) already exists" % dest)
        elif write_file and os.path.isdir(dest):
            raise CX("template destination (%s) is a directory" % dest)
        elif template == "" or dest == "":
            # Fixed: the original applied "% dest" to a format string with no
            # placeholder, raising TypeError instead of CX.
            raise CX("either the template source or destination was blank (unknown variable used?)")

        with open(template) as template_fh:
            template_data = template_fh.read()

        buffer = self.templar.render(template_data, blended, None)
        results[dest] = buffer

        if write_file:
            self.logger.info("generating: %s" % dest)
            with open(dest, "w") as dest_fh:
                dest_fh.write(buffer)

    return results