def generate_kickstart_for_profile(self, g):
    """
    Render and return the kickstart text for the profile named *g*.

    Returns a commented placeholder string when the profile does not
    exist or the kickstart is remote/missing; raises CX when the
    profile references a missing distro.
    """
    g = self.api.find_profile(name=g)
    if g is None:
        return "# profile not found"
    distro = g.get_conceptual_parent()
    meta = utils.blender(self.api, False, g)
    if distro is None:
        raise CX(_("profile %(profile)s references missing distro %(distro)s") % {"profile": g.name, "distro": g.distro})
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if kickstart_path is not None and os.path.exists(kickstart_path):
        # the input is an *actual* file, hence we have to copy it
        try:
            # BUGFIX: the original recomputed utils.blender() here even
            # though meta was already built above; the redundant call
            # has been removed (nothing mutates g in between).
            ksmeta = meta["ks_meta"]
            del meta["ks_meta"]
            meta.update(ksmeta)  # make available at top level
            meta["yum_repo_stanza"] = self.generate_repo_stanza(g, True)
            meta["yum_config_stanza"] = self.generate_config_stanza(g, True)
            meta["kickstart_done"] = self.generate_kickstart_signal(0, g, None)
            meta["kickstart_start"] = self.generate_kickstart_signal(1, g, None)
            meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
            kfile = open(kickstart_path)
            try:
                data = self.templar.render(kfile, meta, None, g)
            finally:
                # close the handle even when rendering raises
                kfile.close()
            return data
        except:
            # log the full traceback, then let the caller see the error
            utils.log_exc(self.api.logger)
            raise
    elif kickstart_path is not None and not os.path.exists(kickstart_path):
        if kickstart_path.find("http://") == -1 and kickstart_path.find("ftp://") == -1 and kickstart_path.find("nfs:") == -1:
            return "# Error, cannot find %s" % kickstart_path
    return "# kickstart is sourced externally, or is missing, and cannot be displayed here: %s" % meta["kickstart"]
def run(api, args, logger):
    """
    Install trigger: create the virtual guests listed in the target's
    ks_meta 'vms' entry by shelling out to /usr/local/bin/createvm.

    args: [objtype ("system"/"profile"), name, ip]
    Raises CX when the target cannot be found or createvm fails.
    """
    # FIXME: make everything use the logger, no prints, use util.subprocess_call, etc
    objtype = args[0]   # "system" or "profile"
    name = args[1]      # name of system or profile
    ip = args[2]        # ip or "?"
    if objtype == "system":
        target = api.find_system(name)
    else:
        target = api.find_profile(name)
    # collapse the object down to a rendered datastructure
    target = utils.blender(api, False, target)
    if target == {}:
        logger.info("unable to locate %s " % name)
        raise CX("failure looking up target")
    if target['ks_meta']['vms']:
        for vm in target['ks_meta']['vms'].split(','):
            # BUGFIX: rc was previously unbound if subprocess_call raised,
            # causing a NameError at the rc check below
            rc = 0
            try:
                arglist = ["/usr/local/bin/createvm", target['ip_address_vmnic1'], vm, target['server']]
                logger.info("creating virtual guest %s" % vm)
                rc = utils.subprocess_call(logger, arglist, shell=False)
            except Exception as reason:  # modernized from py2-only "except E, e"
                logger.error("unable to create %s: %s" % (name, reason))
            if rc != 0:
                # BUGFIX: the old message contained a dangling, never
                # interpolated "%(file)s" placeholder
                raise CX("cobbler trigger failed: createvm for %s returned %s" % (vm, rc))
def generate_kickstart(self, profile=None, system=None):
    """
    Render the kickstart template for a profile or a system.
    When both are supplied the system takes precedence.
    """
    obj = profile if system is None else system
    meta = utils.blender(self.api, False, obj)
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if not kickstart_path:
        return "# kickstart is missing or invalid: %s" % meta["kickstart"]
    # hoist the ks_meta entries to the top level of the template namespace
    ksmeta = meta.pop("ks_meta")
    meta.update(ksmeta)
    for_profile = (system is None)
    meta["yum_repo_stanza"] = self.generate_repo_stanza(obj, for_profile)
    meta["yum_config_stanza"] = self.generate_config_stanza(obj, for_profile)
    meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
    # meta["config_template_files"] = self.generate_template_files_stanza(g, False)
    try:
        raw_data = utils.read_file_contents(kickstart_path, self.api.logger, self.settings.template_remote_kickstarts)
        if raw_data is None:
            return "# kickstart is sourced externally: %s" % meta["kickstart"]
        return self.templar.render(raw_data, meta, None, obj)
    except FileNotFoundException:
        self.api.logger.warning("kickstart not found: %s" % meta["kickstart"])
        return "# kickstart not found: %s" % meta["kickstart"]
def write_boot_files_distro(self, distro):
    """
    Copy each of a distro's configured boot files to its templated
    destination under the tftpboot images directory.

    Copy failures are logged and skipped so the rest of the sync can
    proceed.  Always returns 0.
    """
    # collapse the object down to a rendered datastructure
    # the second argument set to false means we don't collapse
    # hashes/arrays into a flat string
    target = utils.blender(self.config.api, False, distro)
    # Create metadata for the templar function
    # Right now, just using img_path, but adding more
    # cobbler variables here would probably be good
    metadata = {}
    metadata["img_path"] = os.path.join(utils.tftpboot_location(), "images", distro.name)
    # Create the templar instance.  Used to template the target directory
    templater = templar.Templar()
    # Loop through the hash of boot files, executing a cp for each one
    for boot_file in target["boot_files"].keys():  # renamed: 'file' shadowed the builtin
        file_dst = templater.render(boot_file, metadata, None)
        try:
            shutil.copyfile(target["boot_files"][boot_file], file_dst)
            self.config.api.log("copied file %s to %s for %s" % (target["boot_files"][boot_file], file_dst, distro.name))
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; continue on to sync what you can
            self.logger.error("failed to copy file %s to %s for %s" % (target["boot_files"][boot_file], file_dst, distro.name))
    return 0
def checkfile(self, obj, is_profile):
    """
    Validate the rendered kickstart of one profile/system with ksvalidator.
    Returns True when validation passed or was skipped (no kickstart, or
    non-redhat breed); False when ksvalidator reports an error.
    """
    blended = utils.blender(self.config.api, False, obj)
    os_version = blended["os_version"]
    ks = blended["kickstart"]
    if ks is None or ks == "":
        print "%s has no kickstart, skipping" % obj.name
        return True
    breed = blended["breed"]
    if breed != "redhat":
        # ksvalidator only understands Red Hat style kickstarts
        print "%s has a breed of %s, skipping" % (obj.name, breed)
        return True
    server = blended["server"]
    if not ks.startswith("/"):
        # NOTE(review): this uses self.kickstart rather than the object's
        # ks value -- confirm this attribute is the intended URL here
        url = self.kickstart
    elif is_profile:
        url = "http://%s/cblr/svc/op/ks/profile/%s" % (server,obj.name)
    else:
        url = "http://%s/cblr/svc/op/ks/system/%s" % (server,obj.name)
    print "----------------------------"
    print "checking url: %s" % url
    # NOTE(review): url is interpolated into a shell command string; object
    # names containing shell metacharacters could break or abuse this --
    # consider utils.subprocess_call with a list argument instead
    rc = os.system("/usr/bin/ksvalidator \"%s\"" % url)
    if rc != 0:
        return False
    return True
def generate_autoyast(self, profile=None, system=None, raw_data=None):
    """
    Post-process a rendered AutoYaST XML document: embed cobbler
    identification data (<cobbler> element) and append nopxe / pre /
    post install-trigger scripts as configured in settings.
    """
    self.api.logger.info("autoyast XML file found. Checkpoint: profile=%s system=%s" % (profile, system))
    nopxe = "\nwget \"http://%s/cblr/svc/op/nopxe/system/%s\" -O /dev/null"
    runpost = "\ncurl \"http://%s/cblr/svc/op/trig/mode/post/%s/%s\" > /dev/null"
    runpre = "\nwget \"http://%s/cblr/svc/op/trig/mode/pre/%s/%s\" -O /dev/null"
    if system:
        what = "system"
        blend_this = system
    else:
        what = "profile"
        blend_this = profile
    blended = utils.blender(self.api, False, blend_this)
    srv = blended["http_server"]
    document = xml.dom.minidom.parseString(raw_data)
    # do we already have the #raw comment in the XML? (skip tagging if a
    # <cobbler> element is already present)
    needs_tag = True
    for node in document.childNodes[1].childNodes:
        if node.nodeType == node.ELEMENT_NODE and node.tagName == "cobbler":
            needs_tag = False
            break
    # add some cobbler information to the XML file
    # maybe that should be configureable
    if needs_tag:
        root_el = document.createElement("cobbler")
        sys_el = xml.dom.minidom.Element("system_name")
        prof_el = xml.dom.minidom.Element("profile_name")
        if system is not None:
            sys_el.appendChild(document.createTextNode(system.name))
        if profile is not None:
            prof_el.appendChild(document.createTextNode(profile.name))
        srv_el = document.createElement("server")
        srv_el.appendChild(document.createTextNode(blended["http_server"]))
        root_el.appendChild(srv_el)
        root_el.appendChild(sys_el)
        root_el.appendChild(prof_el)
        document.childNodes[1].insertBefore(root_el, document.childNodes[1].childNodes[1])
    name = system.name if system is not None else profile.name
    if str(self.settings.pxe_just_once).upper() in ["1", "Y", "YES", "TRUE"]:
        self.addAutoYaSTScript(document, "chroot-scripts", nopxe % (srv, name))
    if self.settings.run_install_triggers:
        # notify cobblerd when we start/finished the installation
        self.addAutoYaSTScript(document, "pre-scripts", runpre % (srv, what, name))
        self.addAutoYaSTScript(document, "init-scripts", runpost % (srv, what, name))
    return document.toxml()
def generate_kickstart(self, profile=None, system=None):
    """
    Render the kickstart template for a profile or a system; the
    system wins when both are given.
    """
    if system is not None:
        obj = system
    else:
        obj = profile
    meta = utils.blender(self.api, False, obj)
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if not kickstart_path:
        return "# kickstart is missing or invalid: %s" % meta["kickstart"]
    # promote the ks_meta entries to the top level for the template
    meta.update(meta.pop("ks_meta"))
    is_profile = system is None
    meta["yum_repo_stanza"] = self.generate_repo_stanza(obj, is_profile)
    meta["yum_config_stanza"] = self.generate_config_stanza(obj, is_profile)
    meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
    # meta["config_template_files"] = self.generate_template_files_stanza(g, False)
    try:
        raw_data = utils.read_file_contents(kickstart_path, self.api.logger, self.settings.template_remote_kickstarts)
        if raw_data is None:
            return "# kickstart is sourced externally: %s" % meta["kickstart"]
        return self.templar.render(raw_data, meta, None, obj)
    except FileNotFoundException:
        self.api.logger.warning("kickstart not found: %s" % meta["kickstart"])
        return "# kickstart not found: %s" % meta["kickstart"]
def run(api, args, logger):
    """
    Install trigger: automatically sign the puppet certificate of a
    freshly installed system when the relevant settings are enabled.
    Failures are only logged; always returns 0.
    """
    objtype = args[0]   # "system" or "profile"
    name = args[1]      # name of system or profile
    ip = args[2]        # ip or "?"
    if objtype != "system":
        return 0
    settings = api.settings()
    truthy = ["1", "yes", "y", "true"]
    if str(settings.puppet_auto_setup).lower() not in truthy:
        return 0
    if str(settings.sign_puppet_certs_automatically).lower() not in truthy:
        return 0
    system = utils.blender(api, False, api.find_system(name))
    hostname = system["hostname"]
    puppetca_path = settings.puppetca_path
    cmd = [puppetca_path, '--sign', hostname]
    rc = 0
    try:
        rc = utils.subprocess_call(logger, cmd, shell=False)
    except:
        if logger is not None:
            logger.warning("failed to execute %s", puppetca_path)
    if rc != 0 and logger is not None:
        logger.warning("signing of puppet cert for %s failed", name)
    return 0
def generate_autoyast(self, profile=None, system=None, raw_data=None):
    """
    Post-process rendered AutoYaST XML: tag the document with cobbler
    metadata and inject the nopxe / pre / post trigger scripts that the
    settings ask for.
    """
    self.api.logger.info("autoyast XML file found. Checkpoint: profile=%s system=%s" % (profile, system))
    nopxe = '\nwget "http://%s/cblr/svc/op/nopxe/system/%s" -O /dev/null'
    runpost = '\ncurl "http://%s/cblr/svc/op/trig/mode/post/%s/%s" > /dev/null'
    runpre = '\nwget "http://%s/cblr/svc/op/trig/mode/pre/%s/%s" -O /dev/null'
    blend_this = system if system else profile
    what = "system" if system else "profile"
    blended = utils.blender(self.api, False, blend_this)
    srv = blended["http_server"]
    document = xml.dom.minidom.parseString(raw_data)
    root_children = document.childNodes[1].childNodes
    # skip the tagging below when a <cobbler> element is already present
    already_tagged = any(
        n.nodeType == n.ELEMENT_NODE and n.tagName == "cobbler"
        for n in root_children)
    if not already_tagged:
        # add some cobbler information to the XML file
        # maybe that should be configureable
        cobbler_el = document.createElement("cobbler")
        system_el = xml.dom.minidom.Element("system_name")
        profile_el = xml.dom.minidom.Element("profile_name")
        if system is not None:
            system_el.appendChild(document.createTextNode(system.name))
        if profile is not None:
            profile_el.appendChild(document.createTextNode(profile.name))
        server_el = document.createElement("server")
        server_el.appendChild(document.createTextNode(blended["http_server"]))
        for child in (server_el, system_el, profile_el):
            cobbler_el.appendChild(child)
        document.childNodes[1].insertBefore(cobbler_el, root_children[1])
    if system is not None:
        name = system.name
    else:
        name = profile.name
    if str(self.settings.pxe_just_once).upper() in ["1", "Y", "YES", "TRUE"]:
        self.addAutoYaSTScript(document, "chroot-scripts", nopxe % (srv, name))
    if self.settings.run_install_triggers:
        # notify cobblerd when we start/finished the installation
        self.addAutoYaSTScript(document, "pre-scripts", runpre % (srv, what, name))
        self.addAutoYaSTScript(document, "init-scripts", runpost % (srv, what, name))
    return document.toxml()
def power(self, desired_state):
    """
    state is either "on" or "off".  Rebooting is implemented at the
    api.py level.

    The user and password need not be supplied.  If not supplied they
    will be taken from the environment, COBBLER_POWER_USER and
    COBBLER_POWER_PASS.  If provided, these will override any other data
    and be used instead.  Users interested in maximum security should
    take that route.
    """
    template = self.get_command_template()
    template_file = open(template, "r")
    meta = utils.blender(self.api, False, self.system)
    meta["power_mode"] = desired_state
    # allow command line overrides of the username/password
    if self.force_user is not None:
        meta["power_user"] = self.force_user
    if self.force_pass is not None:
        meta["power_pass"] = self.force_pass
    # if no username/password data, check the environment.
    # BUGFIX: this must happen *before* the template is rendered --
    # previously the environment values were merged into meta only after
    # rendering, so they never reached the generated command.
    if meta.get("power_user", "") == "":
        meta["power_user"] = os.environ.get("COBBLER_POWER_USER", "")
    if meta.get("power_pass", "") == "":
        meta["power_pass"] = os.environ.get("COBBLER_POWER_PASS", "")
    tmp = templar.Templar(self.api._config)
    try:
        cmd = tmp.render(template_file, meta, None, self.system)
    finally:
        # close the template even if rendering raises
        template_file.close()
    cmd = cmd.strip()
    self.logger.info("cobbler power configuration is:")
    self.logger.info("      type   : %s" % self.system.power_type)
    self.logger.info("      address: %s" % self.system.power_address)
    self.logger.info("      user   : %s" % self.system.power_user)
    self.logger.info("      id     : %s" % self.system.power_id)
    # now reprocess the command so we don't feed it through the shell
    cmd = cmd.split(" ")
    # Try the power command 5 times before giving up.
    # Some power switches are flakey
    for attempt in range(0, 5):
        rc = utils.subprocess_call(self.logger, cmd, shell=False)
        if rc == 0:
            break
        else:
            time.sleep(2)
    if not rc == 0:
        utils.die(self.logger, "command failed (rc=%s), please validate the physical setup and cobbler config" % rc)
    return rc
def generate_repo_stanza(self, obj, is_profile=True):
    """
    Automatically attaches yum repos to profiles/systems in kickstart files
    that contain the magic $yum_repo_stanza variable.  This includes repo
    objects as well as the yum repos that are part of split tree installs,
    whose data is stored with the distro (example: RHEL5 imports)
    """
    buf = ""
    blended = utils.blender(self.api, False, obj)
    # keep track of URLs and be sure to not include any duplicates
    included = {}
    for repo in blended["repos"]:
        # see if this is a source_repo or not
        repo_obj = self.api.find_repo(repo)
        if repo_obj is None:
            # FIXME: what to do if we can't find the repo object that is listed?
            # this should be a warning at another point, probably not here
            # so we'll just not list it so the kickstart will still work
            # as nothing will be here to read the output noise.  Logging might
            # be useful.
            continue
        yumopts = ''
        for opt in repo_obj.yumopts:
            yumopts = yumopts + " %s=%s" % (opt, repo_obj.yumopts[opt])
        # a repo counts as enabled when the flag is absent or set to '1'
        if repo_obj.yumopts.get('enabled', '1') != '1':
            continue
        if repo_obj.mirror_locally:
            baseurl = "http://%s/cobbler/repo_mirror/%s" % (blended["http_server"], repo_obj.name)
            if baseurl not in included:
                buf += "repo --name=%s --baseurl=%s\n" % (repo_obj.name, baseurl)
                included[baseurl] = 1
        elif repo_obj.mirror not in included:
            buf += "repo --name=%s --baseurl=%s %s\n" % (repo_obj.name, repo_obj.mirror, yumopts)
            included[repo_obj.mirror] = 1
    if is_profile:
        distro = obj.get_conceptual_parent()
    else:
        distro = obj.get_conceptual_parent().get_conceptual_parent()
    # split-tree install sources stored with the distro
    for count, source in enumerate(distro.source_repos, 1):
        if source[1] not in included:
            buf += "repo --name=source-%s --baseurl=%s\n" % (count, source[1])
            included[source[1]] = 1
    return buf
def get_yum_config(self, obj, is_profile):
    """
    Return one large yum repo config blob suitable for use
    by any target system that requests it.
    """
    totalbuf = ""
    blended = utils.blender(self.api, False, obj)
    input_files = []
    # chance old versions from upgrade do not have a source_repos
    # workaround for user bug
    if "source_repos" not in blended:
        blended["source_repos"] = []
    # tack on all the install source repos IF there is more than one.
    # this is basically to support things like RHEL5 split trees
    # if there is only one, then there is no need to do this.
    included = {}
    for r in blended["source_repos"]:
        filename = self.settings.webdir + "/" + "/".join(r[0].split("/")[4:])
        if filename not in included:
            input_files.append(filename)
            included[filename] = 1
    for repo in blended["repos"]:
        path = os.path.join(self.settings.webdir, "repo_mirror", repo, "config.repo")
        if path not in included:
            input_files.append(path)
            included[path] = 1
    for infile in input_files:
        # NOTE: the old code computed an unused "dispname" from the path
        # here; that dead code has been removed.
        try:
            infile_h = open(infile)
        except IOError:  # narrowed from a bare except
            # file does not exist and the user needs to run reposync
            # before we will use this, cobbler check will mention
            # this problem
            totalbuf += "\n# error: could not read repo source: %s\n\n" % infile
            continue
        try:
            infile_data = infile_h.read()
        finally:
            infile_h.close()
        outfile = None  # disk output only
        totalbuf += self.templar.render(infile_data, blended, outfile, None)
        totalbuf += "\n\n"
    return totalbuf
def generate_repo_stanza(self, obj, is_profile=True):
    """
    Automatically attaches yum repos to profiles/systems in kickstart files
    that contain the magic $yum_repo_stanza variable.  This includes repo
    objects as well as the yum repos that are part of split tree installs,
    whose data is stored with the distro (example: RHEL5 imports)
    """
    lines = []
    blended = utils.blender(self.api, False, obj)
    seen_urls = set()  # avoid emitting duplicate baseurls
    for repo_name in blended["repos"]:
        # see if this is a source_repo or not
        repo_obj = self.api.find_repo(repo_name)
        if repo_obj is None:
            # FIXME: what to do if we can't find the repo object that is listed?
            # this should be a warning at another point, probably not here
            # so we'll just not list it so the kickstart will still work
            # as nothing will be here to read the output noise.  Logging might
            # be useful.
            continue
        yumopts = "".join([" %s=%s" % (k, repo_obj.yumopts[k]) for k in repo_obj.yumopts])
        if "enabled" in repo_obj.yumopts and repo_obj.yumopts["enabled"] != "1":
            continue
        if repo_obj.mirror_locally:
            baseurl = "http://%s/cobbler/repo_mirror/%s" % (blended["http_server"], repo_obj.name)
            if baseurl not in seen_urls:
                lines.append("repo --name=%s --baseurl=%s\n" % (repo_obj.name, baseurl))
                seen_urls.add(baseurl)
        else:
            if repo_obj.mirror not in seen_urls:
                lines.append("repo --name=%s --baseurl=%s %s\n" % (repo_obj.name, repo_obj.mirror, yumopts))
                seen_urls.add(repo_obj.mirror)
    if is_profile:
        distro = obj.get_conceptual_parent()
    else:
        distro = obj.get_conceptual_parent().get_conceptual_parent()
    # split-tree install source repos stored with the distro
    count = 0
    for entry in distro.source_repos:
        count += 1
        if entry[1] not in seen_urls:
            lines.append("repo --name=source-%s --baseurl=%s\n" % (count, entry[1]))
            seen_urls.add(entry[1])
    return "".join(lines)
def generate_bootcfg(self, what, name):
    """
    Build the boot.cfg text for the named profile or system from the
    matching bootcfg_*.template.  Returns a commented error string
    when *what* is invalid or the template is missing.
    """
    if what.lower() not in ("profile", "system"):
        return "# bootcfg is only valid for profiles and systems"
    if what == "profile":
        obj = self.api.find_profile(name=name)
        distro = obj.get_conceptual_parent()
    else:
        obj = self.api.find_system(name=name)
        distro = obj.get_conceptual_parent().get_conceptual_parent()
    # For multi-arch distros, the distro name in ks_mirror
    # may not contain the arch string, so we need to figure out
    # the path based on where the kernel is stored.  We do this
    # because some distros base future downloads on the initial
    # URL passed in, so all of the files need to be at this location
    # (which is why we can't use the images link, which just contains
    # the kernel and initrd).
    # "".join() replaces the deprecated string.join() from the legacy
    # 'string' module (same result)
    ks_mirror_name = "".join(distro.kernel.split("/")[-2:-1])
    blended = utils.blender(self.api, False, obj)
    ksmeta = blended.get("ks_meta", {})
    try:
        del blended["ks_meta"]
    except KeyError:
        # narrowed from a bare except; only a missing key is expected here
        pass
    blended.update(ksmeta)  # make available at top level
    blended["distro"] = ks_mirror_name
    # FIXME: img_path should probably be moved up into the
    # blender function to ensure they're consistently
    # available to templates across the board
    if obj.enable_gpxe:
        blended["img_path"] = "http://%s:%s/cobbler/links/%s" % (self.settings.server, self.settings.http_port, distro.name)
    else:
        blended["img_path"] = os.path.join("/images", distro.name)
    template = os.path.join(self.settings.pxe_template_dir, "bootcfg_%s_%s.template" % (what.lower(), distro.os_version))
    if not os.path.exists(template):
        return "# boot.cfg template not found for the %s named %s (filename=%s)" % (what, name, template)
    template_fh = open(template)
    try:
        template_data = template_fh.read()
    finally:
        # close the handle even when read() raises
        template_fh.close()
    return self.templar.render(template_data, blended, None)
def get_yum_config(self, obj, is_profile):
    """
    Return one large yum repo config blob suitable for use
    by any target system that requests it.
    """
    totalbuf = ""
    blended = utils.blender(self.api, False, obj)
    input_files = []
    # chance old versions from upgrade do not have a source_repos
    # workaround for user bug
    if "source_repos" not in blended:
        blended["source_repos"] = []
    # tack on all the install source repos IF there is more than one.
    # this is basically to support things like RHEL5 split trees
    # if there is only one, then there is no need to do this.
    included = {}
    for r in blended["source_repos"]:
        filename = self.settings.webdir + "/" + "/".join(r[0].split("/")[4:])
        if filename not in included:
            input_files.append(filename)
            included[filename] = 1
    for repo in blended["repos"]:
        path = os.path.join(self.settings.webdir, "repo_mirror", repo, "config.repo")
        if path not in included:
            input_files.append(path)
            included[path] = 1
    for infile in input_files:
        # NOTE: a "dispname" value used to be computed from the path here
        # but was never used anywhere; the dead code has been dropped.
        try:
            infile_h = open(infile)
        except IOError:  # narrowed from a bare except
            # file does not exist and the user needs to run reposync
            # before we will use this, cobbler check will mention
            # this problem
            totalbuf += "\n# error: could not read repo source: %s\n\n" % infile
            continue
        try:
            infile_data = infile_h.read()
        finally:
            infile_h.close()
        outfile = None  # disk output only
        totalbuf += self.templar.render(infile_data, blended, outfile, None)
        totalbuf += "\n\n"
    return totalbuf
def generate_kickstart_signal(self, is_pre=0, profile=None, system=None):
    """
    Do things that we do at the start/end of kickstarts...
    * start: signal the status watcher we're starting
    * end:   signal the status watcher we're done
    * end:   disable PXE if needed
    * end:   save the original kickstart file for debug
    """
    nopxe = "\nwget \"http://%s/cblr/svc/op/nopxe/system/%s\" -O /dev/null"
    saveks = "\nwget \"http://%s/cblr/svc/op/ks/%s/%s\" -O /root/cobbler.ks"
    runpost = "\nwget \"http://%s/cblr/svc/op/trig/mode/post/%s/%s\" -O /dev/null"
    runpre = "\nwget \"http://%s/cblr/svc/op/trig/mode/pre/%s/%s\" -O /dev/null"
    if system:
        what = "system"
        blend_this = system
    else:
        what = "profile"
        blend_this = profile
    blended = utils.blender(self.api, False, blend_this)
    kickstart = blended.get("kickstart", None)
    buf = ""
    srv = blended["http_server"]
    if system is not None:
        label, name = "system", system.name
    else:
        label, name = "profile", profile.name
    if is_pre:
        # start-of-install: fire the pre trigger only
        if self.settings.run_install_triggers:
            buf += runpre % (srv, what, name)
    else:
        # end-of-install: optionally disable PXE, save the kickstart,
        # and fire the post trigger
        if system is not None and str(self.settings.pxe_just_once).upper() in ["1", "Y", "YES", "TRUE"]:
            buf += nopxe % (srv, system.name)
        if kickstart and os.path.exists(kickstart):
            buf += saveks % (srv, label, name)
        if self.settings.run_install_triggers:
            buf += runpost % (srv, what, name)
    return buf
def generate_bootcfg(self, what, name):
    """
    Build the boot.cfg text for the profile or system *name* from the
    matching bootcfg_<what>_<os_version>.template.  Returns a commented
    error string when *what* is invalid or the template is missing.
    """
    if what.lower() not in ("profile", "system"):
        return "# bootcfg is only valid for profiles and systems"
    distro = None
    if what == "profile":
        obj = self.api.find_profile(name=name)
        distro = obj.get_conceptual_parent()
    else:
        obj = self.api.find_system(name=name)
        # a system's grandparent is its distro (system -> profile -> distro)
        distro = obj.get_conceptual_parent().get_conceptual_parent()
    # For multi-arch distros, the distro name in ks_mirror
    # may not contain the arch string, so we need to figure out
    # the path based on where the kernel is stored.  We do this
    # because some distros base future downloads on the initial
    # URL passed in, so all of the files need to be at this location
    # (which is why we can't use the images link, which just contains
    # the kernel and initrd).
    ks_mirror_name = string.join(distro.kernel.split('/')[-2:-1], '')
    blended = utils.blender(self.api, False, obj)
    ksmeta = blended.get("ks_meta", {})
    try:
        del blended["ks_meta"]
    except:
        pass
    blended.update(ksmeta)  # make available at top level
    blended['distro'] = ks_mirror_name
    # FIXME: img_path should probably be moved up into the
    # blender function to ensure they're consistently
    # available to templates across the board
    if obj.enable_gpxe:
        blended['img_path'] = 'http://%s:%s/cobbler/links/%s' % (self.settings.server, self.settings.http_port, distro.name)
    else:
        blended['img_path'] = os.path.join("/images", distro.name)
    template = os.path.join(self.settings.boot_loader_conf_template_dir, "bootcfg_%s_%s.template" % (what.lower(), distro.os_version))
    if not os.path.exists(template):
        return "# boot.cfg template not found for the %s named %s (filename=%s)" % (what, name, template)
    template_fh = open(template)
    template_data = template_fh.read()
    template_fh.close()
    return self.templar.render(template_data, blended, None)
def get_yum_config(self, obj, is_profile):
    """
    Return one large yum repo config blob suitable for use by any
    target system that requests it.
    """
    totalbuf = ""
    blended = utils.blender(self.api, False, obj)
    input_files = []
    # tack on all the install source repos IF there is more than one.
    # this is basically to support things like RHEL5 split trees
    # if there is only one, then there is no need to do this.
    # NOTE(review): unlike older variants, this version has no fallback
    # when "source_repos" is absent from blended -- confirm blender
    # always supplies the key here.
    included = {}
    for r in blended["source_repos"]:
        filename = self.settings.webdir + "/" + "/".join(
            r[0].split("/")[4:])
        if filename not in included:
            input_files.append(filename)
            included[filename] = 1
    for repo in blended["repos"]:
        path = os.path.join(self.settings.webdir, "repo_mirror", repo,
                            "config.repo")
        if path not in included:
            input_files.append(path)
            included[path] = 1
    for infile in input_files:
        try:
            infile_h = open(infile)
        except:
            # file does not exist and the user needs to run reposync
            # before we will use this, cobbler check will mention
            # this problem
            totalbuf += "\n# error: could not read repo source: %s\n\n" % infile
            continue
        infile_data = infile_h.read()
        infile_h.close()
        outfile = None  # disk output only
        # feed the template file contents in and render the actual config
        totalbuf += self.templar.render(infile_data, blended, outfile, None)
        totalbuf += "\n\n"
    return totalbuf
def generate_kickstart(self, profile=None, system=None):
    """
    Render the kickstart template for a profile or a system (system
    wins when both are given).  For SUSE distros the rendered data is
    additionally run through generate_autoyast().
    """
    obj = system
    if system is None:
        obj = profile
    meta = utils.blender(self.api, False, obj)
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if not kickstart_path:
        return "# kickstart is missing or invalid: %s" % meta["kickstart"]
    ksmeta = meta["ks_meta"]
    del meta["ks_meta"]
    meta.update(ksmeta)  # make available at top level
    meta["yum_repo_stanza"] = self.generate_repo_stanza(obj, (system is None))
    meta["yum_config_stanza"] = self.generate_config_stanza(obj, (system is None))
    meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
    # meta["config_template_files"] = self.generate_template_files_stanza(g, False)
    # add extra variables for other distro types
    if "tree" in meta:
        urlparts = urlparse.urlsplit(meta["tree"])
        meta["install_source_directory"] = urlparts[2]
    try:
        raw_data = utils.read_file_contents(kickstart_path, self.api.logger, self.settings.template_remote_kickstarts)
        if raw_data is None:
            return "# kickstart is sourced externally: %s" % meta["kickstart"]
        # BUGFIX: the old code unconditionally called
        # profile.get_conceptual_parent() first, which raised
        # AttributeError whenever only a system was supplied
        # (profile=None); branch on which object we actually have.
        if system is not None:
            distro = system.get_conceptual_parent().get_conceptual_parent()
        else:
            distro = profile.get_conceptual_parent()
        data = self.templar.render(raw_data, meta, None, obj)
        if distro.breed == "suse":
            # AutoYaST profile
            data = self.generate_autoyast(profile, system, data)
        return data
    except FileNotFoundException:
        self.api.logger.warning("kickstart not found: %s" % meta["kickstart"])
        return "# kickstart not found: %s" % meta["kickstart"]
def generate_kickstart_signal(self, is_pre=0, profile=None, system=None):
    """
    Do things that we do at the start/end of kickstarts...
    * start: signal the status watcher we're starting
    * end: signal the status watcher we're done
    * end: disable PXE if needed
    * end: save the original kickstart file for debug

    Returns a string of shell commands to embed in the kickstart.
    """
    nopxe = "\nwget \"http://%s/cblr/svc/op/nopxe/system/%s\" -O /dev/null"
    saveks = "\nwget \"http://%s/cblr/svc/op/ks/%s/%s\" -O /root/cobbler.ks"
    runpost = "\nwget \"http://%s/cblr/svc/op/trig/mode/post/%s/%s\" -O /dev/null"
    runpre = "\nwget \"http://%s/cblr/svc/op/trig/mode/pre/%s/%s\" -O /dev/null"
    what = "profile"
    blend_this = profile
    if system:
        what = "system"
        blend_this = system
    blended = utils.blender(self.api, False, blend_this)
    kickstart = blended.get("kickstart",None)
    buf = ""
    srv = blended["http_server"]
    if system is not None:
        if not is_pre:
            # end-of-install actions for a system
            if str(self.settings.pxe_just_once).upper() in [ "1", "Y", "YES", "TRUE" ]:
                buf = buf + nopxe % (srv, system.name)
            if kickstart and os.path.exists(kickstart):
                buf = buf + saveks % (srv, "system", system.name)
            if self.settings.run_install_triggers:
                buf = buf + runpost % (srv, what, system.name)
        else:
            # start-of-install trigger for a system
            if self.settings.run_install_triggers:
                buf = buf + runpre % (srv, what, system.name)
    else:
        if not is_pre:
            # end-of-install actions for a profile
            if kickstart and os.path.exists(kickstart):
                buf = buf + saveks % (srv, "profile", profile.name)
            if self.settings.run_install_triggers:
                buf = buf + runpost % (srv, what, profile.name)
        else:
            # start-of-install trigger for a profile
            if self.settings.run_install_triggers:
                buf = buf + runpre % (srv, what, profile.name)
    return buf
def run(api, args, logger):
    """
    Install trigger: remove (clean) the old puppet certificate of a
    freshly installed system, when enabled in settings.  Failures are
    only logged; always returns 0.
    """
    objtype = args[0]   # "system" or "profile"
    name = args[1]      # name of system or profile
    ip = args[2]        # ip or "?"
    if objtype != "system":
        return 0
    settings = api.settings()
    enabled_values = ["1", "yes", "y", "true"]
    if str(settings.puppet_auto_setup).lower() not in enabled_values:
        return 0
    if str(settings.remove_old_puppet_certs_automatically).lower() not in enabled_values:
        return 0
    system = utils.blender(api, False, api.find_system(name))
    hostname = system["hostname"]
    fqdn_pattern = r'[\w-]+\..+'
    # qualify the hostname with a search domain when it is not an FQDN yet
    if not re.match(fqdn_pattern, hostname):
        search_domains = system['name_servers_search']
        if search_domains:
            hostname += '.' + search_domains[0]
    if not re.match(fqdn_pattern, hostname):
        default_search_domains = system['default_name_servers_search']
        if default_search_domains:
            hostname += '.' + default_search_domains[0]
    puppetca_path = settings.puppetca_path
    cmd = [puppetca_path, 'cert', 'clean', hostname]
    rc = 0
    try:
        rc = utils.subprocess_call(logger, cmd, shell=False)
    except:
        if logger is not None:
            logger.warning("failed to execute %s" % puppetca_path)
    if rc != 0:
        if logger is not None:
            logger.warning("puppet cert removal for %s failed" % name)
    return 0
def generate_config_stanza(self, obj, is_profile=True):
    """
    Add in automatic to configure /etc/yum.repos.d on the remote
    system if the kickstart file contains the magic
    $yum_config_stanza.
    """
    if not self.settings.yum_post_install_mirror:
        return ""
    blended = utils.blender(self.api, False, obj)
    kind = "profile" if is_profile else "system"
    url = "http://%s/cblr/svc/op/yum/%s/%s" % (blended["http_server"], kind, obj.name)
    return 'wget "%s" --output-document=/etc/yum.repos.d/cobbler-config.repo\n' % (url)
def generate_config_stanza(self, obj, is_profile=True):
    """
    Add in automatic to configure /etc/yum.repos.d on the remote
    system if the kickstart file contains the magic $yum_config_stanza.

    Returns a wget command line fetching the cobbler-generated yum
    config, or "" when yum_post_install_mirror is disabled.
    """
    # feature is opt-in via settings
    if not self.settings.yum_post_install_mirror:
        return ""
    blended = utils.blender(self.api, False, obj)
    # same endpoint shape for both object kinds, only the path segment differs
    kind = "profile" if is_profile else "system"
    url = "http://%s/cblr/svc/op/yum/%s/%s" % (blended["http_server"], kind, obj.name)
    return "wget \"%s\" --output-document=/etc/yum.repos.d/cobbler-config.repo\n" % (url)
def make_s390_pseudo_pxe_menu(self):
    """
    Write the s390x "pseudo PXE" menu: a profile_list file plus, for
    each s390* profile, a PXE file and templated _conf/_parm files
    under <bootloc>/s390x.

    :raises CX: when a profile's distro cannot be resolved
    """
    s390path = os.path.join(self.bootloc, "s390x")
    if not os.path.exists(s390path):
        utils.mkdir(s390path)
    profile_list = [profile for profile in self.profiles]
    image_list = [image for image in self.images]
    # sort by name for a stable menu order (Python 2 cmp-style sort)
    def sort_name(a, b):
        return cmp(a.name, b.name)
    profile_list.sort(sort_name)
    image_list.sort(sort_name)
    listfile = open(os.path.join(s390path, "profile_list"), "w+")
    for profile in profile_list:
        distro = profile.get_conceptual_parent()
        if distro is None:
            raise CX("profile is missing distribution: %s, %s" % (profile.name, profile.distro))
        # only s390/s390x distros appear in this menu
        if distro.arch.startswith("s390"):
            listfile.write("%s\n" % profile.name)
            f2 = os.path.join(self.bootloc, "s390x", "p_%s" % profile.name)
            self.write_pxe_file(f2, None, profile, distro, distro.arch)
            cf = "%s_conf" % f2
            pf = "%s_parm" % f2
            # NOTE(review): these template handles are never closed — TODO confirm
            template_cf = open("/etc/cobbler/pxe/s390x_conf.template")
            template_pf = open("/etc/cobbler/pxe/s390x_parm.template")
            blended = utils.blender(self.api, True, profile)
            self.templar.render(template_cf, blended, cf)
            # FIXME: profiles also need this data!
            # FIXME: the _conf and _parm files are limited to 80 characters in length
            # resolve the http_server name to an IP; fall back to the name
            # itself if resolution fails
            try:
                ipaddress = socket.gethostbyname_ex(blended["http_server"])[2][0]
            except socket.gaierror:
                ipaddress = blended["http_server"]
            kickstart_path = "http://%s/cblr/svc/op/ks/profile/%s" % (ipaddress, profile.name)
            # gather default kernel_options and default kernel_options_s390x
            kopts = blended.get("kernel_options", "")
            hkopts = shlex.split(utils.hash_to_string(kopts))
            blended["kickstart_expanded"] = "ks=%s" % kickstart_path
            blended["kernel_options"] = hkopts
            self.templar.render(template_pf, blended, pf)
    listfile.close()
def createrepo_walker(self, repo, dirname, fnames):
    """
    Used to run createrepo on a copied Yum mirror.

    Intended as an os.path.walk-style callback: truncates fnames in
    place so the walk does not descend further once the right
    directory level is processed.

    :param repo: blended repo datastructure (dict-like)
    :param dirname: directory to run createrepo in
    :param fnames: walk callback file list; cleared in place
    """
    if os.path.exists(dirname) or repo['breed'] == 'rsync':
        utils.remove_yum_olddata(dirname)
        # add any repo metadata we can use
        mdoptions = []
        if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
            if not HAS_YUM:
                utils.die(self.logger, "yum is required to use this feature")
            rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
            # preserve comps/group metadata if the upstream repo had it
            if rmd.repoData.has_key("group"):
                groupmdfile = rmd.getData("group").location[1]
                mdoptions.append("-g %s" % groupmdfile)
            if rmd.repoData.has_key("prestodelta"):
                # need createrepo >= 0.9.7 to add deltas
                if utils.check_dist() in ("redhat", "fedora", "centos", "scientific linux", "suse", "opensuse"):
                    cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                    createrepo_ver = utils.subprocess_get(self.logger, cmd)
                    # NOTE(review): lexicographic string compare of versions —
                    # works for 0.9.x but not in general; TODO confirm
                    if createrepo_ver >= "0.9.7":
                        mdoptions.append("--deltas")
                    else:
                        self.logger.error(
                            "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler."
                        )
        blended = utils.blender(self.api, False, repo)
        flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
        try:
            # BOOKMARK
            cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
            utils.subprocess_call(self.logger, cmd)
        except:
            # best effort: log and continue the walk
            utils.log_exc(self.logger)
            self.logger.error("createrepo failed.")
        del fnames[:]  # we're in the right place
def generate_kickstart(self, profile=None, system=None):
    """
    Render the kickstart template for a system (preferred) or profile.

    Returns the rendered kickstart text, or a "# ..." comment string
    when the kickstart is missing, invalid, or externally sourced.

    :param profile: profile object, used when system is None
    :param system: system object; takes precedence over profile
    """
    obj = system
    if system is None:
        obj = profile
    meta = utils.blender(self.api, False, obj)
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if not kickstart_path:
        return "# kickstart is missing or invalid: %s" % meta["kickstart"]
    # hoist ks_meta entries to the top level for template access
    ksmeta = meta["ks_meta"]
    del meta["ks_meta"]
    meta.update(ksmeta)  # make available at top level
    meta["yum_repo_stanza"] = self.generate_repo_stanza(obj, (system is None))
    meta["yum_config_stanza"] = self.generate_config_stanza(obj, (system is None))
    meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
    # add extra variables for other distro types
    if "tree" in meta:
        urlparts = urlparse.urlsplit(meta["tree"])
        meta["install_source_directory"] = urlparts[2]
    try:
        raw_data = utils.read_file_contents(
            kickstart_path, self.api.logger, self.settings.template_remote_kickstarts)
        if raw_data is None:
            return "# kickstart is sourced externally: %s" % meta["kickstart"]
        # distro: parent of the profile, grandparent of the system
        distro = profile.get_conceptual_parent()
        if system is not None:
            distro = system.get_conceptual_parent().get_conceptual_parent()
        data = self.templar.render(raw_data, meta, None, obj)
        if distro.breed == "suse":
            # AutoYaST profile
            data = self.generate_autoyast(profile, system, data)
        return data
    except FileNotFoundException:
        self.api.logger.warning("kickstart not found: %s" % meta["kickstart"])
        return "# kickstart not found: %s" % meta["kickstart"]
def checkfile(self,obj,is_profile): last_errors = [] blended = utils.blender(self.config.api, False, obj) os_version = blended["os_version"] print "----------------------------" ks = blended["kickstart"] if ks is None or ks == "": print "%s has no kickstart, skipping" % obj.name return [True, last_errors] if ks in self.ks_cache: print "Skipping kickstart %s, already checked previously" % ks return [True, ()] else: self.ks_cache.append(ks) breed = blended["breed"] if breed != "redhat": print "%s has a breed of %s, skipping" % (obj.name, breed) return [True, last_errors] server = blended["server"] if not ks.startswith("/"): url = self.kickstart else: if is_profile: url = "http://%s/cblr/svc/op/ks/profile/%s" % (server,obj.name) self.kickgen.generate_kickstart_for_profile(obj.name) else: url = "http://%s/cblr/svc/op/ks/system/%s" % (server,obj.name) self.kickgen.generate_kickstart_for_system(obj.name) last_errors = self.kickgen.get_last_errors() print "checking url: %s" % url rc = utils.os_system("/usr/bin/ksvalidator \"%s\"" % url) if rc != 0: return [False, last_errors] return [True, last_errors]
def checkfile(self, obj, is_profile):
    """
    Validate the rendered kickstart of one profile or system with
    ksvalidator (logger-based variant).

    Returns [ok, errors]; ok is False only when ksvalidator exits
    non-zero.

    :param obj: profile or system object
    :param is_profile: True when obj is a profile
    """
    last_errors = []
    blended = utils.blender(self.config.api, False, obj)
    os_version = blended["os_version"]
    self.logger.info("----------------------------")
    self.logger.debug("osversion: %s" % os_version)
    ks = blended["kickstart"]
    if ks is None or ks == "":
        self.logger.info("%s has no kickstart, skipping" % obj.name)
        return [True, last_errors]
    breed = blended["breed"]
    # ksvalidator only understands redhat-family kickstarts
    if breed != "redhat":
        self.logger.info("%s has a breed of %s, skipping" % (obj.name, breed))
        return [True, last_errors]
    server = blended["server"]
    if not ks.startswith("/"):
        # remote kickstart URL: validate directly
        url = ks
    else:
        # local template: render it and validate via the cobbler HTTP service
        if is_profile:
            url = "http://%s/cblr/svc/op/ks/profile/%s" % (server, obj.name)
            self.kickgen.generate_kickstart_for_profile(obj.name)
        else:
            url = "http://%s/cblr/svc/op/ks/system/%s" % (server, obj.name)
            self.kickgen.generate_kickstart_for_system(obj.name)
        last_errors = self.kickgen.get_last_errors()
    self.logger.info("checking url: %s" % url)
    rc = utils.subprocess_call(self.logger, "/usr/bin/ksvalidator -v \"%s\" \"%s\"" % (os_version, url), shell=True)
    if rc != 0:
        return [False, last_errors]
    return [True, last_errors]
def run(api, args, logger):
    """
    Install trigger: clean a reinstalled system's stale puppet
    certificate so the host can re-register with the puppet CA.

    Acts only on systems, and only when both puppet_auto_setup and
    remove_old_puppet_certs_automatically are enabled.  Always returns
    0 (best-effort trigger).
    """
    objtype = args[0]   # "system" or "profile"
    name = args[1]      # name of system or profile
    # ip = args[2]      # ip or "?" (unused here)
    if objtype != "system":
        return 0
    settings = api.settings()
    enabled = ["1", "yes", "y", "true"]
    if str(settings.puppet_auto_setup).lower() not in enabled:
        return 0
    if str(settings.remove_old_puppet_certs_automatically).lower() not in enabled:
        return 0
    record = api.find_system(name)
    record = utils.blender(api, False, record)
    hostname = record["hostname"]
    fqdn_pattern = r"[\w-]+\..+"
    # qualify a bare hostname with the system's search domain first,
    # then with the default search domains
    if not re.match(fqdn_pattern, hostname):
        domains = record["name_servers_search"]
        if domains:
            hostname = hostname + "." + domains[0]
    if not re.match(fqdn_pattern, hostname):
        fallback_domains = record["default_name_servers_search"]
        if fallback_domains:
            hostname = hostname + "." + fallback_domains[0]
    puppetca_path = settings.puppetca_path
    rc = 0
    try:
        rc = utils.subprocess_call(logger, [puppetca_path, "cert", "clean", hostname], shell=False)
    except:
        if logger is not None:
            logger.warning("failed to execute %s" % puppetca_path)
    if rc != 0 and logger is not None:
        logger.warning("puppet cert removal for %s failed" % name)
    return 0
def generate_script(self, what, objname, script_name):
    """
    Render a named script template from /var/lib/cobbler/scripts for a
    profile or system.

    Returns the rendered script text, or a "# ..." comment string when
    the object or template cannot be found.

    :param what: "profile" or "system"
    :param objname: name of the profile/system
    :param script_name: template filename under /var/lib/cobbler/scripts
    """
    if what == "profile":
        obj = self.api.find_profile(name=objname)
    else:
        obj = self.api.find_system(name=objname)
    if not obj:
        return "# %s named %s not found" % (what, objname)
    # walk up the inheritance chain to the root distro
    distro = obj.get_conceptual_parent()
    while distro.get_conceptual_parent():
        distro = distro.get_conceptual_parent()
    blended = utils.blender(self.api, False, obj)
    # hoist ks_meta entries to the top level for template access
    ksmeta = blended.get("ks_meta", {})
    try:
        del blended["ks_meta"]
    except:
        pass
    blended.update(ksmeta)  # make available at top level
    # FIXME: img_path should probably be moved up into the
    # blender function to ensure they're consistently
    # available to templates across the board
    if obj.enable_gpxe:
        blended["img_path"] = "http://%s:%s/cobbler/links/%s" % (
            self.settings.server,
            self.settings.http_port,
            distro.name,
        )
    else:
        blended["img_path"] = os.path.join("/images", distro.name)
    template = os.path.normpath(os.path.join("/var/lib/cobbler/scripts", script_name))
    if not os.path.exists(template):
        return "# script template %s not found" % script_name
    template_fh = open(template)
    template_data = template_fh.read()
    template_fh.close()
    return self.templar.render(template_data, blended, None, obj)
def run(api, args, logger):
    """
    Install trigger: record the installing box's IP in the megam
    boxips file, when base_megamreporting_enabled is set.

    Always returns 0 unless the target lookup fails.

    :param api: cobbler API handle
    :param args: [objtype, name, box_ip]
    :param logger: unused; this trigger logs via the logging module
    """
    logging.debug("%s\tRunning node install trigger\n", time.strftime('%X %x %Z'))
    settings = api.settings()
    # go no further if this feature is turned off
    if not str(settings.base_megamreporting_enabled).lower() in ["1", "yes", "y", "true"]:
        return 0
    logging.debug("%s\tUnwrapping args\n", time.strftime('%X %x %Z'))
    objtype = args[0]   # "target" or "profile"
    name = args[1]      # name of target or profile
    box_ip = args[2]    # ip
    logging.debug("%s\tArgs are \t%s\t%s\t%s\n", time.strftime('%X %x %Z'), objtype, name, box_ip)
    if objtype == "system":
        target = api.find_system(name)
    else:
        target = api.find_profile(name)
    logging.debug("%s\tFigured out HOSTNAME\t%s\n", time.strftime('%X %x %Z'), api.find_system(name))
    # NOTE(review): box_host is hard-coded, so every entry is "dummy=<ip>"
    # — looks like a placeholder; confirm intended hostname source
    box_host = "dummy"
    logging.debug("%s\tFigured out objtype\t%s\n", time.strftime('%X %x %Z'), target)
    # collapse the object to a rendered datastructure to verify it exists
    target = utils.blender(api, False, target)
    if target == {}:
        raise CX("failure looking up target")
    logging.debug("%s\tboxip is\t%s\n", time.strftime('%X %x %Z'), box_ip)
    # NOTE(review): appended entry has no trailing newline, so successive
    # runs concatenate on one line — TODO confirm the file format
    with open('/var/lib/megam/megamcib/boxips', 'a') as f:
        f.write(box_host + "=" + box_ip)
    logging.debug("%s\tI am done. Adios..Amigo\n", time.strftime('%X %x %Z'))
    return 0
def make_s390_pseudo_pxe_menu(self):
    """
    Generate the s390x pseudo-PXE menu: writes a profile_list file and,
    per s390* profile, a PXE file plus templated _conf and _parm files
    under <bootloc>/s390x.

    :raises CX: if a profile has no resolvable distro
    """
    s390path = os.path.join(self.bootloc, "s390x")
    if not os.path.exists(s390path):
        utils.mkdir(s390path)
    profile_list = [profile for profile in self.profiles]
    image_list = [image for image in self.images]
    # stable, name-sorted ordering (Python 2 cmp-style sort)
    def sort_name(a, b):
        return cmp(a.name, b.name)
    profile_list.sort(sort_name)
    image_list.sort(sort_name)
    listfile = open(os.path.join(s390path, "profile_list"), "w+")
    for profile in profile_list:
        distro = profile.get_conceptual_parent()
        if distro is None:
            raise CX("profile is missing distribution: %s, %s" % (profile.name, profile.distro))
        # only s390/s390x profiles belong in this menu
        if distro.arch.startswith("s390"):
            listfile.write("%s\n" % profile.name)
            f2 = os.path.join(self.bootloc, "s390x", "p_%s" % profile.name)
            self.write_pxe_file(f2, None, profile, distro, distro.arch)
            cf = "%s_conf" % f2
            pf = "%s_parm" % f2
            # NOTE(review): template handles are not closed — TODO confirm
            template_cf = open("/etc/cobbler/pxe/s390x_conf.template")
            template_pf = open("/etc/cobbler/pxe/s390x_parm.template")
            blended = utils.blender(self.api, True, profile)
            self.templar.render(template_cf, blended, cf)
            # FIXME: profiles also need this data!
            # FIXME: the _conf and _parm files are limited to 80 characters in length
            # prefer the resolved IP of http_server; fall back to the hostname
            try:
                ipaddress = socket.gethostbyname_ex(blended["http_server"])[2][0]
            except socket.gaierror:
                ipaddress = blended["http_server"]
            kickstart_path = "http://%s/cblr/svc/op/ks/profile/%s" % (ipaddress, profile.name)
            # gather default kernel_options and default kernel_options_s390x
            kopts = blended.get("kernel_options", "")
            hkopts = shlex.split(utils.hash_to_string(kopts))
            blended["kickstart_expanded"] = "ks=%s" % kickstart_path
            blended["kernel_options"] = hkopts
            self.templar.render(template_pf, blended, pf)
    listfile.close()
def generate_bootcfg(self, what, name):
    """
    Render the ESXi-style boot.cfg template for a profile or system.

    Returns the rendered boot.cfg text, or a "# ..." comment string
    when the target kind or template is invalid/missing.

    :param what: "profile" or "system" (case-insensitive)
    :param name: name of the profile/system
    """
    if what.lower() not in ("profile", "system"):
        return "# bootcfg is only valid for profiles and systems"
    distro = None
    if what == "profile":
        obj = self.api.find_profile(name=name)
        distro = obj.get_conceptual_parent()
    else:
        obj = self.api.find_system(name=name)
        distro = obj.get_conceptual_parent().get_conceptual_parent()
    # For multi-arch distros, the distro name in ks_mirror
    # may not contain the arch string, so we need to figure out
    # the path based on where the kernel is stored. We do this
    # because some distros base future downloads on the initial
    # URL passed in, so all of the files need to be at this location
    # (which is why we can't use the images link, which just contains
    # the kernel and initrd).
    ks_mirror_name = string.join(distro.kernel.split('/')[-2:-1], '')
    blended = utils.blender(self.api, False, obj)
    # hoist ks_meta entries to the top level for template access
    ksmeta = blended.get("ks_meta", {})
    try:
        del blended["ks_meta"]
    except:
        pass
    blended.update(ksmeta)  # make available at top level
    blended['distro'] = ks_mirror_name
    # template is selected per object kind and distro os_version
    template = os.path.join(self.settings.pxe_template_dir, "bootcfg_%s_%s.template" % (what.lower(), distro.os_version))
    if not os.path.exists(template):
        return "# boot.cfg template not found for the %s named %s (filename=%s)" % (what, name, template)
    template_fh = open(template)
    template_data = template_fh.read()
    template_fh.close()
    return self.templar.render(template_data, blended, None)
def createrepo_walker(self, repo, dirname, fnames):
    """
    Used to run createrepo on a copied Yum mirror.

    os.path.walk-style callback; empties fnames in place so the walk
    stops descending once this level is handled.

    :param repo: blended repo datastructure (dict-like)
    :param dirname: directory to run createrepo in
    :param fnames: walk callback file list; cleared in place
    """
    if os.path.exists(dirname) or repo["breed"] == "rsync":
        utils.remove_yum_olddata(dirname)
        # add any repo metadata we can use
        mdoptions = []
        if os.path.isfile("%s/repodata/repomd.xml" % (dirname)):
            if not HAS_YUM:
                utils.die(self.logger, "yum is required to use this feature")
            rmd = yum.repoMDObject.RepoMD("", "%s/repodata/repomd.xml" % (dirname))
            # keep comps/group metadata from the upstream repo
            if rmd.repoData.has_key("group"):
                groupmdfile = rmd.getData("group").location[1]
                mdoptions.append("-g %s" % groupmdfile)
            if rmd.repoData.has_key("prestodelta"):
                # need createrepo >= 0.9.7 to add deltas
                if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
                    cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                    createrepo_ver = utils.subprocess_get(self.logger, cmd)
                    # NOTE(review): lexicographic version compare — TODO confirm
                    if createrepo_ver >= "0.9.7":
                        mdoptions.append("--deltas")
                    else:
                        # unlike the sibling implementation, this variant aborts
                        utils.die(
                            self.logger,
                            "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.",
                        )
        blended = utils.blender(self.api, False, repo)
        flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
        try:
            # BOOKMARK
            cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
            utils.subprocess_call(self.logger, cmd)
        except:
            utils.log_exc(self.logger)
            self.logger.error("createrepo failed.")
        del fnames[:]  # we're in the right place
def write_boot_files_distro(self, distro):
    """
    Copy a distro's configured boot_files (possibly glob patterns) into
    place, templating each destination path first.

    :param distro: distro object whose boot_files hash is processed
    :returns: 0 always (errors are logged, not raised)
    """
    # collapse the object down to a rendered datastructure
    # the second argument set to false means we don't collapse
    # hashes/arrays into a flat string
    target = utils.blender(self.config.api, False, distro)
    # Create metadata for the templar function
    # Right now, just using local_img_path, but adding more
    # cobbler variables here would probably be good
    metadata = {}
    metadata["local_img_path"] = os.path.join(utils.tftpboot_location(), "images", distro.name)
    # Create the templar instance.  Used to template the target directory
    templater = templar.Templar(self.config)
    # Loop through the hash of boot files,
    # executing a cp for each one
    self.logger.info("processing boot_files for distro: %s" % distro.name)
    for file in target["boot_files"].keys():
        rendered_file = templater.render(file, metadata, None)
        try:
            for f in glob.glob(target["boot_files"][file]):
                if f == target["boot_files"][file]:
                    # this wasn't really a glob, so just copy it as is
                    filedst = rendered_file
                else:
                    # this was a glob, so figure out what the destination
                    # file path/name should be
                    tgt_path, tgt_file = os.path.split(f)
                    rnd_path, rnd_file = os.path.split(rendered_file)
                    filedst = os.path.join(rnd_path, tgt_file)
                # existing destinations are never overwritten
                if not os.path.isfile(filedst):
                    shutil.copyfile(f, filedst)
                self.config.api.log("copied file %s to %s for %s" % (f, filedst, distro.name))
        except:
            # NOTE(review): if the failure happens before f/filedst are
            # bound (e.g. in glob), this message itself raises NameError
            # — TODO confirm
            self.logger.error("failed to copy file %s to %s for %s" % (f, filedst, distro.name))
    return 0
def generate_script(self, what, objname, script_name):
    """
    Render a named script template from /var/lib/cobbler/scripts for a
    profile or system.

    Returns the rendered text, or a "# ..." comment when the object or
    template is missing.
    """
    lookup = self.api.find_profile if what == "profile" else self.api.find_system
    obj = lookup(name=objname)
    if not obj:
        return "# %s named %s not found" % (what, objname)
    # climb the inheritance chain until we reach the root distro
    distro = obj.get_conceptual_parent()
    parent = distro.get_conceptual_parent()
    while parent:
        distro = parent
        parent = distro.get_conceptual_parent()
    blended = utils.blender(self.api, False, obj)
    # hoist ks_meta entries to the top level for template access
    blended.update(blended.pop("ks_meta", {}))
    # FIXME: img_path should probably be moved up into the
    # blender function to ensure they're consistently
    # available to templates across the board
    if obj.enable_gpxe:
        blended['img_path'] = 'http://%s:%s/cobbler/links/%s' % (self.settings.server, self.settings.http_port, distro.name)
    else:
        blended['img_path'] = os.path.join("/images", distro.name)
    template_path = os.path.normpath(os.path.join("/var/lib/cobbler/scripts", script_name))
    if not os.path.exists(template_path):
        return "# script template %s not found" % script_name
    handle = open(template_path)
    source = handle.read()
    handle.close()
    return self.templar.render(source, blended, None, obj)
def write_boot_files_distro(self, distro):
    """
    Copy a distro's configured boot_files (plain paths or glob
    patterns) into place, templating each destination first.

    :param distro: distro object whose boot_files hash is processed
    :returns: 0 always (errors are logged, not raised)
    """
    # collapse the object down to a rendered datastructure
    # the second argument set to false means we don't collapse
    # hashes/arrays into a flat string
    target = utils.blender(self.config.api, False, distro)
    # Create metadata for the templar function
    # Right now, just using img_path, but adding more
    # cobbler variables here would probably be good
    metadata = {}
    metadata["img_path"] = os.path.join(utils.tftpboot_location(), "images", distro.name)
    # Create the templar instance.  Used to template the target directory
    templater = templar.Templar(self.config)
    # Loop through the hash of boot files,
    # executing a cp for each one
    for file in target["boot_files"].keys():
        rendered_file = templater.render(file, metadata, None)
        try:
            for f in glob.glob(target["boot_files"][file]):
                if f == target["boot_files"][file]:
                    # this wasn't really a glob, so just copy it as is
                    filedst = rendered_file
                else:
                    # this was a glob, so figure out what the destination
                    # file path/name should be
                    tgt_path, tgt_file = os.path.split(f)
                    rnd_path, rnd_file = os.path.split(rendered_file)
                    filedst = os.path.join(rnd_path, tgt_file)
                # never clobber an existing destination file
                if not os.path.isfile(filedst):
                    shutil.copyfile(f, filedst)
                self.config.api.log("copied file %s to %s for %s" % (f, filedst, distro.name))
        except:
            # NOTE(review): f/filedst may be unbound here if the failure
            # precedes the copy loop — TODO confirm
            self.logger.error("failed to copy file %s to %s for %s" % (f, filedst, distro.name))
    return 0
def generate_kickstart_for_system(self, s):
    """
    Render the kickstart for the named system.

    Returns the rendered kickstart text, or a "# ..." comment string
    when the system/kickstart is missing or externally hosted.

    :param s: system name (looked up via the API)
    :raises CX: when the profile is missing or templating fails
    """
    s = self.api.find_system(name=s)
    if s is None:
        return "# system not found"
    profile = s.get_conceptual_parent()
    if profile is None:
        raise CX(_("system %(system)s references missing profile %(profile)s") % {"system": s.name, "profile": s.profile})
    distro = profile.get_conceptual_parent()
    if distro is None:
        # this is an image parented system, no kickstart available
        return "# image based systems do not have kickstarts"
    meta = utils.blender(self.api, False, s)
    kickstart_path = utils.find_kickstart(meta["kickstart"])
    if kickstart_path and os.path.exists(kickstart_path):
        try:
            # hoist ks_meta entries to the top level for template access
            ksmeta = meta["ks_meta"]
            del meta["ks_meta"]
            meta.update(ksmeta)  # make available at top level
            meta["yum_repo_stanza"] = self.generate_repo_stanza(s, False)
            meta["yum_config_stanza"] = self.generate_config_stanza(s, False)
            # signal snippets for end (0) and start (1) of install
            meta["kickstart_done"] = self.generate_kickstart_signal(0, profile, s)
            meta["kickstart_start"] = self.generate_kickstart_signal(1, profile, s)
            meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
            # meta["config_template_files"] = self.generate_template_files_stanza(g, False)
            kfile = open(kickstart_path)
            data = self.templar.render(kfile, meta, None, s)
            kfile.close()
            return data
        except:
            traceback.print_exc()
            raise CX(_("Error templating file"))
    elif kickstart_path is not None and not os.path.exists(kickstart_path):
        # remote (http/ftp/nfs) kickstarts are legitimate; anything else
        # that is missing on disk is an error
        if kickstart_path.find("http://") == -1 and kickstart_path.find("ftp://") == -1 and kickstart_path.find("nfs:") == -1:
            return "# Error, cannot find %s" % kickstart_path
    return "# kickstart is sourced externally: %s" % meta["kickstart"]
def run(api, args, logger):
    """
    Install trigger: spin up an AWS instance for the named system via
    aws.SystemAWS.

    :param api: cobbler API handle
    :param args: [name, objtype?, ...]
    :param logger: optional logger (may be None)
    :returns: 0 always
    """
    settings = api.settings()
    name = args[0]
    # NOTE(review): debug leftover; also builds a shell command from raw
    # trigger args — shell-injection risk if args are attacker-influenced
    os.system("echo %s > /tmp/my_test" % (args))
    # NOTE(review): other triggers in this codebase read objtype from
    # args[0]; here it is args[1] — TODO confirm intended arg layout
    if len(args) > 1 and args[1] != "system":
        return 0
    system = api.find_system(name)
    system = utils.blender(api, False, system)
    # NOTE(review): another debug leftover with the same injection concern
    os.system("echo 'hi, my name is %s' >> /tmp/blah" % (name))
    aws_conn = aws.SystemAWS()
    # hard-coded AMI, key, instance type and security group
    aws_conn._run_sys(name, 'ami-aecd60c7', 'dbarcelo-omnia', 't1.micro', ['quick-start-1'])
    if logger is not None:
        logger.info('spinning up in AWS!')
    return 0
def generate_netboot_iso(self, imagesdir, isolinuxdir, profiles=None, systems=None, exclude_dns=None, force_server=None, no_local_hdd=None):
    """
    Build the isolinux tree for a network-boot ISO: copy each selected
    profile's/system's kernel and initrd into isolinuxdir and write an
    isolinux.cfg menu with one LABEL per profile/system.

    :param imagesdir: unused here; part of the buildiso call signature
    :param isolinuxdir: destination directory for kernels/initrds/cfg
    :param profiles: optional comma-separated profile name filter
    :param systems: optional comma-separated system name filter
    :param exclude_dns: when truthy, omit nameserver/dns kernel args
    :param force_server: override the server name in blended data/ks URLs
    :param no_local_hdd: when truthy, omit the "boot local disk" entry
    """
    self.logger.info("copying kernels and initrds for profiles")
    # copy all images in included profiles to images dir
    for profile in self.api.profiles():
        use_this = True
        if profiles is not None:
            which_profiles = profiles.split(",")
            if not profile.name in which_profiles:
                use_this = False
        if use_this:
            dist = profile.get_conceptual_parent()
            if dist.name.lower().find("-xen") != -1:
                self.logger.info("skipping Xen distro: %s" % dist.name)
                continue
            distname = self.make_shorter(dist.name)
            # buildisodir/isolinux/$distro/vmlinuz, initrd.img
            # FIXME: this will likely crash on non-Linux breeds
            f1 = os.path.join(isolinuxdir, "%s.krn" % distname)
            f2 = os.path.join(isolinuxdir, "%s.img" % distname)
            if not os.path.exists(dist.kernel):
                utils.die(self.logger, "path does not exist: %s" % dist.kernel)
            if not os.path.exists(dist.initrd):
                utils.die(self.logger, "path does not exist: %s" % dist.initrd)
            shutil.copyfile(dist.kernel, f1)
            shutil.copyfile(dist.initrd, f2)
    if systems is not None:
        self.logger.info("copying kernels and initrds for systems")
        # copy all images in included profiles to images dir
        for system in self.api.systems():
            if system.name in systems:
                profile = system.get_conceptual_parent()
                dist = profile.get_conceptual_parent()
                if dist.name.find("-xen") != -1:
                    continue
                distname = self.make_shorter(dist.name)
                # buildisodir/isolinux/$distro/vmlinuz, initrd.img
                # FIXME: this will likely crash on non-Linux breeds
                shutil.copyfile(dist.kernel, os.path.join(isolinuxdir, "%s.krn" % distname))
                shutil.copyfile(dist.initrd, os.path.join(isolinuxdir, "%s.img" % distname))
    self.logger.info("generating a isolinux.cfg")
    isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
    cfg = open(isolinuxcfg, "w+")
    # header_written tracks whether a menu header is already in the file;
    # when the local-HDD entry is suppressed, the first profile/system
    # entry writes the header instead
    header_written = True
    if not no_local_hdd:
        cfg.write(LOCAL_HDD_HEADER)  # fixme, use template
    else:
        header_written = False
    self.logger.info("generating profile list")
    # sort the profiles
    profile_list = [profile for profile in self.profiles]
    def sort_name(a, b):
        return cmp(a.name, b.name)
    profile_list.sort(sort_name)
    for profile in profile_list:
        use_this = True
        if profiles is not None:
            which_profiles = profiles.split(",")
            if not profile.name in which_profiles:
                use_this = False
        if use_this:
            dist = profile.get_conceptual_parent()
            if dist.name.find("-xen") != -1:
                continue
            data = utils.blender(self.api, True, profile)
            if force_server:
                data["server"] = force_server
            distname = self.make_shorter(dist.name)
            if not header_written:
                cfg.write(HEADER.replace('local', profile.name))
                header_written = True
            cfg.write("\n")
            cfg.write("LABEL %s\n" % profile.name)
            cfg.write(" MENU LABEL %s\n" % profile.name)
            cfg.write(" kernel %s.krn\n" % distname)
            if data["kickstart"].startswith("/"):
                # local kickstart: serve it via the cobbler HTTP service
                data["kickstart"] = "http://%s/cblr/svc/op/ks/profile/%s" % (data["server"], profile.name)
            else:
                if force_server:
                    # replace configured hostname with the forced one
                    data["kickstart"] = re.sub(r'://.*?/', '://' + data["server"] + '/', data["kickstart"])
            append_line = " append initrd=%s.img" % distname
            append_line = append_line + " ks=%s " % data["kickstart"]
            append_line = append_line + " %s\n" % data["kernel_options"]
            # isolinux limits the append line length
            length = len(append_line)
            if length > 254:
                self.logger.warning("append line length is greater than 254 chars: (%s chars)" % length)
            cfg.write(append_line)
    if systems is not None:
        self.logger.info("generating system list")
        if header_written:
            cfg.write("\nMENU SEPARATOR\n")
        # sort the systems
        system_list = [system for system in self.systems]
        def sort_name(a, b):
            return cmp(a.name, b.name)
        system_list.sort(sort_name)
        for system in system_list:
            use_this = False
            if systems is not None:
                which_systems = systems.split(",")
                if system.name in which_systems:
                    use_this = True
            if use_this:
                profile = system.get_conceptual_parent()
                dist = profile.get_conceptual_parent()
                if dist.name.find("-xen") != -1:
                    continue
                data = utils.blender(self.api, True, system)
                if force_server:
                    data["server"] = force_server
                distname = self.make_shorter(dist.name)
                if not header_written:
                    cfg.write(HEADER.replace('local', system.name))
                    header_written = True
                cfg.write("\n")
                cfg.write("LABEL %s\n" % system.name)
                cfg.write(" MENU LABEL %s\n" % system.name)
                cfg.write(" kernel %s.krn\n" % distname)
                if data["kickstart"].startswith("/"):
                    # local kickstart: serve it via the cobbler HTTP service
                    data["kickstart"] = "http://%s/cblr/svc/op/ks/system/%s" % (data["server"], system.name)
                else:
                    if force_server:
                        # replace configured hostname with the forced one
                        data["kickstart"] = re.sub(r'://.*?/', '://' + data["server"] + '/', data["kickstart"])
                append_line = " append initrd=%s.img" % distname
                append_line = append_line + " ks=%s" % data["kickstart"]
                append_line = append_line + " %s" % data["kernel_options"]
                # add network info to avoid DHCP only if it is available
                if data.has_key("bonding_master_eth0") and data["bonding_master_eth0"] != "":
                    primary_interface = data["bonding_master_eth0"]
                else:
                    primary_interface = "eth0"
                # check if ksdevice entry exists and use that for network info
                blended = utils.blender(self.api, False, system)  # don't collapse
                if blended["kernel_options"].has_key("ksdevice") and blended["kernel_options"]["ksdevice"] != "":
                    ksdevice = blended["kernel_options"]["ksdevice"]
                    self.logger.info(" - ksdevice %s set for system %s" % (ksdevice, system.name))
                    if data.has_key("ip_address_" + ksdevice) and data["ip_address_" + ksdevice] != "":
                        primary_interface = ksdevice
                    else:
                        # ksdevice may be a MAC address; map it to an interface name
                        for (obj_iname, obj_interface) in data['interfaces'].iteritems():
                            mac = obj_interface["mac_address"].upper()
                            ksdevice_mac = ksdevice.upper()
                            if mac == ksdevice_mac:
                                primary_interface = obj_iname
                if data.has_key("ip_address_" + primary_interface) and data["ip_address_" + primary_interface] != "":
                    append_line = append_line + " ip=%s" % data["ip_address_" + primary_interface]
                if data.has_key("netmask_" + primary_interface) and data["netmask_" + primary_interface] != "":
                    append_line = append_line + " netmask=%s" % data["netmask_" + primary_interface]
                if data.has_key("gateway") and data["gateway"] != "":
                    append_line = append_line + " gateway=%s" % data["gateway"]
                if not exclude_dns and data.has_key("name_servers") and data["name_servers"]:
                    version = dist.os_version
                    # newer anaconda (rhel7+/fedora17+) uses nameserver=,
                    # older uses dns=
                    if dist.breed == "redhat" and ((version.startswith("rhel") and version >= "rhel7") or (version.startswith("fedora") and version >= "fedora17")):
                        append_line = append_line + " nameserver=%s\n" % " nameserver=".join(data["name_servers"])
                    else:
                        append_line = append_line + " dns=%s\n" % ",".join(data["name_servers"])
                length = len(append_line)
                if length > 254:
                    self.logger.warning("append line length is greater than 254 chars: (%s chars)" % length)
                cfg.write(append_line)
    # nothing selected wrote a header: emit the default one so the file
    # is still a valid menu
    if not header_written:
        cfg.write(HEADER)
    self.logger.info("done writing config")
    cfg.write("\n")
    cfg.write("MENU END\n")
    cfg.close()
def generate_standalone_iso(self, imagesdir, isolinuxdir, distname, filesource):
    """
    Build the isolinux tree for a standalone (self-contained) ISO of
    one distro: copy kernel/initrd, rsync the install tree, and write
    isolinux.cfg plus an embedded ks-<name>.cfg per descendant
    profile/system.

    :param imagesdir: unused here; part of the buildiso call signature
    :param isolinuxdir: destination directory for the isolinux files
    :param distname: name of the distro to build from
    :param filesource: install-tree source dir; auto-detected from the
                       distro kernel path under ks_mirror when None
    """
    # Get the distro object for the requested distro
    # and then get all of its descendants (profiles/sub-profiles/systems)
    distro = self.api.find_distro(distname)
    if distro is None:
        utils.die(self.logger, "distro %s was not found, aborting" % distname)
    descendants = distro.get_descendants()
    if filesource is None:
        # Try to determine the source from the distro kernel path
        self.logger.debug("trying to locate source for distro")
        found_source = False
        (source_head, source_tail) = os.path.split(distro.kernel)
        # walk up the kernel path until we hit <webdir>/ks_mirror
        while source_tail != '':
            if source_head == os.path.join(self.api.settings().webdir, "ks_mirror"):
                filesource = os.path.join(source_head, source_tail)
                found_source = True
                self.logger.debug("found source in %s" % filesource)
                break
            (source_head, source_tail) = os.path.split(source_head)
        # Can't find the source, raise an error
        if not found_source:
            utils.die(self.logger, " Error, no installation source found. When building a standalone ISO, you must specify a --source if the distro install tree is not hosted locally")
    self.logger.info("copying kernels and initrds for standalone distro")
    # buildisodir/isolinux/$distro/vmlinuz, initrd.img
    # FIXME: this will likely crash on non-Linux breeds
    f1 = os.path.join(isolinuxdir, "vmlinuz")
    f2 = os.path.join(isolinuxdir, "initrd.img")
    if not os.path.exists(distro.kernel):
        utils.die(self.logger, "path does not exist: %s" % distro.kernel)
    if not os.path.exists(distro.initrd):
        utils.die(self.logger, "path does not exist: %s" % distro.initrd)
    shutil.copyfile(distro.kernel, f1)
    shutil.copyfile(distro.initrd, f2)
    # copy the whole install tree next to the isolinux dir
    cmd = "rsync -rlptgu --exclude=boot.cat --exclude=TRANS.TBL --exclude=isolinux/ %s/ %s/../" % (filesource, isolinuxdir)
    self.logger.info("- copying distro %s files (%s)" % (distname, cmd))
    rc = utils.subprocess_call(self.logger, cmd, shell=True)
    if rc:
        utils.die(self.logger, "rsync of files failed")
    self.logger.info("generating a isolinux.cfg")
    isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
    cfg = open(isolinuxcfg, "w+")
    cfg.write(HEADER)  # fixme, use template
    for descendant in descendants:
        data = utils.blender(self.api, True, descendant)
        cfg.write("\n")
        cfg.write("LABEL %s\n" % descendant.name)
        cfg.write(" MENU LABEL %s\n" % descendant.name)
        cfg.write(" kernel vmlinuz\n")
        # kickstart is baked onto the ISO itself
        data["kickstart"] = "cdrom:/isolinux/ks-%s.cfg" % descendant.name
        append_line = " append initrd=initrd.img"
        append_line = append_line + " ks=%s " % data["kickstart"]
        append_line = append_line + " %s\n" % data["kernel_options"]
        cfg.write(append_line)
        if descendant.COLLECTION_TYPE == 'profile':
            kickstart_data = self.api.kickgen.generate_kickstart_for_profile(descendant.name)
        elif descendant.COLLECTION_TYPE == 'system':
            kickstart_data = self.api.kickgen.generate_kickstart_for_system(descendant.name)
        # rewrite any network install source ("url ...") to the cdrom
        cdregex = re.compile("url .*\n", re.IGNORECASE)
        kickstart_data = cdregex.sub("cdrom\n", kickstart_data)
        ks_name = os.path.join(isolinuxdir, "ks-%s.cfg" % descendant.name)
        ks_file = open(ks_name, "w+")
        ks_file.write(kickstart_data)
        ks_file.close()
    self.logger.info("done writing config")
    cfg.write("\n")
    cfg.write("MENU END\n")
    cfg.close()
    return
def run(api, args, logger):
    """
    Install trigger: e-mail a templated build report when
    build_reporting_enabled is set.

    :param api: cobbler API handle
    :param args: [objtype, name, boot_ip]
    :param logger: unused for now (see FIXME)
    :returns: 0 (also when reporting is disabled or unconfigured)
    :raises CX: when the target cannot be looked up
    """
    # FIXME: make everything use the logger
    settings = api.settings()
    # go no further if this feature is turned off
    if not str(settings.build_reporting_enabled).lower() in ["1", "yes", "y", "true"]:
        return 0
    objtype = args[0]   # "target" or "profile"
    name = args[1]      # name of target or profile
    boot_ip = args[2]   # ip or "?"
    if objtype == "system":
        target = api.find_system(name)
    else:
        target = api.find_profile(name)
    # collapse the object down to a rendered datastructure
    target = utils.blender(api, False, target)
    if target == {}:
        raise CX("failure looking up target")
    recipients = settings.build_reporting_email
    if recipients == "":
        return 0
    # add the ability to specify an MTA for servers that don't run their own
    smtp_server = settings.build_reporting_smtp_server
    if smtp_server == "":
        smtp_server = "localhost"
    # use a custom from address or fall back to a reasonable default
    from_addr = settings.build_reporting_sender
    if from_addr == "":
        from_addr = "cobbler@%s" % settings.server
    subject = settings.build_reporting_subject
    if subject == "":
        subject = '[Cobbler] install complete '
    # the joined form is only for display in the rendered template/header
    to_addr = ", ".join(recipients)
    metadata = {
        "from_addr": from_addr,
        "to_addr": to_addr,
        "subject": subject,
        "boot_ip": boot_ip
    }
    metadata.update(target)
    input_template = open("/etc/cobbler/reporting/build_report_email.template")
    input_data = input_template.read()
    input_template.close()
    message = templar.Templar().render(input_data, metadata, None)
    # for debug, call
    # print message
    # Send the mail
    # FIXME: on error, return non-zero
    server_handle = smtplib.SMTP(smtp_server)
    # BUGFIX: sendmail() treats a string recipient as ONE address, so the
    # previously-passed joined "a, b" string broke multi-recipient setups;
    # pass the recipient list itself instead.
    server_handle.sendmail(from_addr, recipients, message)
    server_handle.quit()
    return 0
def test_inheritance_and_variable_propogation(self):
    """
    End-to-end check of how attributes flow through the cobbler object
    tree (repo -> distro -> profile -> sub-profile -> system), using
    utils.blender to render objects.  Verifies (a) ks_meta and repos
    propagate DOWN the tree, and (b) values set on a leaf never
    propagate UP to parents.

    NOTE(review): this test mutates the live API state (repos, profiles,
    systems) and writes under /tmp; assumes "testdistro0" already exists
    -- presumably created by an earlier test in this suite.
    """
    # STEP ONE: verify that non-inherited objects behave
    # correctly with ks_meta (we picked this attribute
    # because it's a hash and it's a bit harder to handle
    # than strings).  It should be passed down the render
    # tree to all subnodes

    # create an on-disk directory so the repo mirror path is valid
    repo = self.api.new_repo()
    try:
        os.makedirs("/tmp/test_cobbler_repo")
    except:
        pass
    fd = open("/tmp/test_cobbler_repo/test.file", "w+")
    fd.write("hello!")
    fd.close()
    self.assertTrue(repo.set_name("testrepo"))
    self.assertTrue(repo.set_mirror("/tmp/test_cobbler_repo"))
    self.assertTrue(self.api.add_repo(repo))

    # profile with an explicit kickstart and the repo attached
    profile = self.api.new_profile()
    self.assertTrue(profile.set_name("testprofile12b2"))
    self.assertTrue(profile.set_distro("testdistro0"))
    self.assertTrue(profile.set_kickstart("http://127.0.0.1/foo"))
    self.assertTrue(profile.set_repos(["testrepo"]))
    self.assertTrue(profile.set_name_servers(["asdf"]))
    self.assertTrue(self.api.add_profile(profile))

    # disable this test as it's not a valid repo yet
    # self.api.reposync()
    self.api.sync()

    # a system with its own ks_meta; this must NOT leak up to the profile
    system = self.api.new_system()
    self.assertTrue(system.set_name("foo"))
    self.assertTrue(system.set_profile("testprofile12b2"))
    self.assertTrue(system.set_ksmeta({"asdf": "jkl"}))
    self.assertTrue(self.api.add_system(system))
    profile = self.api.profiles().find("testprofile12b2")
    ksmeta = profile.ks_meta
    self.assertFalse(ksmeta.has_key("asdf"))

    # FIXME: do the same for inherited profiles

    # now verify the same for an inherited profile
    # and this time walk up the tree to verify it wasn't
    # applied to any other object except the base.
    profile2 = self.api.new_profile(is_subobject=True)
    profile2.set_name("testprofile12b3")
    profile2.set_parent("testprofile12b2")
    self.api.add_profile(profile2)

    # disable this test as syncing an invalid repo will fail
    # self.api.reposync()
    self.api.sync()

    # FIXME: now add a system to the inherited profile
    # and set a attribute on it that we will later check for
    system2 = self.api.new_system()
    self.assertTrue(system2.set_name("foo2"))
    self.assertTrue(system2.set_profile("testprofile12b3"))
    self.assertTrue(system2.set_ksmeta({"narf": "troz"}))
    self.assertTrue(self.api.add_system(system2))

    # disable this test as invalid repos don't sync
    # self.api.reposync()
    self.api.sync()

    # FIXME: now evaluate the system object and make sure
    # that it has inherited the repos value from the superprofile
    # above it's actual profile.  This should NOT be present in the
    # actual object, which we have not modified yet.
    data = utils.blender(self.api, False, system2)
    self.assertTrue(data["repos"] == ["testrepo"])
    # the sub-profile itself still stores the inherit marker, not a copy
    self.assertTrue(self.api.profiles().find(system2.profile).repos == "<<inherit>>")

    # now if we set the repos object of the system to an additional
    # repo we should verify it now contains two repos.
    # (FIXME)
    repo2 = self.api.new_repo()
    try:
        os.makedirs("/tmp/cobbler_test/repo0")
    except:
        pass
    fd = open("/tmp/cobbler_test/repo0/file.test", "w+")
    fd.write("Hi!")
    fd.close()
    self.assertTrue(repo2.set_name("testrepo2"))
    self.assertTrue(repo2.set_mirror("/tmp/cobbler_test/repo0"))
    self.assertTrue(self.api.add_repo(repo2))
    profile2 = self.api.profiles().find("testprofile12b3")
    # note: side check to make sure we can also set to string values
    profile2.set_repos("testrepo2")
    self.api.add_profile(profile2)  # save it

    # random bug testing: run sync several times and ensure cardinality doesn't change
    # self.api.reposync()
    self.api.sync()
    self.api.sync()
    self.api.sync()

    # blended view of the leaf system must now merge both repos
    data = utils.blender(self.api, False, system2)
    self.assertTrue("testrepo" in data["repos"])
    self.assertTrue("testrepo2" in data["repos"])
    self.assertTrue(len(data["repos"]) == 2)
    self.assertTrue(self.api.profiles().find(system2.profile).repos == ["testrepo2"])

    # now double check that the parent profile still only has one repo in it.
    # this is part of our test against upward propogation
    profile = self.api.profiles().find("testprofile12b2")
    self.assertTrue(len(profile.repos) == 1)
    self.assertTrue(profile.repos == ["testrepo"])

    # now see if the subprofile does NOT have the ksmeta attribute
    # this is part of our test against upward propogation
    profile2 = self.api.profiles().find("testprofile12b3")
    self.assertTrue(type(profile2.ks_meta) == type(""))
    self.assertTrue(profile2.ks_meta == "<<inherit>>")

    # now see if the profile above this profile still doesn't have it
    profile = self.api.profiles().find("testprofile12b2")
    self.assertTrue(type(profile.ks_meta) == type({}))
    # self.api.reposync()
    self.api.sync()
    self.assertFalse(profile.ks_meta.has_key("narf"), "profile does not have the system ksmeta")

    # self.api.reposync()
    self.api.sync()

    # verify that the distro did not acquire the property
    # we just set on the leaf system
    distro = self.api.distros().find("testdistro0")
    self.assertTrue(type(distro.ks_meta) == type({}))
    self.assertFalse(distro.ks_meta.has_key("narf"), "distro does not have the system ksmeta")

    # STEP THREE: verify that inheritance appears to work
    # by setting ks_meta on the subprofile and seeing
    # if it appears on the leaf system ... must use
    # blender functions
    profile2 = self.api.profiles().find("testprofile12b3")
    profile2.set_ksmeta({"canyouseethis": "yes"})
    self.assertTrue(self.api.add_profile(profile2))
    system2 = self.api.systems().find("foo2")
    data = utils.blender(self.api, False, system2)
    self.assertTrue(data.has_key("ks_meta"))
    self.assertTrue(data["ks_meta"].has_key("canyouseethis"))

    # STEP FOUR: do the same on the superprofile and see
    # if that propogates
    profile = self.api.profiles().find("testprofile12b2")
    profile.set_ksmeta({"canyouseethisalso": "yes"})
    self.assertTrue(self.api.add_profile(profile))
    system2 = self.api.systems().find("foo2")
    data = utils.blender(self.api, False, system2)
    self.assertTrue(data.has_key("ks_meta"))
    self.assertTrue(data["ks_meta"].has_key("canyouseethisalso"))

    # STEP FIVE: see if distro attributes propogate
    distro = self.api.distros().find("testdistro0")
    distro.set_ksmeta({"alsoalsowik": "moose"})
    self.assertTrue(self.api.add_distro(distro))
    system2 = self.api.find_system("foo2")
    data = utils.blender(self.api, False, system2)
    self.assertTrue(data.has_key("ks_meta"))
    self.assertTrue(data["ks_meta"].has_key("alsoalsowik"))
def generate_gpxe(self, what, name):
    """
    Render the gPXE boot script for a profile or system.

    what -- "profile" or "system" (accepted case-insensitively; any
            other value yields a commented error string)
    name -- the name of the profile/system to render for

    Returns the rendered template text, or a "# ..." comment string
    describing why nothing could be generated.
    """
    # Normalize once: validation was case-insensitive but all later
    # dispatch compared case-sensitively, so e.g. "Profile" used to slip
    # through validation and then be treated as a system.
    what = what.lower()
    if what not in ("profile", "system"):
        return "# gpxe is only valid for profiles and systems"

    distro = None
    if what == "profile":
        obj = self.api.find_profile(name=name)
        distro = obj.get_conceptual_parent()
    else:
        obj = self.api.find_system(name=name)
        # system -> profile -> distro
        distro = obj.get_conceptual_parent().get_conceptual_parent()
        netboot_enabled = obj.netboot_enabled

    # For multi-arch distros, the distro name in ks_mirror
    # may not contain the arch string, so we need to figure out
    # the path based on where the kernel is stored. We do this
    # because some distros base future downloads on the initial
    # URL passed in, so all of the files need to be at this location
    # (which is why we can't use the images link, which just contains
    # the kernel and initrd).
    ks_mirror_name = ''.join(distro.kernel.split('/')[-2:-1])

    blended = utils.blender(self.api, False, obj)
    ksmeta = blended.get("ks_meta", {})
    try:
        del blended["ks_meta"]
    except:
        pass
    blended.update(ksmeta)  # make available at top level

    blended['distro'] = distro.name
    blended['ks_mirror_name'] = ks_mirror_name
    blended['kernel_name'] = os.path.basename(distro.kernel)
    blended['initrd_name'] = os.path.basename(distro.initrd)

    if what == "profile":
        blended['append_line'] = self.build_kernel_options(None, obj, distro, None, None, blended['kickstart'])
    else:
        blended['append_line'] = self.build_kernel_options(obj, None, distro, None, None, blended['kickstart'])

    template = None
    if distro.breed in ['redhat', 'debian', 'ubuntu', 'suse']:
        # all of these use a standard kernel/initrd setup so
        # they all use the same gPXE template
        template = os.path.join(self.settings.pxe_template_dir, "gpxe_%s_linux.template" % what)
    elif distro.breed == 'vmware':
        if distro.os_version == 'esx4':
            # older ESX is pretty much RHEL, so it uses the standard kernel/initrd setup
            template = os.path.join(self.settings.pxe_template_dir, "gpxe_%s_linux.template" % what)
        elif distro.os_version == 'esxi4':
            template = os.path.join(self.settings.pxe_template_dir, "gpxe_%s_esxi4.template" % what)
        elif distro.os_version.startswith('esxi5'):
            template = os.path.join(self.settings.pxe_template_dir, "gpxe_%s_esxi5.template" % what)
    elif distro.breed == 'freebsd':
        template = os.path.join(self.settings.pxe_template_dir, "gpxe_%s_freebsd.template" % what)

    if what == "system":
        # systems with netboot disabled fall back to a local-boot template
        if not netboot_enabled:
            template = os.path.join(self.settings.pxe_template_dir, "gpxe_%s_local.template" % what)

    if not template:
        return "# unsupported breed/os version"

    if not os.path.exists(template):
        return "# gpxe template not found for the %s named %s (filename=%s)" % (what, name, template)

    template_fh = open(template)
    template_data = template_fh.read()
    template_fh.close()

    return self.templar.render(template_data, blended, None)
def write_templates(self, obj, write_file=False, path=None):
    """
    A semi-generic function that will take an object with a
    template_files hash {source: destination}, and generate a rendered
    file.  The write_file option allows for generating of the rendered
    output without actually creating any files.

    The return value is a hash of the destination file names (after
    variable substitution is done) and the data in the file.

    Raises CX on bad source/destination paths when writing is requested.
    """
    self.logger.info("Writing template files for %s" % obj.name)

    results = {}

    try:
        templates = obj.template_files
    except:
        # object has no template_files attribute; nothing to do
        return results

    blended = utils.blender(self.api, False, obj)

    ksmeta = blended.get("ks_meta", {})
    try:
        del blended["ks_meta"]
    except:
        pass
    blended.update(ksmeta)  # make available at top level

    templates = blended.get("template_files", {})
    try:
        del blended["template_files"]
    except:
        pass
    blended.update(templates)  # make available at top level

    (success, templates) = utils.input_string_or_hash(templates)

    if not success:
        return results

    blended['img_path'] = os.path.join("/images", blended["distro_name"])
    blended['local_img_path'] = os.path.join(utils.tftpboot_location(), "images", blended["distro_name"])

    for template in templates.keys():
        dest = templates[template]
        if dest is None:
            continue

        # Run the source and destination files through
        # templar first to allow for variables in the path
        template = self.templar.render(template, blended, None).strip()
        dest = os.path.normpath(self.templar.render(dest, blended, None).strip())
        # Get the path for the destination output
        dest_dir = os.path.normpath(os.path.dirname(dest))

        # If we're looking for a single template, skip if this ones
        # destination is not it.
        if not path is None and path != dest:
            continue

        # If we are writing output to a file, we allow files to be
        # written into the tftpboot directory, otherwise force all
        # templated configs into the rendered directory to ensure that
        # a user granted cobbler privileges via sudo can't overwrite
        # arbitrary system files (This also makes cleanup easier).
        if os.path.isabs(dest_dir) and write_file:
            if dest_dir.find(utils.tftpboot_location()) != 0:
                # NOTE: unreachable `continue` statements that followed
                # each raise in the original have been removed; the
                # raises abort processing as before.
                raise CX(" warning: template destination (%s) is outside %s, skipping." % (dest_dir, utils.tftpboot_location()))
        else:
            dest_dir = os.path.join(self.settings.webdir, "rendered", dest_dir)
            dest = os.path.join(dest_dir, os.path.basename(dest))
            if not os.path.exists(dest_dir):
                utils.mkdir(dest_dir)

        # Check for problems
        if not os.path.exists(template):
            raise CX("template source %s does not exist" % template)
        elif write_file and not os.path.isdir(dest_dir):
            raise CX("template destination (%s) is invalid" % dest_dir)
        elif write_file and os.path.exists(dest):
            raise CX("template destination (%s) already exists" % dest)
        elif write_file and os.path.isdir(dest):
            raise CX("template destination (%s) is a directory" % dest)
        elif template == "" or dest == "":
            # FIX: the original applied "% dest" to a format string with
            # no placeholder, which raised TypeError instead of CX
            raise CX("either the template source or destination was blank (unknown variable used?)")

        template_fh = open(template)
        template_data = template_fh.read()
        template_fh.close()

        buffer = self.templar.render(template_data, blended, None)
        results[dest] = buffer

        if write_file:
            self.logger.info("generating: %s" % dest)
            fd = open(dest, "w")
            fd.write(buffer)
            fd.close()

    return results
def build_kernel_options(self, system, profile, distro, image, arch, kickstart_path):
    """
    Builds the full kernel options line.

    Exactly one of system/profile/image is used as the blend source
    (system wins, then profile, then image).  kickstart_path, when a
    local "/..." path, is rewritten into an http:// URL served by
    cobbler's svc handler; the resulting option name (ks=, autoyast=,
    url=, ...) depends on distro.breed.  Returns the assembled append
    line as a string (templar-rendered at the end).
    """
    if system is not None:
        blended = utils.blender(self.api, False, system)
    elif profile is not None:
        blended = utils.blender(self.api, False, profile)
    else:
        blended = utils.blender(self.api, False, image)

    append_line = ""
    kopts = blended.get("kernel_options", dict())
    # support additional initrd= entries in kernel options.
    # the leading comma makes this append onto the caller's
    # "initrd=..." prefix (see write_pxe_file).
    if "initrd" in kopts:
        append_line = ",%s" % kopts.pop("initrd")
    hkopts = utils.hash_to_string(kopts)
    append_line = "%s %s" % (append_line, hkopts)

    # kickstart path rewriting (get URLs for local files)
    if kickstart_path is not None and kickstart_path != "":

        # FIXME: need to make shorter rewrite rules for these URLs
        # resolve http_server to an IP so PXE clients without DNS work;
        # fall back to the raw hostname if resolution fails
        try:
            ipaddress = socket.gethostbyname_ex(blended["http_server"])[2][0]
        except socket.gaierror:
            ipaddress = blended["http_server"]
        if system is not None and kickstart_path.startswith("/"):
            kickstart_path = "http://%s/cblr/svc/op/ks/system/%s" % (ipaddress, system.name)
        elif kickstart_path.startswith("/"):
            kickstart_path = "http://%s/cblr/svc/op/ks/profile/%s" % (ipaddress, profile.name)

        if distro.breed is None or distro.breed == "redhat":
            append_line = "%s ks=%s" % (append_line, kickstart_path)
            gpxe = blended["enable_gpxe"]
            if gpxe:
                # gPXE exposes the boot mac via ${net0/mac} rather than BOOTIF
                append_line = append_line.replace('ksdevice=bootif', 'ksdevice=${net0/mac}')
        elif distro.breed == "suse":
            append_line = "%s autoyast=%s" % (append_line, kickstart_path)
        elif distro.breed == "debian" or distro.breed == "ubuntu":
            append_line = "%s auto-install/enable=true priority=critical url=%s" % (append_line, kickstart_path)
        elif distro.breed == "freebsd":
            append_line = "%s ks=%s" % (append_line, kickstart_path)

            # rework kernel options for debian distros
            # NOTE(review): despite the comment, this translation block
            # sits inside the freebsd branch in this revision -- confirm
            # whether it was meant for the debian/ubuntu branch above
            translations = {'ksdevice': "interface", 'lang': "locale"}
            for k, v in translations.iteritems():
                append_line = append_line.replace("%s=" % k, "%s=" % v)

            # interface=bootif causes a failure
            append_line = append_line.replace("interface=bootif", "")
        elif distro.breed == "vmware":
            if distro.os_version.find("esxi") != -1:
                # ESXi is very picky, it's easier just to redo the
                # entire append line here since
                append_line = " ks=%s %s" % (kickstart_path, hkopts)
                # ESXi likes even fewer options, so we remove them too
                append_line = append_line.replace("kssendmac", "")
            else:
                append_line = "%s vmkopts=debugLogToSerial:1 mem=512M ks=%s" % \
                    (append_line, kickstart_path)
            # interface=bootif causes a failure
            append_line = append_line.replace("ksdevice=bootif", "")

    if distro is not None and (distro.breed in ["debian", "ubuntu"]):
        # Hostname is required as a parameter, the one in the preseed is
        # not respected, so calculate if we have one here.
        # We're trying: first part of FQDN in hostname field, then system
        # name, then profile name.
        # In Ubuntu, this is at least used for the volume group name when
        # using LVM.
        domain = "local.lan"
        if system is not None:
            if system.hostname is not None and system.hostname != "":
                # If this is a FQDN, grab the first bit
                hostname = system.hostname.split(".")[0]
                _domain = system.hostname.split(".")[1:]
                if _domain:
                    domain = ".".join(_domain)
            else:
                hostname = system.name
        else:
            # ubuntu at the very least does not like having underscores
            # in the hostname.
            # FIXME: Really this should remove all characters that are
            # forbidden in hostnames
            hostname = profile.name.replace("_", "")

        # At least for debian deployments configured for DHCP networking
        # this values are not used, but specifying here avoids questions
        append_line = "%s hostname=%s" % (append_line, hostname)
        append_line = "%s domain=%s" % (append_line, domain)

        # A similar issue exists with suite name, as installer requires
        # the existence of "stable" in the dists directory
        append_line = "%s suite=%s" % (append_line, distro.os_version)

    # append necessary kernel args for arm architectures
    if arch is not None and arch.startswith("arm"):
        append_line = "%s fixrtc vram=48M omapfb.vram=0:24M" % append_line

    # do variable substitution on the append line
    # FIXME: should we just promote all of the ksmeta
    # variables instead of just the tree?
    if blended.has_key("ks_meta") and blended["ks_meta"].has_key("tree"):
        blended["tree"] = blended["ks_meta"]["tree"]
    append_line = self.templar.render(append_line, utils.flatten(blended), None)

    # FIXME - the append_line length limit is architecture specific
    if len(append_line) >= 255:
        self.logger.warning("warning: kernel option length exceeds 255")

    return append_line
def write_pxe_file(self, filename, system, profile, distro, arch, image=None, include_header=True, metadata=None, format="pxe"):
    """
    Write a configuration file for the boot loader(s).
    More system-specific configuration may come in later, if so
    that would appear inside the system object in api.py

    NOTE: relevant to tftp and pseudo-PXE (s390) only

    ia64 is mostly the same as syslinux stuff, s390 is a bit
    short-circuited and simpler.  All of it goes through the
    templating engine, see the templates in /etc/cobbler for
    more details

    Can be used for different formats, "pxe" (default) and "grub".

    Returns the rendered buffer (also written to `filename` when that
    is not None), or None when the target can't be PXE booted.
    NOTE(review): include_header is accepted but unused here -- kept
    for interface compatibility.
    """
    if arch is None:
        # FIX: was `raise "missing arch"` -- string exceptions raise
        # TypeError on Python 2.6+, masking the real error
        raise CX("missing arch")

    if image and not os.path.exists(image.file):
        return None  # nfs:// URLs or something, can't use for TFTP

    if metadata is None:
        metadata = {}

    # merge all settings into the template metadata
    (rval, settings) = utils.input_string_or_hash(self.settings.to_datastruct())
    if rval:
        for key in settings.keys():
            metadata[key] = settings[key]
    # ---
    # just some random variables
    template = None
    buffer = ""
    # ---
    kickstart_path = None
    kernel_path = None
    initrd_path = None
    img_path = None

    if image is None:
        # not image based, it's something normalish
        img_path = os.path.join("/images", distro.name)
        kernel_path = os.path.join("/images", distro.name, os.path.basename(distro.kernel))
        initrd_path = os.path.join("/images", distro.name, os.path.basename(distro.initrd))

        # Find the kickstart if we inherit from another profile
        if system:
            blended = utils.blender(self.api, True, system)
        else:
            blended = utils.blender(self.api, True, profile)
        kickstart_path = blended.get("kickstart", "")
    else:
        # this is an image we are making available, not kernel+initrd
        if image.image_type == "direct":
            kernel_path = os.path.join("/images2", image.name)
        elif image.image_type == "memdisk":
            kernel_path = "/memdisk"
            initrd_path = os.path.join("/images2", image.name)
        else:
            # CD-ROM ISO or virt-clone image? We can't PXE boot it.
            kernel_path = None
            initrd_path = None

    if img_path is not None and not metadata.has_key("img_path"):
        metadata["img_path"] = img_path
    if kernel_path is not None and not metadata.has_key("kernel_path"):
        metadata["kernel_path"] = kernel_path
    if initrd_path is not None and not metadata.has_key("initrd_path"):
        metadata["initrd_path"] = initrd_path

    # ---
    # choose a template
    if system:
        if format == "grub":
            template = os.path.join(self.settings.pxe_template_dir, "grubsystem.template")
        else:  # pxe
            if system.netboot_enabled:
                template = os.path.join(self.settings.pxe_template_dir, "pxesystem.template")

                if arch.startswith("s390"):
                    template = os.path.join(self.settings.pxe_template_dir, "pxesystem_s390x.template")
                elif arch == "ia64":
                    template = os.path.join(self.settings.pxe_template_dir, "pxesystem_ia64.template")
                elif arch.startswith("ppc"):
                    template = os.path.join(self.settings.pxe_template_dir, "pxesystem_ppc.template")
                elif arch.startswith("arm"):
                    template = os.path.join(self.settings.pxe_template_dir, "pxesystem_arm.template")
                elif distro and distro.os_version.startswith("esxi"):
                    # ESXi uses a very different pxe method, using more files than
                    # a standard kickstart and different options - so giving it a dedicated
                    # PXE template makes more sense than shoe-horning it into the existing
                    # templates
                    template = os.path.join(self.settings.pxe_template_dir, "pxesystem_esxi.template")
            else:
                # local booting on ppc requires removing the system-specific dhcpd.conf filename
                if arch is not None and arch.startswith("ppc"):
                    # Disable yaboot network booting for all interfaces on the system
                    for (name, interface) in system.interfaces.iteritems():

                        filename = "%s" % utils.get_config_filename(system, interface=name).lower()

                        # Remove symlink to the yaboot binary
                        f3 = os.path.join(self.bootloc, "ppc", filename)
                        if os.path.lexists(f3):
                            utils.rmfile(f3)

                        # Remove the interface-specific config file
                        f3 = os.path.join(self.bootloc, "etc", filename)
                        if os.path.lexists(f3):
                            utils.rmfile(f3)

                    # Yaboot/OF doesn't support booting locally once you've
                    # booted off the network, so nothing left to do
                    return None
                elif arch is not None and arch.startswith("s390"):
                    template = os.path.join(self.settings.pxe_template_dir, "pxelocal_s390x.template")
                elif arch is not None and arch.startswith("ia64"):
                    template = os.path.join(self.settings.pxe_template_dir, "pxelocal_ia64.template")
                else:
                    template = os.path.join(self.settings.pxe_template_dir, "pxelocal.template")
    else:
        # not a system record, so this is a profile record or an image
        if arch.startswith("s390"):
            template = os.path.join(self.settings.pxe_template_dir, "pxeprofile_s390x.template")
        # FIX: this was a fresh `if`, which let the trailing `else`
        # clobber the s390 template chosen above; must be one chain
        elif arch.startswith("arm"):
            template = os.path.join(self.settings.pxe_template_dir, "pxeprofile_arm.template")
        elif format == "grub":
            template = os.path.join(self.settings.pxe_template_dir, "grubprofile.template")
        elif distro and distro.os_version.startswith("esxi"):
            # ESXi uses a very different pxe method, see comment above in the system section
            template = os.path.join(self.settings.pxe_template_dir, "pxeprofile_esxi.template")
        else:
            template = os.path.join(self.settings.pxe_template_dir, "pxeprofile.template")

    if kernel_path is not None:
        metadata["kernel_path"] = kernel_path
    if initrd_path is not None:
        metadata["initrd_path"] = initrd_path

    # generate the kernel options and append line:
    kernel_options = self.build_kernel_options(system, profile, distro, image, arch, kickstart_path)
    metadata["kernel_options"] = kernel_options

    if distro and distro.os_version.startswith("esxi") and filename is not None:
        append_line = "BOOTIF=%s" % (os.path.basename(filename))
    elif metadata.has_key("initrd_path") and (not arch or arch not in ["ia64", "ppc", "ppc64", "arm"]):
        append_line = "append initrd=%s" % (metadata["initrd_path"])
    else:
        append_line = "append "
    append_line = "%s%s" % (append_line, kernel_options)
    if arch.startswith("ppc") or arch.startswith("s390"):
        # remove the prefix "append"
        # TODO: this looks like it's removing more than append, really
        # not sure what's up here...
        append_line = append_line[7:]
    metadata["append_line"] = append_line

    # store variables for templating
    metadata["menu_label"] = ""
    if profile:
        if not arch in ["ia64", "ppc", "ppc64", "s390", "s390x"]:
            metadata["menu_label"] = "MENU LABEL %s" % profile.name
            metadata["profile_name"] = profile.name
    elif image:
        metadata["menu_label"] = "MENU LABEL %s" % image.name
        metadata["profile_name"] = image.name

    if system:
        metadata["system_name"] = system.name

    # get the template
    if kernel_path is not None:
        template_fh = open(template)
        template_data = template_fh.read()
        template_fh.close()
    else:
        # this is something we can't PXE boot
        template_data = "\n"

    # save file and/or return results, depending on how called.
    buffer = self.templar.render(template_data, metadata, None)
    if filename is not None:
        self.logger.info("generating: %s" % filename)
        fd = open(filename, "w")
        fd.write(buffer)
        fd.close()
    return buffer
def write_all_system_files(self, system):
    """
    Write boot configuration file(s) for one system: a PXE/yaboot/elilo
    config per network interface (or per-system _conf/_parm files on
    s390).  Files are written under self.bootloc; configs are removed
    instead when the system is not management-supported.

    Raises CX when the system's profile or distro reference is broken.
    """
    profile = system.get_conceptual_parent()
    if profile is None:
        raise CX("system %(system)s references a missing profile %(profile)s" % {"system": system.name, "profile": system.profile})

    distro = profile.get_conceptual_parent()
    image_based = False
    image = None
    if distro is None:
        # profile with no distro is only legal for image-based "profiles"
        if profile.COLLECTION_TYPE == "profile":
            raise CX("profile %(profile)s references a missing distro %(distro)s" % {"profile": system.profile, "distro": profile.distro})
        else:
            image_based = True
            image = profile

    # hack: s390 generates files per system not per interface
    if not image_based and distro.arch.startswith("s390"):
        # Always write a system specific _conf and _parm file
        f2 = os.path.join(self.bootloc, "s390x", "s_%s" % system.name)
        cf = "%s_conf" % f2
        pf = "%s_parm" % f2

        template_cf = open("/etc/cobbler/pxe/s390x_conf.template")
        template_pf = open("/etc/cobbler/pxe/s390x_parm.template")
        blended = utils.blender(self.api, True, system)
        self.templar.render(template_cf, blended, cf)
        # FIX: close the template handles after rendering (they were
        # previously leaked); caller-closes matches usage elsewhere
        template_cf.close()
        # FIXME: profiles also need this data!
        # FIXME: the _conf and _parm files are limited to 80 characters in length
        try:
            ipaddress = socket.gethostbyname_ex(blended["http_server"])[2][0]
        except socket.gaierror:
            ipaddress = blended["http_server"]
        kickstart_path = "http://%s/cblr/svc/op/ks/system/%s" % (ipaddress, system.name)
        # gather default kernel_options and default kernel_options_s390x
        kopts = blended.get("kernel_options", "")
        hkopts = shlex.split(utils.hash_to_string(kopts))
        blended["kickstart_expanded"] = "ks=%s" % kickstart_path
        blended["kernel_options"] = hkopts
        self.templar.render(template_pf, blended, pf)
        template_pf.close()

        # Write system specific zPXE file
        if system.is_management_supported():
            self.write_pxe_file(f2, system, profile, distro, distro.arch)
        else:
            # ensure the file doesn't exist
            utils.rmfile(f2)
        return

    pxe_metadata = {'pxe_menu_items': self.get_menu_items()['pxe']}

    # generate one record for each described NIC ..
    for (name, interface) in system.interfaces.iteritems():

        ip = interface["ip_address"]

        f1 = utils.get_config_filename(system, interface=name)
        if f1 is None:
            self.logger.warning("invalid interface recorded for system (%s,%s)" % (system.name, name))
            continue

        if image_based:
            working_arch = image.arch
        else:
            working_arch = distro.arch
        if working_arch is None:
            # FIX: was a Python string exception (`raise "..."`), which
            # raises TypeError on Python 2.6+
            raise CX("internal error, invalid arch supplied")

        # for tftp only ...
        grub_path = None
        if working_arch in ["i386", "x86", "x86_64", "arm", "standard"]:
            # pxelinux wants a file named $name under pxelinux.cfg
            f2 = os.path.join(self.bootloc, "pxelinux.cfg", f1)

            # Only generating grub menus for these arch's:
            grub_path = os.path.join(self.bootloc, "grub", f1.upper())

        elif working_arch == "ia64":
            # elilo expects files to be named "$name.conf" in the root
            # and can not do files based on the MAC address
            if ip is not None and ip != "":
                self.logger.warning("Warning: Itanium system object (%s) needs an IP address to PXE" % system.name)

            filename = "%s.conf" % utils.get_config_filename(system, interface=name)
            f2 = os.path.join(self.bootloc, filename)

        elif working_arch.startswith("ppc"):
            # Determine filename for system-specific yaboot.conf
            filename = "%s" % utils.get_config_filename(system, interface=name).lower()
            f2 = os.path.join(self.bootloc, "etc", filename)

            # Link to the yaboot binary
            f3 = os.path.join(self.bootloc, "ppc", filename)
            if os.path.lexists(f3):
                utils.rmfile(f3)
            os.symlink("../yaboot", f3)
        else:
            continue

        if system.is_management_supported():
            if not image_based:
                self.write_pxe_file(f2, system, profile, distro, working_arch, metadata=pxe_metadata)
                if grub_path:
                    self.write_pxe_file(grub_path, system, profile, distro, working_arch, format="grub")
            else:
                self.write_pxe_file(f2, system, None, None, working_arch, image=profile, metadata=pxe_metadata)
        else:
            # ensure the file doesn't exist
            utils.rmfile(f2)
            if grub_path:
                utils.rmfile(grub_path)
def generate_standalone_iso(self, imagesdir, isolinuxdir, distname, filesource):
    """
    Create a bootable CD image to be used for hands-off CD installations.

    Copies the distro's install tree (from `filesource`, or auto-located
    under the webdir ks_mirror when None) into the ISO staging area,
    writes an isolinux.cfg entry plus a rendered kickstart file for every
    descendant (profile/sub-profile/system) of the distro, and rewrites
    network "url ..." lines to "cdrom" so installs use the CD media.

    Dies via utils.die on a missing distro, missing source, or rsync
    failure.
    """
    # Get the distro object for the requested distro
    # and then get all of its descendants (profiles/sub-profiles/systems)
    distro = self.api.find_distro(distname)
    if distro is None:
        utils.die(self.logger, "distro %s was not found, aborting" % distname)
    descendants = distro.get_descendants()

    if filesource is None:
        # Try to determine the source from the distro kernel path
        self.logger.debug("trying to locate source for distro")
        found_source = False
        # walk up from the kernel path until we hit the ks_mirror root
        (source_head, source_tail) = os.path.split(distro.kernel)
        while source_tail != '':
            if source_head == os.path.join(self.api.settings().webdir, "ks_mirror"):
                filesource = os.path.join(source_head, source_tail)
                found_source = True
                self.logger.debug("found source in %s" % filesource)
                break
            (source_head, source_tail) = os.path.split(source_head)
        # Can't find the source, raise an error
        if not found_source:
            utils.die(self.logger, " Error, no installation source found. When building a standalone ISO, you must specify a --source if the distro install tree is not hosted locally")

    self.logger.info("copying kernels and initrds for standalone distro")
    self.copy_boot_files(distro, isolinuxdir, None)

    cmd = "rsync -rlptgu --exclude=boot.cat --exclude=TRANS.TBL --exclude=isolinux/ %s/ %s/../" % (filesource, isolinuxdir)
    self.logger.info("- copying distro %s files (%s)" % (distname, cmd))
    rc = utils.subprocess_call(self.logger, cmd, shell=True)
    if rc:
        utils.die(self.logger, "rsync of files failed")

    self.logger.info("generating a isolinux.cfg")
    isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
    cfg = open(isolinuxcfg, "w+")
    # self.iso_template -- presumably the isolinux menu header, loaded
    # elsewhere on this class
    cfg.write(self.iso_template)

    for descendant in descendants:
        data = utils.blender(self.api, False, descendant)

        cfg.write("\n")
        cfg.write("LABEL %s\n" % descendant.name)
        cfg.write(" MENU LABEL %s\n" % descendant.name)
        cfg.write(" kernel %s\n" % os.path.basename(distro.kernel))

        append_line = " append initrd=%s" % os.path.basename(distro.initrd)
        # point the installer at the kickstart burned onto the CD,
        # with breed-specific option names
        if distro.breed == "redhat":
            append_line += " ks=cdrom:/isolinux/%s.cfg" % descendant.name
        if distro.breed == "suse":
            append_line += " autoyast=file:///isolinux/%s.cfg install=file:///" % descendant.name
            # avoid emitting a duplicate install= option below
            if data["kernel_options"].has_key("install"):
                del data["kernel_options"]["install"]
        if distro.breed in ["ubuntu", "debian"]:
            append_line += " auto-install/enable=true preseed/file=/cdrom/isolinux/%s.cfg" % descendant.name

        # add remaining kernel_options to append_line
        append_line += self.add_remaining_kopts(data["kernel_options"])
        cfg.write(append_line)

        if descendant.COLLECTION_TYPE == 'profile':
            kickstart_data = self.api.kickgen.generate_kickstart_for_profile(descendant.name)
        elif descendant.COLLECTION_TYPE == 'system':
            kickstart_data = self.api.kickgen.generate_kickstart_for_system(descendant.name)

        # rewrite any network install source to the CD
        cdregex = re.compile("url .*\n", re.IGNORECASE)
        kickstart_data = cdregex.sub("cdrom\n", kickstart_data)

        ks_name = os.path.join(isolinuxdir, "%s.cfg" % descendant.name)
        ks_file = open(ks_name, "w+")
        ks_file.write(kickstart_data)
        ks_file.close()

    self.logger.info("done writing config")
    cfg.write("\n")
    cfg.write("MENU END\n")
    cfg.close()

    return
def generate_netboot_iso(self, imagesdir, isolinuxdir, profiles=None, systems=None, exclude_dns=None):
    """
    Create bootable CD image to be used for network installations.

    Writes an isolinux.cfg into isolinuxdir containing one LABEL entry per
    selected profile and per selected system, and copies the matching
    kernel/initrd files alongside it.  Each entry's "append" line points the
    installer at the cobbler server (kickstart/autoyast/preseed URL) instead
    of embedding the install tree on the CD.

    profiles/systems: names (string or list) to include; when BOTH are
    empty, every profile and every system is processed.
    exclude_dns: when set, suppresses adding name-server boot options
    (see NOTE below on how this flag is actually evaluated).
    """
    # setup all profiles/systems lists
    all_profiles = [profile for profile in self.api.profiles()]
    all_profiles.sort(self.sort_name)
    all_systems = [system for system in self.api.systems()]
    all_systems.sort(self.sort_name)

    # convert input to lists
    which_profiles = utils.input_string_or_list(profiles)
    which_systems = utils.input_string_or_list(systems)

    # no profiles/systems selection is made, let's process everything
    do_all_systems = False
    do_all_profiles = False
    if len(which_profiles) == 0 and len(which_systems) == 0:
        do_all_systems = True
        do_all_profiles = True

    # setup isolinux.cfg: start from the shared template, then append one
    # LABEL block per selected object below
    isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
    cfg = open(isolinuxcfg, "w+")
    cfg.write(self.iso_template)

    # iterate through selected profiles
    for profile in all_profiles:
        if profile.name in which_profiles or do_all_profiles is True:
            self.logger.info("processing profile: %s" % profile.name)
            dist = profile.get_conceptual_parent()
            distname = self.make_shorter(dist.name)
            # kernel/initrd are copied as <distname>.krn / <distname>.img
            self.copy_boot_files(dist, isolinuxdir, distname)
            cfg.write("\n")
            cfg.write("LABEL %s\n" % profile.name)
            cfg.write(" MENU LABEL %s\n" % profile.name)
            cfg.write(" kernel %s.krn\n" % distname)
            data = utils.blender(self.api, False, profile)
            # a local kickstart path is rewritten to the cobbler-served URL
            if data["kickstart"].startswith("/"):
                data["kickstart"] = "http://%s:%s/cblr/svc/op/ks/profile/%s" % (data["server"], self.api.settings().http_port, profile.name)
            append_line = " append initrd=%s.img" % distname
            if dist.breed == "suse":
                if data["proxy"] != "":
                    append_line += " proxy=%s" % data["proxy"]
                # honor an explicit install= kernel option, else point at the
                # cobbler-mirrored install tree; consumed options are deleted
                # so add_remaining_kopts() does not emit them twice
                if data["kernel_options"].has_key("install") and data["kernel_options"]["install"] != "":
                    append_line += " install=%s" % data["kernel_options"]["install"]
                    del data["kernel_options"]["install"]
                else:
                    append_line += " install=http://%s:%s/cblr/links/%s" % (data["server"], self.api.settings().http_port, dist.name)
                if data["kernel_options"].has_key("autoyast") and data["kernel_options"]["autoyast"] != "":
                    append_line += " autoyast=%s" % data["kernel_options"]["autoyast"]
                    del data["kernel_options"]["autoyast"]
                else:
                    append_line += " autoyast=%s" % data["kickstart"]
            if dist.breed == "redhat":
                if data["proxy"] != "":
                    append_line += " proxy=%s http_proxy=%s" % (data["proxy"], data["proxy"])
                append_line += " ks=%s" % data["kickstart"]
            if dist.breed in ["ubuntu", "debian"]:
                append_line += " auto-install/enable=true preseed/url=%s" % data["kickstart"] if False else " auto-install/enable=true url=%s" % data["kickstart"]
                if data["proxy"] != "":
                    append_line += " mirror/http/proxy=%s" % data["proxy"]
            # add remaining kernel_options to append_line
            append_line += self.add_remaining_kopts(data["kernel_options"])
            cfg.write(append_line)
            length = len(append_line)
            # isolinux silently truncates overly long append lines
            if length > 254:
                self.logger.warning("append line length is greater than 254 chars (%s chars)" % length)

    cfg.write("\nMENU SEPARATOR\n")

    # iterate through all selected systems
    for system in all_systems:
        if system.name in which_systems or do_all_systems is True:
            self.logger.info("processing system: %s" % system.name)
            profile = system.get_conceptual_parent()
            dist = profile.get_conceptual_parent()
            distname = self.make_shorter(dist.name)
            self.copy_boot_files(dist, isolinuxdir, distname)
            cfg.write("\n")
            cfg.write("LABEL %s\n" % system.name)
            cfg.write(" MENU LABEL %s\n" % system.name)
            cfg.write(" kernel %s.krn\n" % distname)
            data = utils.blender(self.api, False, system)
            if data["kickstart"].startswith("/"):
                data["kickstart"] = "http://%s:%s/cblr/svc/op/ks/system/%s" % (data["server"], self.api.settings().http_port, system.name)
            append_line = " append initrd=%s.img" % distname
            if dist.breed == "suse":
                if data["proxy"] != "":
                    append_line += " proxy=%s" % data["proxy"]
                if data["kernel_options"].has_key("install") and data["kernel_options"]["install"] != "":
                    append_line += " install=%s" % data["kernel_options"]["install"]
                    del data["kernel_options"]["install"]
                else:
                    append_line += " install=http://%s:%s/cblr/links/%s" % (data["server"], self.api.settings().http_port, dist.name)
                if data["kernel_options"].has_key("autoyast") and data["kernel_options"]["autoyast"] != "":
                    append_line += " autoyast=%s" % data["kernel_options"]["autoyast"]
                    del data["kernel_options"]["autoyast"]
                else:
                    append_line += " autoyast=%s" % data["kickstart"]
            if dist.breed == "redhat":
                if data["proxy"] != "":
                    append_line += " proxy=%s http_proxy=%s" % (data["proxy"], data["proxy"])
                append_line += " ks=%s" % data["kickstart"]
            if dist.breed in ["ubuntu", "debian"]:
                append_line += " auto-install/enable=true url=%s netcfg/disable_dhcp=true" % data["kickstart"]
                if data["proxy"] != "":
                    append_line += " mirror/http/proxy=%s" % data["proxy"]
                # hostname is required as a parameter, the one in the preseed is not respected
                my_domain = "local.lan"
                if system.hostname != "":
                    # if this is a FQDN, grab the first bit
                    my_hostname = system.hostname.split(".")[0]
                    _domain = system.hostname.split(".")[1:]
                    if _domain:
                        my_domain = ".".join(_domain)
                else:
                    my_hostname = system.name.split(".")[0]
                    _domain = system.name.split(".")[1:]
                    if _domain:
                        my_domain = ".".join(_domain)
                # at least for debian deployments configured for DHCP networking
                # this values are not used, but specifying here avoids questions
                append_line += " hostname=%s domain=%s" % (my_hostname, my_domain)
                # a similar issue exists with suite name, as installer requires
                # the existence of "stable" in the dists directory
                append_line += " suite=%s" % dist.os_version

            # try to add static ip boot options to avoid DHCP (interface/ip/netmask/gw/dns)
            # check for overrides first and clear them from kernel_options
            my_int = None
            my_ip = None
            my_mask = None
            my_gw = None
            my_dns = None
            if dist.breed in ["suse", "redhat"]:
                if data["kernel_options"].has_key("netmask") and data["kernel_options"]["netmask"] != "":
                    my_mask = data["kernel_options"]["netmask"]
                    del data["kernel_options"]["netmask"]
                if data["kernel_options"].has_key("gateway") and data["kernel_options"]["gateway"] != "":
                    my_gw = data["kernel_options"]["gateway"]
                    del data["kernel_options"]["gateway"]
            if dist.breed == "redhat":
                if data["kernel_options"].has_key("ksdevice") and data["kernel_options"]["ksdevice"] != "":
                    my_int = data["kernel_options"]["ksdevice"]
                    # "bootif" is a PXE-only placeholder, not a real device
                    if my_int == "bootif":
                        my_int = None
                    del data["kernel_options"]["ksdevice"]
                if data["kernel_options"].has_key("ip") and data["kernel_options"]["ip"] != "":
                    my_ip = data["kernel_options"]["ip"]
                    del data["kernel_options"]["ip"]
                if data["kernel_options"].has_key("dns") and data["kernel_options"]["dns"] != "":
                    my_dns = data["kernel_options"]["dns"]
                    del data["kernel_options"]["dns"]
            if dist.breed == "suse":
                if data["kernel_options"].has_key("netdevice") and data["kernel_options"]["netdevice"] != "":
                    my_int = data["kernel_options"]["netdevice"]
                    del data["kernel_options"]["netdevice"]
                if data["kernel_options"].has_key("hostip") and data["kernel_options"]["hostip"] != "":
                    my_ip = data["kernel_options"]["hostip"]
                    del data["kernel_options"]["hostip"]
                if data["kernel_options"].has_key("nameserver") and data["kernel_options"]["nameserver"] != "":
                    my_dns = data["kernel_options"]["nameserver"]
                    del data["kernel_options"]["nameserver"]
            if dist.breed in ["ubuntu", "debian"]:
                if data["kernel_options"].has_key("netcfg/choose_interface") and data["kernel_options"]["netcfg/choose_interface"] != "":
                    my_int = data["kernel_options"]["netcfg/choose_interface"]
                    del data["kernel_options"]["netcfg/choose_interface"]
                if data["kernel_options"].has_key("netcfg/get_ipaddress") and data["kernel_options"]["netcfg/get_ipaddress"] != "":
                    my_ip = data["kernel_options"]["netcfg/get_ipaddress"]
                    del data["kernel_options"]["netcfg/get_ipaddress"]
                if data["kernel_options"].has_key("netcfg/get_netmask") and data["kernel_options"]["netcfg/get_netmask"] != "":
                    my_mask = data["kernel_options"]["netcfg/get_netmask"]
                    del data["kernel_options"]["netcfg/get_netmask"]
                if data["kernel_options"].has_key("netcfg/get_gateway") and data["kernel_options"]["netcfg/get_gateway"] != "":
                    my_gw = data["kernel_options"]["netcfg/get_gateway"]
                    del data["kernel_options"]["netcfg/get_gateway"]
                if data["kernel_options"].has_key("netcfg/get_nameservers") and data["kernel_options"]["netcfg/get_nameservers"] != "":
                    my_dns = data["kernel_options"]["netcfg/get_nameservers"]
                    del data["kernel_options"]["netcfg/get_nameservers"]

            # if no kernel_options overrides are present find the management interface
            # do nothing when zero or multiple management interfaces are found
            if my_int is None:
                mgmt_ints = []
                mgmt_ints_multi = []
                slave_ints = []
                if len(data["interfaces"].keys()) >= 1:
                    for (iname, idata) in data["interfaces"].iteritems():
                        if idata["management"] == True and idata["interface_type"] in ["master", "bond", "bridge"]:
                            # bonded/bridged management interface
                            mgmt_ints_multi.append(iname)
                        if idata["management"] == True and idata["interface_type"] not in ["master", "bond", "bridge", "slave", "bond_slave", "bridge_slave", "bonded_bridge_slave"]:
                            # single management interface
                            mgmt_ints.append(iname)

                if len(mgmt_ints_multi) == 1 and len(mgmt_ints) == 0:
                    # bonded/bridged management interface, find a slave interface
                    # if eth0 is a slave use that (it's what people expect)
                    for (iname, idata) in data["interfaces"].iteritems():
                        if idata["interface_type"] in ["slave", "bond_slave", "bridge_slave", "bonded_bridge_slave"] and idata["interface_master"] == mgmt_ints_multi[0]:
                            slave_ints.append(iname)
                    # NOTE(review): if the master has no slaves, slave_ints is
                    # empty and slave_ints[0] raises IndexError — confirm
                    # whether that configuration can occur in practice
                    if "eth0" in slave_ints:
                        my_int = "eth0"
                    else:
                        my_int = slave_ints[0]
                    # set my_ip from the bonded/bridged interface here
                    my_ip = data["ip_address_" + data["interface_master_" + my_int]]
                    my_mask = data["netmask_" + data["interface_master_" + my_int]]

                if len(mgmt_ints) == 1 and len(mgmt_ints_multi) == 0:
                    # single management interface
                    my_int = mgmt_ints[0]

            # lookup tcp/ip configuration data not already provided by overrides
            if my_ip is None and my_int is not None:
                if data.has_key("ip_address_" + my_int) and data["ip_address_" + my_int] != "":
                    my_ip = data["ip_address_" + my_int]

            if my_mask is None and my_int is not None:
                if data.has_key("netmask_" + my_int) and data["netmask_" + my_int] != "":
                    my_mask = data["netmask_" + my_int]

            if my_gw is None:
                if data.has_key("gateway") and data["gateway"] != "":
                    my_gw = data["gateway"]

            if my_dns is None:
                if data.has_key("name_servers") and data["name_servers"] != "":
                    my_dns = data["name_servers"]

            # add information to the append_line, using the breed-specific
            # boot parameter names; prefer the MAC address over the interface
            # name where the installer accepts it (device names may differ
            # between the install environment and the running system)
            if my_int is not None:
                if dist.breed == "suse":
                    if data.has_key("mac_address_" + my_int) and data["mac_address_" + my_int] != "":
                        append_line += " netdevice=%s" % data["mac_address_" + my_int].lower()
                    else:
                        append_line += " netdevice=%s" % my_int
                if dist.breed == "redhat":
                    if data.has_key("mac_address_" + my_int) and data["mac_address_" + my_int] != "":
                        append_line += " ksdevice=%s" % data["mac_address_" + my_int]
                    else:
                        append_line += " ksdevice=%s" % my_int
                if dist.breed in ["ubuntu", "debian"]:
                    append_line += " netcfg/choose_interface=%s" % my_int

            if my_ip is not None:
                if dist.breed == "suse":
                    append_line += " hostip=%s" % my_ip
                if dist.breed == "redhat":
                    append_line += " ip=%s" % my_ip
                if dist.breed in ["ubuntu", "debian"]:
                    append_line += " netcfg/get_ipaddress=%s" % my_ip

            if my_mask is not None:
                if dist.breed in ["suse", "redhat"]:
                    append_line += " netmask=%s" % my_mask
                if dist.breed in ["ubuntu", "debian"]:
                    append_line += " netcfg/get_netmask=%s" % my_mask

            if my_gw is not None:
                if dist.breed in ["suse", "redhat"]:
                    append_line += " gateway=%s" % my_gw
                if dist.breed in ["ubuntu", "debian"]:
                    append_line += " netcfg/get_gateway=%s" % my_gw

            # NOTE(review): this condition looks inverted/incomplete — when
            # exclude_dns is None and my_dns is also None, the branch is
            # entered and my_dns[0] / ",".join(my_dns) will raise on None;
            # also a truthy exclude_dns does NOT suppress DNS options when
            # my_dns is set.  Presumably intended: add DNS options only when
            # my_dns is present and exclusion was not requested — verify
            # against callers before changing.
            if exclude_dns is None or my_dns is not None:
                if dist.breed == "suse":
                    append_line += " nameserver=%s" % my_dns[0]
                if dist.breed == "redhat":
                    if type(my_dns) == list:
                        append_line += " dns=%s" % ",".join(my_dns)
                    else:
                        append_line += " dns=%s" % my_dns
                if dist.breed in ["ubuntu", "debian"]:
                    append_line += " netcfg/get_nameservers=%s" % ",".join(my_dns)

            # add remaining kernel_options to append_line
            append_line += self.add_remaining_kopts(data["kernel_options"])
            cfg.write(append_line)
            length = len(append_line)
            # isolinux silently truncates overly long append lines
            if length > 254:
                self.logger.warning("append line length is greater than 254 chars (%s chars)" % length)

    cfg.write("\n")
    cfg.write("MENU END\n")
    cfg.close()
def write_dhcp_file(self):
    """
    Regenerate the DHCP configuration from /etc/cobbler/dhcp.template.

    DHCP files are written when manage_dhcp is set in
    /var/lib/cobbler/settings.

    Walks every network interface of every managed system, collects the
    per-interface data the template needs (grouped by dhcp_tag, keyed by
    MAC address), and renders the template to self.settings_file.

    Raises CX if the template cannot be read.
    """
    template_file = "/etc/cobbler/dhcp.template"
    blender_cache = {}

    # only environment errors are expected here; a bare except would also
    # swallow KeyboardInterrupt and programming errors
    try:
        f2 = open(template_file, "r")
    except (IOError, OSError):
        raise CX(_("error reading template: %s") % template_file)
    template_data = f2.read()
    f2.close()

    # use a simple counter for generating generic names where a hostname
    # is not available
    counter = 0

    # we used to just loop through each system, but now we must loop
    # through each network interface of each system.
    dhcp_tags = {"default": {}}
    yaboot = "/yaboot"

    for system in self.systems:
        if not system.is_management_supported(cidr_ok=False):
            continue

        profile = system.get_conceptual_parent()
        distro = profile.get_conceptual_parent()

        # if distro is None then the profile is really an image record!
        for (name, interface) in system.interfaces.iteritems():

            # this is really not a per-interface setting
            # but we do this to make the templates work
            # without upgrade
            interface["gateway"] = system.gateway

            mac = interface["mac_address"]

            if interface["interface_type"] in ("bond_slave", "bridge_slave", "bonded_bridge_slave"):
                # slave interfaces inherit ip/dns data from their master
                if interface["interface_master"] not in system.interfaces:
                    # Can't write DHCP entry; master interface does not exist
                    continue
                ip = system.interfaces[interface["interface_master"]]["ip_address"]
                interface["ip_address"] = ip
                host = system.interfaces[interface["interface_master"]]["dns_name"]
            else:
                ip = interface["ip_address"]
                host = interface["dns_name"]

            if distro is not None:
                interface["distro"] = distro.to_datastruct()

            if mac is None or mac == "":
                # can't write a DHCP entry for this system
                continue

            counter = counter + 1

            # label the entry after the hostname if possible
            if host is not None and host != "":
                if name != "eth0":
                    interface["name"] = "%s-%s" % (host, name)
                else:
                    interface["name"] = "%s" % (host)
            else:
                interface["name"] = "generic%d" % counter

            # add references to the system, profile, and distro
            # for use in the template; blend each system only once
            if system.name in blender_cache:
                blended_system = blender_cache[system.name]
            else:
                blended_system = utils.blender(self.api, False, system)
                blender_cache[system.name] = blended_system

            interface["next_server"] = blended_system["server"]
            interface["netboot_enabled"] = blended_system["netboot_enabled"]
            interface["hostname"] = blended_system["hostname"]
            interface["owner"] = blended_system["name"]
            interface["enable_gpxe"] = blended_system["enable_gpxe"]

            # unless always_write_dhcp_entries is set, skip entries that are
            # static and not netbooting
            if not self.settings.always_write_dhcp_entries:
                if not interface["netboot_enabled"] and interface["static"]:
                    continue

            interface["filename"] = "/pxelinux.0"
            # can't use pxelinux.0 anymore on ppc, yaboot is required
            if distro is not None:
                if distro.arch.startswith("ppc"):
                    interface["filename"] = yaboot

            dhcp_tag = interface["dhcp_tag"]
            if dhcp_tag == "":
                dhcp_tag = "default"

            if dhcp_tag not in dhcp_tags:
                dhcp_tags[dhcp_tag] = {mac: interface}
            else:
                dhcp_tags[dhcp_tag][mac] = interface

    # we are now done with the looping through each interface of each system
    metadata = {
        "date": time.asctime(time.gmtime()),
        "cobbler_server": "%s:%s" % (self.settings.server, self.settings.http_port),
        "next_server": self.settings.next_server,
        "yaboot": yaboot,
        "dhcp_tags": dhcp_tags
    }

    if self.logger is not None:
        self.logger.info("generating %s" % self.settings_file)
    self.templar.render(template_data, metadata, self.settings_file, None)
def power(self, desired_state):
    """
    Run the power-management command template for this system.

    desired_state is either "on" or "off" (or "status" to query).
    Rebooting is implemented at the api.py level.

    The user and password need not be supplied. If not supplied they
    will be taken from the environment, COBBLER_POWER_USER and
    COBBLER_POWER_PASS. If provided, these will override any other data
    and be used instead. Users interested in maximum security should
    take that route.

    Returns True/False for a "status" query, otherwise the subprocess
    return code (0 on success); dies via utils.die on repeated failure
    or unparseable status output.
    """
    template = self.get_command_template()
    template_file = open(template, "r")

    meta = utils.blender(self.api, False, self.system)
    meta["power_mode"] = desired_state

    # allow command line overrides of the username/password
    if self.force_user is not None:
        meta["power_user"] = self.force_user
    if self.force_pass is not None:
        meta["power_pass"] = self.force_pass

    # if no username/password data, check the environment.
    # BUGFIX: this must run *before* the template is rendered; previously
    # it ran after rendering, so the environment fallback promised by the
    # docstring could never reach the generated command.
    if meta.get("power_user", "") == "":
        meta["power_user"] = os.environ.get("COBBLER_POWER_USER", "")
    if meta.get("power_pass", "") == "":
        meta["power_pass"] = os.environ.get("COBBLER_POWER_PASS", "")

    tmp = templar.Templar(self.api._config)
    cmd = tmp.render(template_file, meta, None, self.system)
    template_file.close()
    cmd = cmd.strip()

    self.logger.info("cobbler power configuration is:")
    self.logger.info(" type : %s" % self.system.power_type)
    self.logger.info(" address: %s" % self.system.power_address)
    self.logger.info(" user : %s" % self.system.power_user)
    self.logger.info(" id : %s" % self.system.power_id)
    self.logger.info("- %s" % cmd)

    # use shell so we can have multiple power commands chained together
    cmd = ['/bin/sh', '-c', cmd]

    # Try the power command 5 times before giving up.
    # Some power switches are flakey
    for x in range(0, 5):
        output, rc = utils.subprocess_sp(self.logger, cmd, shell=False)
        if rc == 0:
            # If the desired state is actually a query for the status
            # return different information than command return code
            if desired_state == 'status':
                match = re.match(r'(^Status:\s)(ON|OFF)', output)
                if match:
                    power_status = match.groups()[1]
                    if power_status == 'ON':
                        return True
                    else:
                        return False
                utils.die(self.logger, "command succeeded (rc=%s), but output ('%s') was not understood" % (rc, output))
                return None
            # non-status command succeeded; stop retrying
            break
        else:
            time.sleep(2)

    if not rc == 0:
        utils.die(self.logger, "command failed (rc=%s), please validate the physical setup and cobbler config" % rc)

    return rc