def run(self, path, name, network_root=None, autoinstall_file=None, arch=None, breed=None, os_version=None):
        """
        path: the directory we are scanning for files
        name: the base name of the distro
        network_root: the remote path (nfs/http/ftp) for the distro files
        autoinstall_file: user-specified response file, which will override the default
        arch: user-specified architecture
        breed: user-specified breed
        os_version: user-specified OS version
        """
        self.name = name
        self.network_root = network_root
        self.autoinstall_file = autoinstall_file
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        self.path = path
        self.rootdir = path
        self.pkgdir = path

        # some fixups for the XMLRPC interface, which does not use "None"
        if self.arch == "":
            self.arch = None

        if self.name == "":
            self.name = None

        if self.autoinstall_file == "":
            self.autoinstall_file = None

        if self.os_version == "":
            self.os_version = None

        if self.network_root == "":
            self.network_root = None

        if self.os_version and not self.breed:
            utils.die(self.logger, "OS version can only be specified when a specific breed is selected")

        self.signature = self.scan_signatures()
        if not self.signature:
            error_msg = "No signature matched in %s" % path
            self.logger.error(error_msg)
            raise CX(error_msg)

        # now walk the filesystem looking for distributions that match certain patterns
        self.logger.info("Adding distros from path %s:" % self.path)
        distros_added = []
        os.path.walk(self.path, self.distro_adder, distros_added)

        if len(distros_added) == 0:
            self.logger.warning("No distros imported, bailing out")
            return

        # find out if we can auto-create any repository records from the install tree
        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(distros_added)
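A minimal sketch of the repeated empty-string fixup above, collapsed into one helper (the helper name is illustrative, not part of Cobbler):

def _none_if_empty(value):
    # XMLRPC cannot transmit None, so empty strings stand in for it; map them back.
    return None if value == "" else value

# e.g. self.arch = _none_if_empty(arch), and likewise for name, autoinstall_file, os_version and network_root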
Example #2
    def update_netboot(self, name):
        """
        Write out new pxelinux.cfg files to /tftpboot
        """
        system = self.systems.find(name=name)
        if system is None:
            utils.die(self.logger, "error in system lookup for %s" % name)
        menu_items = self.tftpgen.get_menu_items()['pxe']
        self.tftpgen.write_all_system_files(system, menu_items)
        # generate any templates listed in the system
        self.tftpgen.write_templates(system)
Example #3
    def write_autoinstall_snippet(self, file_path, data):

        file_path = self.validate_autoinstall_snippet_file_path(file_path, new_snippet=True)

        file_full_path = "%s/%s" % (self.snippets_base_dir, file_path)
        try:
            utils.mkdir(os.path.dirname(file_full_path))
        except:
            utils.die(self.logger, "unable to create directory for automatic OS installation snippet at %s" % file_path)

        fileh = open(file_full_path, "w+")
        fileh.write(data)
        fileh.close()
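The open/write/close sequence above can also be written with a context manager, so the handle is closed even if the write fails; a sketch under the assumption that overwriting an existing snippet is acceptable:

import os

def write_snippet(file_full_path, data):
    # Create the parent directory if needed, then write the snippet contents.
    os.makedirs(os.path.dirname(file_full_path), exist_ok=True)
    with open(file_full_path, "w") as fileh:
        fileh.write(data)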
Example #4
    def remove_autoinstall_template(self, file_path):
        """
        Remove an automatic OS installation template

        @param str file_path automatic installation template relative file path
        """

        file_path = self.validate_autoinstall_template_file_path(file_path, for_item=False)

        file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
        if not self.is_autoinstall_in_use(file_path):
            os.remove(file_full_path)
        else:
            utils.die(self.logger, "attempt to delete in-use file")
Example #5
    def gen_config_data(self):
        """
        Generate configuration data for repos and files.
        Returns a dict.
        """
        config_data = {
            'repo_data': self.handle.get_repo_config_for_system(self.system),
            'repos_enabled': self.get_cobbler_resource('repos_enabled'),
        }
        file_set = set()

        for mgmtclass in self.mgmtclasses:
            _mgmtclass = self.handle.find_mgmtclass(name=mgmtclass)
            for file in _mgmtclass.files:
                file_set.add(file)

        # Generate File data
        file_data = {}
        for file in file_set:
            _file = self.handle.find_file(name=file)

            if _file is None:
                raise CX('%s file resource is not defined' % file)

            file_data[file] = {}
            file_data[file]['is_dir'] = _file.is_dir
            file_data[file]['action'] = self.resolve_resource_var(_file.action)
            file_data[file]['group'] = self.resolve_resource_var(_file.group)
            file_data[file]['mode'] = self.resolve_resource_var(_file.mode)
            file_data[file]['owner'] = self.resolve_resource_var(_file.owner)
            file_data[file]['path'] = self.resolve_resource_var(_file.path)

            if not _file.is_dir:
                file_data[file]['template'] = self.resolve_resource_var(_file.template)
                try:
                    t = Template(file=file_data[file]['template'], searchList=[self.host_vars])
                    file_data[file]['content'] = t.respond()
                except:
                    utils.die(self.logger, "Missing template for this file resource %s" % (file_data[file]))

        config_data['files'] = file_data
        return config_data
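For reference, the returned dict has roughly the shape of the literal below (all values are illustrative, not real data):

example_config_data = {
    'repo_data': '...whatever get_repo_config_for_system() returns...',
    'repos_enabled': True,
    'files': {
        'motd': {
            'is_dir': False,
            'action': 'create',
            'group': 'root',
            'mode': '0644',
            'owner': 'root',
            'path': '/etc/motd',
            'template': '/var/lib/cobbler/templates/motd.template',  # only present for non-directories
            'content': '...rendered Cheetah template...',
        },
    },
}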
Example #6
    def write_autoinstall_template(self, file_path, data):
        """
        Write an automatic OS installation template

        @param str file_path automatic installation template relative file path
        @param str data automatic installation template content
        """

        file_path = self.validate_autoinstall_template_file_path(file_path, for_item=False, new_autoinstall=True)

        file_full_path = "%s/%s" % (self.templates_base_dir, file_path)
        try:
            utils.mkdir(os.path.dirname(file_full_path))
        except:
            utils.die(self.logger, "unable to create directory for automatic OS installation template at %s" % file_path)

        fileh = open(file_full_path, "w+")
        fileh.write(data)
        fileh.close()

        return True
Example #7
File: api.py Project: akurz/cobbler
    def power_system(self, system, power_operation, user=None, password=None, logger=None):
        """
        Power on / power off / get power status / reboot a system.

        @param str system Cobbler system
        @param str power_operation power operation. Valid values: on, off, reboot, status
        @param str user power management user
        @param str password power management password
        @param Logger logger logger
        @return bool if operation was successful
        """

        if power_operation == "on":
            self.power_mgr.power_on(system, user=user, password=password, logger=logger)
        elif power_operation == "off":
            self.power_mgr.power_off(system, user=user, password=password, logger=logger)
        elif power_operation == "status":
            self.power_mgr.get_power_status(system, user=user, password=password, logger=logger)
        elif power_operation == "reboot":
            self.power_mgr.reboot(system, user=user, password=password, logger=logger)
        else:
            utils.die(self.logger, "invalid power operation '%s', expected on/off/status/reboot" % power_operation)
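The if/elif chain above can also be expressed as a dictionary dispatch; a self-contained sketch with stand-in handlers (these are not Cobbler's power_mgr methods):

def _on(system, **kwargs): return "powering on %s" % system
def _off(system, **kwargs): return "powering off %s" % system
def _status(system, **kwargs): return "querying status of %s" % system
def _reboot(system, **kwargs): return "rebooting %s" % system

POWER_OPERATIONS = {"on": _on, "off": _off, "status": _status, "reboot": _reboot}

def power_system(system, power_operation, **kwargs):
    handler = POWER_OPERATIONS.get(power_operation)
    if handler is None:
        raise ValueError("invalid power operation '%s', expected on/off/status/reboot" % power_operation)
    return handler(system, **kwargs)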
Example #8
    def add_entry(self, dirname: str, kernel, initrd):
        """
        When we find a directory with a valid kernel/initrd in it, create the distribution objects as appropriate and
        save them. This includes creating xen and rescue distros/profiles if possible.

        :param dirname: Unknown what this currently does.
        :param kernel: Unknown what this currently does.
        :param initrd: Unknown what this currently does.
        :return: Unknown what this currently does.
        """

        # build a proposed name based on the directory structure
        proposed_name = self.get_proposed_name(dirname, kernel)

        # build a list of arches found in the packages directory
        archs = self.learn_arch_from_tree()
        if not archs and self.arch:
            archs.append(self.arch)
        else:
            if self.arch and self.arch not in archs:
                utils.die("Given arch (%s) not found on imported tree %s" %
                          (self.arch, self.path))

        if len(archs) == 0:
            self.logger.error(
                "No arch could be detected in %s, and none was specified via the --arch option"
                % dirname)
            return []
        elif len(archs) > 1:
            self.logger.warning("- Warning : Multiple archs found : %s" %
                                archs)

        distros_added = []
        for pxe_arch in archs:
            name = proposed_name + "-" + pxe_arch
            existing_distro = self.distros.find(name=name)

            if existing_distro is not None:
                self.logger.warning(
                    "skipping import, as distro name already exists: %s" %
                    name)
                continue
            else:
                self.logger.info("creating new distro: %s" % name)
                new_distro = distro.Distro(self.api)

            if name.find("-autoboot") != -1:
                # this is an artifact of some EL-3 imports
                continue

            new_distro.name = name
            new_distro.kernel = kernel
            new_distro.initrd = initrd
            new_distro.arch = pxe_arch
            new_distro.breed = self.breed
            new_distro.os_version = self.os_version
            new_distro.kernel_options = self.signature.get(
                "kernel_options", "")
            new_distro.kernel_options_post = self.signature.get(
                "kernel_options_post", "")
            new_distro.template_files = self.signature.get(
                "template_files", "")

            boot_files: Dict[str, str] = {}
            for boot_file in self.signature["boot_files"]:
                boot_files['$local_img_path/%s' %
                           boot_file] = '%s/%s' % (self.path, boot_file)
            new_distro.boot_files = boot_files

            self.configure_tree_location(new_distro)

            self.distros.add(new_distro, save=True)
            distros_added.append(new_distro)

            # see if the profile name is already used, if so, skip it and
            # do not modify the existing profile

            existing_profile = self.profiles.find(name=name)

            if existing_profile is None:
                self.logger.info("creating new profile: %s" % name)
                new_profile = profile.Profile(self.api)
            else:
                self.logger.info(
                    "skipping existing profile, name already exists: %s" %
                    name)
                continue

            new_profile.name = name
            new_profile.distro = name
            new_profile.autoinstall = self.autoinstall_file

            # depending on the name of the profile we can
            # define a good virt-type for usage with koan
            if name.find("-xen") != -1:
                new_profile.virt_type = enums.VirtType.XENPV
            elif name.find("vmware") != -1:
                new_profile.virt_type = enums.VirtType.VMWARE
            else:
                new_profile.virt_type = enums.VirtType.KVM

            if self.breed == "windows":
                dest_path = os.path.join(self.path, "boot")
                bootmgr_path = os.path.join(dest_path, "bootmgr.exe")
                bcd_path = os.path.join(dest_path, "bcd")
                winpe_path = os.path.join(dest_path, "winpe.wim")
                if os.path.exists(bootmgr_path) and os.path.exists(
                        bcd_path) and os.path.exists(winpe_path):
                    new_profile.autoinstall_meta = {
                        "kernel": os.path.basename(kernel),
                        "bootmgr": "bootmgr.exe",
                        "bcd": "bcd",
                        "winpe": "winpe.wim",
                        "answerfile": "autounattended.xml"
                    }

            self.profiles.add(new_profile, save=True)

        return distros_added
Example #9
    def add_entry(self, dirname, kernel, initrd):
        """
        When we find a directory with a valid kernel/initrd in it, create the distribution objects
        as appropriate and save them.  This includes creating xen and rescue distros/profiles
        if possible.
        """

        # build a proposed name based on the directory structure
        proposed_name = self.get_proposed_name(dirname, kernel)

        # build a list of arches found in the packages directory
        archs = self.learn_arch_from_tree()
        if not archs and self.arch:
            archs.append(self.arch)
        else:
            if self.arch and self.arch not in archs:
                utils.die(self.logger, "Given arch (%s) not found on imported tree %s" % (self.arch, self.path))

        if len(archs) == 0:
            self.logger.error("No arch could be detected in %s, and none was specified via the --arch option" % dirname)
            return []
        elif len(archs) > 1:
            self.logger.warning("- Warning : Multiple archs found : %s" % (archs))

        distros_added = []
        for pxe_arch in archs:
            name = proposed_name + "-" + pxe_arch
            existing_distro = self.distros.find(name=name)

            if existing_distro is not None:
                self.logger.warning("skipping import, as distro name already exists: %s" % name)
                continue
            else:
                self.logger.info("creating new distro: %s" % name)
                distro = item_distro.Distro(self.collection_mgr)

            if name.find("-autoboot") != -1:
                # this is an artifact of some EL-3 imports
                continue

            distro.set_name(name)
            distro.set_kernel(kernel)
            distro.set_initrd(initrd)
            distro.set_arch(pxe_arch)
            distro.set_breed(self.breed)
            distro.set_os_version(self.os_version)
            distro.set_kernel_options(self.signature.get("kernel_options", ""))
            distro.set_kernel_options_post(self.signature.get("kernel_options_post", ""))
            distro.set_template_files(self.signature.get("template_files", ""))
            supported_distro_boot_loaders = utils.get_supported_distro_boot_loaders(distro, self.api)
            distro.set_supported_boot_loaders(supported_distro_boot_loaders)
            distro.set_boot_loader(supported_distro_boot_loaders[0])

            boot_files = ''
            for boot_file in self.signature["boot_files"]:
                boot_files += '$local_img_path/%s=%s/%s ' % (boot_file, self.path, boot_file)
            distro.set_boot_files(boot_files.strip())

            self.configure_tree_location(distro)

            self.distros.add(distro, save=True)
            distros_added.append(distro)

            # see if the profile name is already used, if so, skip it and
            # do not modify the existing profile

            existing_profile = self.profiles.find(name=name)

            if existing_profile is None:
                self.logger.info("creating new profile: %s" % name)
                profile = item_profile.Profile(self.collection_mgr)
            else:
                self.logger.info("skipping existing profile, name already exists: %s" % name)
                continue

            profile.set_name(name)
            profile.set_distro(name)
            profile.set_autoinstall(self.autoinstall_file)

            # depending on the name of the profile we can
            # define a good virt-type for usage with koan
            if name.find("-xen") != -1:
                profile.set_virt_type("xenpv")
            elif name.find("vmware") != -1:
                profile.set_virt_type("vmware")
            else:
                profile.set_virt_type("kvm")

            self.profiles.add(profile, save=True)

        return distros_added
Example #10
    def rsync_sync(self, repo):
        """
        Handle copying of rsync:// and rsync-over-ssh repos.

        :param repo: The repo to sync via rsync.
        """

        if not repo.mirror_locally:
            utils.die(
                "rsync:// urls must be mirrored locally, yum cannot access them directly"
            )

        if repo.rpm_list != "" and repo.rpm_list != []:
            self.logger.warning(
                "--rpm-list is not supported for rsync'd repositories")

        # FIXME: don't hardcode
        dest_path = os.path.join(self.settings.webdir + "/repo_mirror",
                                 repo.name)

        spacer = ""
        if not repo.mirror.startswith(
                "rsync://") and not repo.mirror.startswith("/"):
            spacer = "-e ssh"
        if not repo.mirror.strip().endswith("/"):
            repo.mirror = "%s/" % repo.mirror

        flags = ''
        for x in repo.rsyncopts:
            if repo.rsyncopts[x]:
                flags += " %s %s" % (x, repo.rsyncopts[x])
            else:
                flags += " %s" % x

        if flags == '':
            flags = self.settings.reposync_rsync_flags

        cmd = "rsync %s --delete-after %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" \
              % (flags, spacer, pipes.quote(repo.mirror), pipes.quote(dest_path))
        rc = utils.subprocess_call(cmd)

        if rc != 0:
            utils.die("cobbler reposync failed")

        # If run in archive mode, the repo should already contain all repodata and does not need createrepo to be run
        archive = False
        if '--archive' in flags:
            archive = True
        else:
            # split flags and skip all --{options} as we need to look for combined flags like -vaH
            fl = flags.split()
            for f in fl:
                if f.startswith('--'):
                    pass
                else:
                    if 'a' in f:
                        archive = True
                        break
        if not archive:
            repo_walker(dest_path, self.createrepo_walker, repo)

        self.create_local_file(dest_path, repo)
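The archive-mode detection above can be isolated into a small predicate; a sketch that mirrors the same rules (--archive as a long option, or an 'a' inside a combined short flag such as -vaH):

def rsync_uses_archive(flags: str) -> bool:
    if "--archive" in flags:
        return True
    return any("a" in f for f in flags.split() if not f.startswith("--"))

# rsync_uses_archive("-vaH --delete")  -> True
# rsync_uses_archive("--verbose")      -> False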
Example #11
    def yum_sync(self, repo):
        """
        Handle copying of http:// and ftp:// yum repos.

        :param repo: The yum repository to sync.
        """

        # create the config file the hosts will use to access the repository.
        repo_mirror = repo.mirror.strip()
        dest_path = os.path.join(self.settings.webdir + "/repo_mirror",
                                 repo.name.strip())
        self.create_local_file(dest_path, repo)

        if not repo.mirror_locally:
            return

        # command to run
        cmd = self.reposync_cmd()
        # flag indicating not to pull the whole repo
        has_rpm_list = False

        # detect cases that require special handling
        if repo.rpm_list != "" and repo.rpm_list != []:
            has_rpm_list = True

        # create yum config file for use by reposync
        temp_path = os.path.join(dest_path, ".origin")

        if not os.path.isdir(temp_path):
            # FIXME: there's a chance this might break the RHN D/L case
            os.makedirs(temp_path)

        temp_file = self.create_local_file(temp_path, repo, output=False)

        if not has_rpm_list:
            # If we have not requested only certain RPMs, use reposync
            cmd = "%s %s --config=%s --repoid=%s -p %s" \
                  % (cmd, self.rflags, temp_file, pipes.quote(repo.name),
                     pipes.quote(self.settings.webdir + "/repo_mirror"))
            if repo.arch != "":
                if repo.arch == RepoArchs.I386:
                    # Counter-intuitive, but we want the newish kernels too
                    cmd = "%s -a i686" % (cmd)
                else:
                    cmd = "%s -a %s" % (cmd, repo.arch.value)

        else:
            # Create the output directory if it doesn't exist
            if not os.path.exists(dest_path):
                os.makedirs(dest_path)

            use_source = ""
            if repo.arch == "src":
                use_source = "--source"

            # Older yumdownloader sometimes explodes on --resolvedeps; if this happens to you, upgrade yum & yum-utils
            extra_flags = self.settings.yumdownloader_flags
            cmd = "/usr/bin/dnf download"
            cmd = "%s %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" \
                  % (cmd, extra_flags, use_source, pipes.quote(repo.name), temp_file, pipes.quote(dest_path),
                     " ".join(repo.rpm_list))

        # Now regardless of whether we're doing yumdownloader or reposync or whether the repo was http://, ftp://, or
        # rhn://, execute all queued commands here.  Any failure at any point stops the operation.

        rc = utils.subprocess_call(self.logger, cmd)
        if rc != 0:
            utils.die("cobbler reposync failed")

        # download any metadata we can use
        proxy = None
        if repo.proxy == '<<inherit>>':
            proxy = self.settings.proxy_url_ext
        elif repo.proxy != '<<None>>' and repo.proxy != '':
            proxy = repo.proxy
        (cert, verify) = self.gen_urlgrab_ssl_opts(repo.yumopts)

        # FIXME: These two variables were deleted
        repodata_path = ""
        repomd_path = ""
        if os.path.exists(repodata_path) and not os.path.isfile(repomd_path):
            shutil.rmtree(repodata_path, ignore_errors=False, onerror=None)

        h = librepo.Handle()
        r = librepo.Result()
        h.setopt(librepo.LRO_REPOTYPE, librepo.LR_YUMREPO)
        h.setopt(librepo.LRO_CHECKSUM, True)

        if os.path.isfile(repomd_path):
            h.setopt(librepo.LRO_LOCAL, True)
            h.setopt(librepo.LRO_URLS, [temp_path])
            h.setopt(librepo.LRO_IGNOREMISSING, True)

            try:
                h.perform(r)
            except librepo.LibrepoException as e:
                utils.die("librepo error: " + temp_path + " - " + e.args[1])

            h.setopt(librepo.LRO_LOCAL, False)
            h.setopt(librepo.LRO_URLS, [])
            h.setopt(librepo.LRO_IGNOREMISSING, False)
            h.setopt(librepo.LRO_UPDATE, True)

        h.setopt(librepo.LRO_DESTDIR, temp_path)

        if repo.mirror_type == "metalink":
            h.setopt(librepo.LRO_METALINKURL, repo_mirror)
        elif repo.mirror_type == "mirrorlist":
            h.setopt(librepo.LRO_MIRRORLISTURL, repo_mirror)
        elif repo.mirror_type == "baseurl":
            h.setopt(librepo.LRO_URLS, [repo_mirror])

        if verify:
            h.setopt(librepo.LRO_SSLVERIFYPEER, True)
            h.setopt(librepo.LRO_SSLVERIFYHOST, True)

        if cert:
            sslclientcert, sslclientkey = cert
            h.setopt(librepo.LRO_SSLCLIENTCERT, sslclientcert)
            h.setopt(librepo.LRO_SSLCLIENTKEY, sslclientkey)

        if proxy:
            h.setopt(librepo.LRO_PROXY, proxy)
            h.setopt(librepo.LRO_PROXYTYPE, librepo.PROXY_HTTP)

        try:
            h.perform(r)
        except librepo.LibrepoException as e:
            utils.die("librepo error: " + temp_path + " - " + e.args[1])

        # now run createrepo to rebuild the index
        if repo.mirror_locally:
            repo_walker(dest_path, self.createrepo_walker, repo)
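A sketch of the proxy selection used above, written as a standalone function; settings_proxy stands in for self.settings.proxy_url_ext, and the "<<inherit>>"/"<<None>>" sentinels are the ones used in the method:

def choose_proxy(repo_proxy, settings_proxy):
    # "<<inherit>>" means use the global proxy; "<<None>>" or "" means no proxy at all.
    if repo_proxy == '<<inherit>>':
        return settings_proxy
    if repo_proxy not in ('<<None>>', ''):
        return repo_proxy
    return None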
Example #12
    def rhn_sync(self, repo):
        """
        Handle mirroring of RHN repos.

        :param repo: The repo object to synchronize.
        """

        # reposync command
        cmd = self.reposync_cmd()

        # flag indicating not to pull the whole repo
        has_rpm_list = False

        # detect cases that require special handling
        if repo.rpm_list != "" and repo.rpm_list != []:
            has_rpm_list = True

        # Create yum config file for use by reposync
        # FIXME: don't hardcode
        dest_path = os.path.join(self.settings.webdir + "/repo_mirror",
                                 repo.name)
        temp_path = os.path.join(dest_path, ".origin")

        if not os.path.isdir(temp_path):
            # FIXME: there's a chance this might break the RHN D/L case
            os.makedirs(temp_path)

        # how we invoke reposync depends on whether this is RHN content or not.

        # This is the somewhat more-complex RHN case.
        # NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
        if not repo.mirror_locally:
            utils.die("rhn:// repos do not work with --mirror-locally=1")

        if has_rpm_list:
            self.logger.warning(
                "warning: --rpm-list is not supported for RHN content")
        rest = repo.mirror[6:]  # everything after rhn://
        cmd = "%s %s --repo=%s -p %s" % (cmd, self.rflags, pipes.quote(rest),
                                         pipes.quote(self.settings.webdir +
                                                     "/repo_mirror"))
        if repo.name != rest:
            args = {"name": repo.name, "rest": rest}
            utils.die(
                "ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository "
                "must match the name of the RHN channel" % args)

        arch = repo.arch.value

        if arch == "i386":
            # Counter-intuitive, but we want the newish kernels too
            arch = "i686"

        if arch != "":
            cmd = "%s -a %s" % (cmd, arch)

        # Now regardless of whether we're doing yumdownloader or reposync or whether the repo was http://, ftp://, or
        # rhn://, execute all queued commands here. Any failure at any point stops the operation.

        if repo.mirror_locally:
            utils.subprocess_call(self.logger, cmd)

        # Some more special case handling for RHN. Create the config file now, because the directory didn't exist
        # earlier.

        self.create_local_file(temp_path, repo, output=False)

        # Now run createrepo to rebuild the index

        if repo.mirror_locally:
            repo_walker(dest_path, self.createrepo_walker, repo)

        # Create the config file the hosts will use to access the repository.

        self.create_local_file(dest_path, repo)
Example #13
    def run(self):
        """
        Syncs the current configuration file with the config tree.
        Running the ``Check().run_`` functions beforehand is recommended.
        """
        if not os.path.exists(self.bootloc):
            utils.die("cannot find directory: %s" % self.bootloc)

        self.logger.info("running pre-sync triggers")

        # run pre-triggers...
        utils.run_triggers(self.api, None,
                           "/var/lib/cobbler/triggers/sync/pre/*")

        self.distros = self.collection_mgr.distros()
        self.profiles = self.collection_mgr.profiles()
        self.systems = self.collection_mgr.systems()
        self.settings = self.collection_mgr.settings()
        self.repos = self.collection_mgr.repos()

        # execute the core of the sync operation
        self.logger.info("cleaning trees")
        self.clean_trees()

        # Have the tftpd module handle copying bootloaders, distros, images, and all_system_files
        self.tftpd.sync(self.verbose)
        # Copy distros to the webdir
        # Adding in the exception handling to not blow up if files have been moved (or the path references an NFS
        # directory that's no longer mounted)
        for d in self.distros:
            try:
                self.logger.info("copying files for distro: %s" % d.name)
                self.tftpgen.copy_single_distro_files(d, self.settings.webdir,
                                                      True)
                self.tftpgen.write_templates(d, write_file=True)
            except CX as e:
                self.logger.error(e.value)

        # make the default pxe menu anyway...
        self.tftpgen.make_pxe_menu()

        if self.settings.manage_dhcp:
            self.write_dhcp()
        if self.settings.manage_dns:
            self.logger.info("rendering DNS files")
            self.dns.regen_hosts()
            self.dns.write_configs()

        if self.settings.manage_tftpd:
            # copy in boot_files
            self.tftpd.write_boot_files()

        self.logger.info("cleaning link caches")
        self.clean_link_cache()

        if self.settings.manage_rsync:
            self.logger.info("rendering Rsync files")
            self.rsync_gen()

        # run post-triggers
        self.logger.info("running post-sync triggers")
        utils.run_triggers(self.api, None,
                           "/var/lib/cobbler/triggers/sync/post/*")
        utils.run_triggers(self.api, None,
                           "/var/lib/cobbler/triggers/change/*")
Example #14
    def apt_sync(self, repo):
        """
        Handle copying of http:// and ftp:// debian repos.

        :param repo: The apt repository to sync.
        """

        # Warn about not having mirror program.
        mirror_program = "/usr/bin/debmirror"
        if not os.path.exists(mirror_program):
            utils.die("no %s found, please install it" % (mirror_program))

        # command to run
        cmd = ""

        # detect cases that require special handling
        if repo.rpm_list != "" and repo.rpm_list != []:
            utils.die("has_rpm_list not yet supported on apt repos")

        if not repo.arch:
            utils.die("Architecture is required for apt repositories")

        # built destination path for the repo
        dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)

        if repo.mirror_locally:
            # NOTE: Dropping the @@suite@@ replacement as it is also dropped from manage_import_debian_ubuntu.py,
            # because the repo object has no os_version attribute. If it is added again it will break the Web UI!
            # mirror = repo.mirror.replace("@@suite@@",repo.os_version)
            mirror = repo.mirror

            idx = mirror.find("://")
            method = mirror[:idx]
            mirror = mirror[idx + 3:]

            idx = mirror.find("/")
            host = mirror[:idx]
            mirror = mirror[idx:]

            dists = ",".join(repo.apt_dists)
            components = ",".join(repo.apt_components)

            mirror_data = "--method=%s --host=%s --root=%s --dist=%s --section=%s" \
                          % (pipes.quote(method), pipes.quote(host), pipes.quote(mirror), pipes.quote(dists),
                             pipes.quote(components))

            rflags = "--nocleanup"
            for x in repo.yumopts:
                if repo.yumopts[x]:
                    rflags += " %s %s" % (x, repo.yumopts[x])
                else:
                    rflags += " %s" % x
            cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data,
                                   dest_path)
            cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data,
                                   pipes.quote(dest_path))
            if repo.arch == RepoArchs.SRC:
                cmd = "%s --source" % cmd
            else:
                arch = repo.arch.value
                if arch == "x86_64":
                    arch = "amd64"  # FIX potential arch errors
                cmd = "%s --nosource -a %s" % (cmd, arch)

            # Sets an environment variable for the subprocess; otherwise debmirror will fail, as it needs this
            # variable to exist.
            # FIXME: might this break anything? So far it doesn't
            os.putenv("HOME", "/var/lib/cobbler")

            rc = utils.subprocess_call(cmd)
            if rc != 0:
                utils.die("cobbler reposync failed")
Example #15
    def run(self,
            path: str,
            name: str,
            network_root=None,
            autoinstall_file=None,
            arch: Optional[str] = None,
            breed=None,
            os_version=None):
        """
        This is the main entry point in a manager. It is a required function for import modules.

        :param path: the directory we are scanning for files
        :param name: the base name of the distro
        :param network_root: the remote path (nfs/http/ftp) for the distro files
        :param autoinstall_file: user-specified response file, which will override the default
        :param arch: user-specified architecture
        :param breed: user-specified breed
        :param os_version: user-specified OS version
        """
        self.name = name
        self.network_root = network_root
        self.autoinstall_file = autoinstall_file
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        self.path = path
        self.rootdir = path
        self.pkgdir = path

        # some fixups for the XMLRPC interface, which does not use "None"
        if self.arch == "":
            self.arch = None

        if self.name == "":
            self.name = None

        if self.autoinstall_file == "":
            self.autoinstall_file = None

        if self.os_version == "":
            self.os_version = None

        if self.network_root == "":
            self.network_root = None

        if self.os_version and not self.breed:
            utils.die(
                self.logger,
                "OS version can only be specified when a specific breed is selected"
            )

        self.signature = self.scan_signatures()
        if not self.signature:
            error_msg = "No signature matched in %s" % path
            self.logger.error(error_msg)
            raise CX(error_msg)

        # now walk the filesystem looking for distributions that match certain patterns
        self.logger.info("Adding distros from path %s:" % self.path)
        distros_added = []
        import_walker(self.path, self.distro_adder, distros_added)

        if len(distros_added) == 0:
            self.logger.warning("No distros imported, bailing out")
            return

        # find out if we can auto-create any repository records from the install tree
        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(distros_added)
Example #16
    def add_entry(self, dirname: str, kernel, initrd):
        """
        When we find a directory with a valid kernel/initrd in it, create the distribution objects as appropriate and
        save them. This includes creating xen and rescue distros/profiles if possible.

        :param dirname: Unknown what this currently does.
        :param kernel: Unknown what this currently does.
        :param initrd: Unknown what this currently does.
        :return: Unknown what this currently does.
        """

        # build a proposed name based on the directory structure
        proposed_name = self.get_proposed_name(dirname, kernel)

        # build a list of arches found in the packages directory
        archs = self.learn_arch_from_tree()
        if not archs and self.arch:
            archs.append(self.arch)
        else:
            if self.arch and self.arch not in archs:
                utils.die(
                    self.logger,
                    "Given arch (%s) not found on imported tree %s" %
                    (self.arch, self.path))

        if len(archs) == 0:
            self.logger.error(
                "No arch could be detected in %s, and none was specified via the --arch option"
                % dirname)
            return []
        elif len(archs) > 1:
            self.logger.warning("- Warning : Multiple archs found : %s" %
                                archs)

        distros_added = []
        for pxe_arch in archs:
            name = proposed_name + "-" + pxe_arch
            existing_distro = self.distros.find(name=name)

            if existing_distro is not None:
                self.logger.warning(
                    "skipping import, as distro name already exists: %s" %
                    name)
                continue
            else:
                self.logger.info("creating new distro: %s" % name)
                new_distro = distro.Distro(self.collection_mgr)

            if name.find("-autoboot") != -1:
                # this is an artifact of some EL-3 imports
                continue

            new_distro.set_name(name)
            new_distro.set_kernel(kernel)
            new_distro.set_initrd(initrd)
            new_distro.set_arch(pxe_arch)
            new_distro.set_breed(self.breed)
            new_distro.set_os_version(self.os_version)
            new_distro.set_kernel_options(
                self.signature.get("kernel_options", ""))
            new_distro.set_kernel_options_post(
                self.signature.get("kernel_options_post", ""))
            new_distro.set_template_files(
                self.signature.get("template_files", ""))
            supported_distro_boot_loaders = utils.get_supported_distro_boot_loaders(
                new_distro, self.api)
            new_distro.set_supported_boot_loaders(
                supported_distro_boot_loaders)
            new_distro.set_boot_loader(supported_distro_boot_loaders[0])

            boot_files = ''
            for boot_file in self.signature["boot_files"]:
                boot_files += '$local_img_path/%s=%s/%s ' % (
                    boot_file, self.path, boot_file)
            new_distro.set_boot_files(boot_files.strip())

            self.configure_tree_location(new_distro)

            self.distros.add(new_distro, save=True)
            distros_added.append(new_distro)

            # see if the profile name is already used, if so, skip it and
            # do not modify the existing profile

            existing_profile = self.profiles.find(name=name)

            if existing_profile is None:
                self.logger.info("creating new profile: %s" % name)
                new_profile = profile.Profile(self.collection_mgr)
            else:
                self.logger.info(
                    "skipping existing profile, name already exists: %s" %
                    name)
                continue

            new_profile.set_name(name)
            new_profile.set_distro(name)
            new_profile.set_autoinstall(self.autoinstall_file)

            # depending on the name of the profile we can
            # define a good virt-type for usage with koan
            if name.find("-xen") != -1:
                new_profile.set_virt_type("xenpv")
            elif name.find("vmware") != -1:
                new_profile.set_virt_type("vmware")
            else:
                new_profile.set_virt_type("kvm")

            self.profiles.add(new_profile, save=True)

        return distros_added
Example #17
    def run(self, name=None, verbose: bool = True):
        """
        Syncs the current repo configuration file with the filesystem.

        :param name: The name of the repository to synchronize.
        :param verbose: If the action should be logged verbose or not.
        """

        self.logger.info("run, reposync, run!")

        try:
            self.tries = int(self.tries)
        except:
            utils.die("retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:
            if name is not None and repo.name != name:
                # Invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # Invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)

            if not os.path.isdir(repo_path) and not repo.mirror.lower(
            ).startswith("rhn://"):
                os.makedirs(repo_path)

            # Set the environment keys specified for this repo, saving the old values if they modify existing variables.

            env = repo.environment
            old_env = {}

            for k in list(env.keys()):
                self.logger.debug("setting repo environment: %s=%s" %
                                  (k, env[k]))
                if env[k] is not None:
                    if os.getenv(k):
                        old_env[k] = os.getenv(k)
                    else:
                        os.environ[k] = env[k]

            # This may actually NOT reposync if the repo is set to not mirror locally, but that's a technicality.

            success = False
            for x in range(self.tries + 1, 1, -1):
                try:
                    self.sync(repo)
                    success = True
                    break
                except:
                    utils.log_exc()
                    self.logger.warning("reposync failed, tries left: %s" %
                                        (x - 2))

            # Cleanup/restore any environment variables that were added or changed above.

            for k in list(env.keys()):
                if env[k] is not None:
                    if k in old_env:
                        self.logger.debug("resetting repo environment: %s=%s" %
                                          (k, old_env[k]))
                        os.environ[k] = old_env[k]
                    else:
                        self.logger.debug("removing repo environment: %s=%s" %
                                          (k, env[k]))
                        del os.environ[k]

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die("reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error(
                        "reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(
                "overall reposync failed, at least one repo failed to synchronize"
            )
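The retry loop above counts down from self.tries; a self-contained sketch of the same pattern, with sync_fn standing in for self.sync(repo):

def sync_with_retries(sync_fn, tries):
    # Try up to `tries` times, reporting how many attempts remain after each failure.
    for remaining in range(tries - 1, -1, -1):
        try:
            sync_fn()
            return True
        except Exception:
            print("reposync failed, tries left: %s" % remaining)
    return False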
Example #18
    def gen_config_data(self):
        """
        Generate configuration data for repos, ldap, files,
        packages, and monit. Returns a dict.
        """
        config_data = {
            'repo_data': self.handle.get_repo_config_for_system(self.system),
            'repos_enabled': self.get_cobbler_resource('repos_enabled'),
            'ldap_enabled': self.get_cobbler_resource('ldap_enabled'),
            'monit_enabled': self.get_cobbler_resource('monit_enabled')
        }
        package_set = set()
        file_set = set()

        for mgmtclass in self.mgmtclasses:
            _mgmtclass = self.handle.find_mgmtclass(name=mgmtclass)
            for package in _mgmtclass.packages:
                package_set.add(package)
            for file in _mgmtclass.files:
                file_set.add(file)

        # Generate LDAP data
        if self.get_cobbler_resource("ldap_enabled"):
            if self.system.ldap_type in ["", "none"]:
                utils.die(self.logger, "LDAP management type not set for this system (%s, %s)" % (self.system.ldap_type, self.system.name))
            else:
                template = utils.get_ldap_template(self.system.ldap_type)
                t = Template(file=template, searchList=[self.host_vars])
                print(t)
                config_data['ldap_data'] = t.respond()

        # Generate Package data
        pkg_data = {}
        for package in package_set:
            _package = self.handle.find_package(name=package)
            if _package is None:
                raise CX('%s package resource is not defined' % package)
            else:
                pkg_data[package] = {}
                pkg_data[package]['action'] = self.resolve_resource_var(_package.action)
                pkg_data[package]['installer'] = _package.installer
                pkg_data[package]['version'] = self.resolve_resource_var(_package.version)
                if pkg_data[package]['version'] != "":
                    pkg_data[package]["install_name"] = "%s-%s" % (package, pkg_data[package]['version'])
                else:
                    pkg_data[package]["install_name"] = package
        config_data['packages'] = pkg_data

        # Generate File data
        file_data = {}
        for file in file_set:
            _file = self.handle.find_file(name=file)

            if _file is None:
                raise CX('%s file resource is not defined' % file)

            file_data[file] = {}
            file_data[file]['is_dir'] = _file.is_dir
            file_data[file]['action'] = self.resolve_resource_var(_file.action)
            file_data[file]['group'] = self.resolve_resource_var(_file.group)
            file_data[file]['mode'] = self.resolve_resource_var(_file.mode)
            file_data[file]['owner'] = self.resolve_resource_var(_file.owner)
            file_data[file]['path'] = self.resolve_resource_var(_file.path)

            if not _file.is_dir:
                file_data[file]['template'] = self.resolve_resource_var(_file.template)
                try:
                    t = Template(file=file_data[file]['template'], searchList=[self.host_vars])
                    file_data[file]['content'] = t.respond()
                except:
                    utils.die(self.logger, "Missing template for this file resource %s" % (file_data[file]))

        config_data['files'] = file_data
        return config_data
Example #19
    def _power(self,
               system,
               power_operation: str,
               user: Optional[str] = None,
               password: Optional[str] = None) -> Optional[bool]:
        """
        Performs a power operation on a system.
        Internal method

        :param system: Cobbler system
        :type system: System
        :param power_operation: power operation. Valid values: on, off, status. Rebooting is implemented as a set of 2
                                operations (off and on) in a higher level method.
        :param user: power management user. If user and password are not supplied, environment variables
                     COBBLER_POWER_USER and COBBLER_POWER_PASS will be used.
        :param password: power management password
        :return: bool/None if power operation is 'status', return if system is on; otherwise, return None
        :raise CX: if there are errors
        """

        power_command = get_power_command(system.power_type)
        if not power_command:
            utils.die("no power type set for system")

        power_info = {
            "type": system.power_type,
            "address": system.power_address,
            "user": system.power_user,
            "id": system.power_id,
            "options": system.power_options,
            "identity_file": system.power_identity_file
        }

        self.logger.info("cobbler power configuration is: %s",
                         json.dumps(power_info))

        # if no username/password data, check the environment
        if not system.power_user and not user:
            user = os.environ.get("COBBLER_POWER_USER", "")
        if not system.power_pass and not password:
            password = os.environ.get("COBBLER_POWER_PASS", "")

        power_input = self._get_power_input(system, power_operation, user,
                                            password)

        self.logger.info("power command: %s", power_command)
        self.logger.info("power command input: %s", power_input)

        rc = -1

        for x in range(0, POWER_RETRIES):
            output, rc = utils.subprocess_sp(power_command,
                                             shell=False,
                                             input=power_input)
            # Allowed return codes: 0, 1, 2
            # Source: https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md#agent-operations-and-return-values
            if power_operation in ("on", "off", "reboot"):
                if rc == 0:
                    return None
            elif power_operation == "status":
                if rc in (0, 2):
                    match = re.match(r'^(Status:|.+power\s=)\s(on|off)$',
                                     output, re.IGNORECASE | re.MULTILINE)
                    if match:
                        power_status = match.groups()[1]
                        if power_status.lower() == 'on':
                            return True
                        else:
                            return False
                    error_msg = "command succeeded (rc=%s), but output ('%s') was not understood" % (
                        rc, output)
                    utils.die(error_msg)
                    raise CX(error_msg)
            time.sleep(2)

        if not rc == 0:
            error_msg = "command failed (rc=%s), please validate the physical setup and cobbler config" % rc
            utils.die(error_msg)
            raise CX(error_msg)
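For reference, the status regular expression above accepts typical fence-agent output such as "Status: ON"; a small illustration (the sample string is made up):

import re

output = "Status: ON"
match = re.match(r'^(Status:|.+power\s=)\s(on|off)$', output, re.IGNORECASE | re.MULTILINE)
if match:
    print(match.groups()[1].lower() == 'on')   # -> True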
Example #20
    def run(self,
            path: str,
            name: str,
            network_root=None,
            autoinstall_file=None,
            arch: Optional[str] = None,
            breed=None,
            os_version=None):
        """
        This is the main entry point in a manager. It is a required function for import modules.

        :param path: the directory we are scanning for files
        :param name: the base name of the distro
        :param network_root: the remote path (nfs/http/ftp) for the distro files
        :param autoinstall_file: user-specified response file, which will override the default
        :param arch: user-specified architecture
        :param breed: user-specified breed
        :param os_version: user-specified OS version
        :raises CX
        """
        self.name = name
        self.network_root = network_root
        self.autoinstall_file = autoinstall_file
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        self.path = path
        self.rootdir = path
        self.pkgdir = path

        # some fixups for the XMLRPC interface, which does not use "None"
        if self.arch == "":
            self.arch = None

        if self.name == "":
            self.name = None

        if self.autoinstall_file == "":
            self.autoinstall_file = None

        if self.os_version == "":
            self.os_version = None

        if self.network_root == "":
            self.network_root = None

        if self.os_version and not self.breed:
            utils.die(
                "OS version can only be specified when a specific breed is selected"
            )

        self.signature = self.scan_signatures()
        if not self.signature:
            error_msg = "No signature matched in %s" % path
            self.logger.error(error_msg)
            raise CX(error_msg)

        # now walk the filesystem looking for distributions that match certain patterns
        self.logger.info("Adding distros from path %s:" % self.path)
        distros_added = []
        import_walker(self.path, self.distro_adder, distros_added)

        if len(distros_added) == 0:
            if self.breed == "windows":
                cmd_path = "/usr/bin/wimexport"
                bootwim_path = os.path.join(self.path, "sources", "boot.wim")
                dest_path = os.path.join(self.path, "boot")
                if os.path.exists(cmd_path) and os.path.exists(bootwim_path):
                    winpe_path = os.path.join(dest_path, "winpe.wim")
                    if not os.path.exists(dest_path):
                        utils.mkdir(dest_path)
                    rc = utils.subprocess_call(
                        [cmd_path, bootwim_path, "1", winpe_path, "--boot"],
                        shell=False)
                    if rc == 0:
                        cmd = [
                            "/usr/bin/wimdir %s 1 | /usr/bin/grep -i '^/Windows/Boot/PXE$'"
                            % winpe_path
                        ]
                        pxe_path = utils.subprocess_get(cmd, shell=True)[0:-1]
                        cmd = [
                            "/usr/bin/wimdir %s 1 | /usr/bin/grep -i '^/Windows/System32/config/SOFTWARE$'"
                            % winpe_path
                        ]
                        config_path = utils.subprocess_get(cmd,
                                                           shell=True)[0:-1]
                        cmd_path = "/usr/bin/wimextract"
                        rc = utils.subprocess_call([
                            cmd_path, bootwim_path, "1",
                            "%s/pxeboot.n12" % pxe_path,
                            "%s/bootmgr.exe" % pxe_path, config_path,
                            "--dest-dir=%s" % dest_path, "--no-acls",
                            "--no-attributes"
                        ],
                                                   shell=False)
                        if rc == 0:
                            if HAS_HIVEX:
                                software = os.path.join(
                                    dest_path, os.path.basename(config_path))
                                h = hivex.Hivex(software, write=True)
                                root = h.root()
                                node = h.node_get_child(root, "Microsoft")
                                node = h.node_get_child(node, "Windows NT")
                                node = h.node_get_child(node, "CurrentVersion")
                                h.node_set_value(
                                    node, {
                                        "key":
                                        "SystemRoot",
                                        "t":
                                        REG_SZ,
                                        "value":
                                        "x:\\Windows\0".encode(
                                            encoding="utf-16le")
                                    })
                                node = h.node_get_child(node, "WinPE")

                                # remove the key InstRoot from the registry
                                values = h.node_values(node)
                                new_values = []

                                for value in values:
                                    keyname = h.value_key(value)

                                    if keyname == "InstRoot":
                                        continue

                                    val = h.node_get_value(node, keyname)
                                    valtype = h.value_type(val)[0]
                                    value2 = h.value_value(val)[1]
                                    valobject = {
                                        "key": keyname,
                                        "t": int(valtype),
                                        "value": value2
                                    }
                                    new_values.append(valobject)

                                h.node_set_values(node, new_values)
                                h.commit(software)

                                cmd_path = "/usr/bin/wimupdate"
                                rc = utils.subprocess_call([
                                    cmd_path, winpe_path,
                                    "--command=add %s %s" %
                                    (software, config_path)
                                ],
                                                           shell=False)
                                os.remove(software)
                            else:
                                self.logger.info(
                                    "python3-hivex not found. If you need Automatic Windows "
                                    "Installation support, please install.")
                            import_walker(self.path, self.distro_adder,
                                          distros_added)

        if len(distros_added) == 0:
            self.logger.warning("No distros imported, bailing out")
            return

        # find out if we can auto-create any repository records from the install tree
        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(distros_added)
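
# A minimal, hypothetical sketch of the hivex edit cycle used in the Windows branch
# above; the hive path, helper name and sample value are placeholders, only the hivex
# calls themselves (Hivex, root, node_get_child, node_set_value, commit) come from
# the code.
import hivex

REG_SZ = 1  # Windows registry string type


def set_system_root(hive_path, new_root):
    h = hivex.Hivex(hive_path, write=True)               # open the hive writable
    node = h.root()
    for child in ("Microsoft", "Windows NT", "CurrentVersion"):
        node = h.node_get_child(node, child)              # walk down the key path
    h.node_set_value(node, {
        "key": "SystemRoot",
        "t": REG_SZ,
        "value": (new_root + "\0").encode("utf-16le"),    # hive strings are UTF-16LE
    })
    h.commit(hive_path)                                   # write the change back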
Exemplo n.º 21
    def run(self, iso=None, buildisodir=None, profiles=None, systems=None, distro=None, standalone=None, airgapped=None, source=None, exclude_dns=None, mkisofs_opts=None):

        # the airgapped option implies standalone
        if airgapped is True:
            standalone = True

        # the distro option is for stand-alone builds only
        if not standalone and distro is not None:
            utils.die(self.logger, "The --distro option should only be used when creating a standalone or airgapped ISO")
        # if building standalone, we only want --distro and --profiles (optional),
        # systems are disallowed
        if standalone:
            if systems is not None:
                utils.die(self.logger, "When building a standalone ISO, use --distro and --profiles only, not --systems")
            elif distro is None:
                utils.die(self.logger, "When building a standalone ISO, you must specify a --distro")
            if source is not None and not os.path.exists(source):
                utils.die(self.logger, "The source specified (%s) does not exist" % source)

            # ensure all profiles specified are children of the distro
            if profiles:
                which_profiles = self.filter_systems_or_profiles(profiles, 'profile')
                for profile in which_profiles:
                    if profile.distro != distro:
                        utils.die(self.logger, "When building a standalone ISO, all --profiles must be under --distro")

        # if iso is None, create it in the current directory as "autoinst.iso"
        if iso is None:
            iso = "autoinst.iso"

        if buildisodir is None:
            buildisodir = self.settings.buildisodir
        else:
            if not os.path.isdir(buildisodir):
                utils.die(self.logger, "The --tempdir specified is not a directory")

            (buildisodir_head, buildisodir_tail) = os.path.split(os.path.normpath(buildisodir))
            if buildisodir_tail != "buildiso":
                buildisodir = os.path.join(buildisodir, "buildiso")

        self.logger.info("using/creating buildisodir: %s" % buildisodir)
        if not os.path.exists(buildisodir):
            os.makedirs(buildisodir)
        else:
            shutil.rmtree(buildisodir)
            os.makedirs(buildisodir)

        # if base of buildisodir does not exist, fail
        # create all profiles unless filtered by "profiles"

        imagesdir = os.path.join(buildisodir, "images")
        isolinuxdir = os.path.join(buildisodir, "isolinux")

        self.logger.info("building tree for isolinux")
        if not os.path.exists(imagesdir):
            os.makedirs(imagesdir)
        if not os.path.exists(isolinuxdir):
            os.makedirs(isolinuxdir)

        self.logger.info("copying miscellaneous files")

        files_to_copy = ["isolinux.bin", "menu.c32", "chain.c32",
                         "ldlinux.c32", "libcom32.c32", "libutil.c32"]

        optional_files = ["ldlinux.c32", "libcom32.c32", "libutil.c32"]

        syslinux_folders = ["/usr/share/syslinux/",
                            "/usr/lib/syslinux/modules/bios/",
                            "/usr/lib/syslinux/",
                            "/usr/lib/ISOLINUX/"]

        # file_copy_success will be used to check for missing files
        file_copy_success = {f: False for f in files_to_copy if f not in optional_files}
        for syslinux_folder in syslinux_folders:
            if os.path.isdir(os.path.join(syslinux_folder)):
                for file_to_copy in files_to_copy:
                    source_file = os.path.join(syslinux_folder, file_to_copy)
                    if os.path.exists(source_file):
                        utils.copyfile(source_file, os.path.join(isolinuxdir, file_to_copy), self.api)
                        file_copy_success[file_to_copy] = True

        if False in file_copy_success.values():
            for k, v in file_copy_success.items():
                if not v:
                    self.logger.error("File not found: %s" % k)
            utils.die(self.logger, "Required file(s) not found. Please check your syslinux installation")

        if standalone or airgapped:
            self.generate_standalone_iso(imagesdir, isolinuxdir, distro, source, airgapped, profiles)
        else:
            self.generate_netboot_iso(imagesdir, isolinuxdir, profiles, systems, exclude_dns)

        if mkisofs_opts is None:
            mkisofs_opts = ""
        else:
            mkisofs_opts = mkisofs_opts.strip()

        # removed --quiet
        cmd = "mkisofs -o %s %s -r -b isolinux/isolinux.bin -c isolinux/boot.cat" % (iso, mkisofs_opts)
        cmd = cmd + " -no-emul-boot -boot-load-size 4"
        cmd = cmd + r" -boot-info-table -V Cobbler\ Install -R -J -T %s" % buildisodir

        rc = utils.subprocess_call(self.logger, cmd, shell=True)
        if rc != 0:
            utils.die(self.logger, "mkisofs failed")

        self.logger.info("ISO build complete")
        self.logger.info("You may wish to delete: %s" % buildisodir)
        self.logger.info("The output file is: %s" % iso)
Exemplo n.º 22
    def generate_standalone_iso(self, imagesdir, isolinuxdir, distname, filesource, airgapped, profiles):
        """
        Create a bootable CD image to be used for hands-off CD installations
        """
        # Get the distro object for the requested distro
        # and then get all of its descendants (profiles/sub-profiles/systems)
        # with sort=True for profile/system hierarchy to allow menu indenting
        distro = self.api.find_distro(distname)
        if distro is None:
            utils.die(self.logger, "distro %s was not found, aborting" % distname)
        descendants = distro.get_descendants(sort=True)
        profiles = utils.input_string_or_list(profiles)

        if filesource is None:
            # Try to determine the source from the distro kernel path
            self.logger.debug("trying to locate source for distro")
            found_source = False
            (source_head, source_tail) = os.path.split(distro.kernel)
            while source_tail != '':
                if source_head == os.path.join(self.api.settings().webdir, "distro_mirror"):
                    filesource = os.path.join(source_head, source_tail)
                    found_source = True
                    self.logger.debug("found source in %s" % filesource)
                    break
                (source_head, source_tail) = os.path.split(source_head)
            # Can't find the source, raise an error
            if not found_source:
                utils.die(self.logger, "Error, no installation source found. When building a standalone ISO, you must specify a --source if the distro install tree is not hosted locally")

        self.logger.info("copying kernels and initrds for standalone distro")
        self.copy_boot_files(distro, isolinuxdir, None)

        self.logger.info("generating an isolinux.cfg")
        isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
        cfg = open(isolinuxcfg, "w+")
        cfg.write(self.iso_template)

        if airgapped:
            repo_names_to_copy = {}

        for descendant in descendants:
            # if a list of profiles was given, skip any others and their systems
            if (profiles and ((descendant.COLLECTION_TYPE == 'profile' and descendant.name not in profiles) or (descendant.COLLECTION_TYPE == 'system' and descendant.profile not in profiles))):
                continue

            menu_indent = 0
            if descendant.COLLECTION_TYPE == 'system':
                menu_indent = 4

            data = utils.blender(self.api, False, descendant)

            # SUSE is not using 'text'. Instead 'textmode' is used as kernel option.
            utils.kopts_overwrite(None, distro, data['kernel_options'], self.settings)

            cfg.write("\n")
            cfg.write("LABEL %s\n" % descendant.name)
            if menu_indent:
                cfg.write("  MENU INDENT %d\n" % menu_indent)
            cfg.write("  MENU LABEL %s\n" % descendant.name)
            cfg.write("  kernel %s\n" % os.path.basename(distro.kernel))

            append_line = "  append initrd=%s" % os.path.basename(distro.initrd)
            if distro.breed == "redhat":
                append_line += " ks=cdrom:/isolinux/%s.cfg" % descendant.name
            if distro.breed == "suse":
                append_line += " autoyast=file:///isolinux/%s.cfg install=cdrom:///" % descendant.name
                if "install" in data["kernel_options"]:
                    del data["kernel_options"]["install"]
            if distro.breed in ["ubuntu", "debian"]:
                append_line += " auto-install/enable=true preseed/file=/cdrom/isolinux/%s.cfg" % descendant.name

            # add remaining kernel_options to append_line
            append_line += self.add_remaining_kopts(data["kernel_options"])
            cfg.write(append_line)

            if descendant.COLLECTION_TYPE == 'profile':
                autoinstall_data = self.api.autoinstallgen.generate_autoinstall_for_profile(descendant.name)
            elif descendant.COLLECTION_TYPE == 'system':
                autoinstall_data = self.api.autoinstallgen.generate_autoinstall_for_system(descendant.name)

            if distro.breed == "redhat":
                cdregex = re.compile(r"^\s*url .*\n", re.IGNORECASE | re.MULTILINE)
                autoinstall_data = cdregex.sub("cdrom\n", autoinstall_data, count=1)

            if airgapped:
                descendant_repos = data['repos']
                for repo_name in descendant_repos:
                    repo_obj = self.api.find_repo(repo_name)
                    error_fmt = (descendant.COLLECTION_TYPE + " " + descendant.name + " refers to repo " + repo_name + ", which %s; cannot build airgapped ISO")

                    if repo_obj is None:
                        utils.die(self.logger, error_fmt % "does not exist")
                    if not repo_obj.mirror_locally:
                        utils.die(self.logger, error_fmt % "is not configured for local mirroring")
                    # FIXME: don't hardcode
                    mirrordir = os.path.join(self.settings.webdir, "repo_mirror", repo_obj.name)
                    if not os.path.exists(mirrordir):
                        utils.die(self.logger, error_fmt % "has a missing local mirror directory")

                    repo_names_to_copy[repo_obj.name] = mirrordir

                    # update the baseurl in autoinstall_data to use the cdrom copy of this repo
                    reporegex = re.compile(r"^(\s*repo --name=" + repo_obj.name + " --baseurl=).*", re.MULTILINE)
                    autoinstall_data = reporegex.sub(r"\1" + "file:///mnt/source/repo_mirror/" + repo_obj.name, autoinstall_data)

                # rewrite any split-tree repos, such as in redhat, to use cdrom
                srcreporegex = re.compile(r"^(\s*repo --name=\S+ --baseurl=).*/cobbler/distro_mirror/" + distro.name + r"/?(.*)", re.MULTILINE)
                autoinstall_data = srcreporegex.sub(r"\1" + "file:///mnt/source" + r"\2", autoinstall_data)

            autoinstall_name = os.path.join(isolinuxdir, "%s.cfg" % descendant.name)
            autoinstall_file = open(autoinstall_name, "w+")
            autoinstall_file.write(autoinstall_data)
            autoinstall_file.close()

        self.logger.info("done writing config")
        cfg.write("\n")
        cfg.write("MENU END\n")
        cfg.close()

        if airgapped:
            # copy any repos found in profiles or systems to the iso build
            repodir = os.path.abspath(os.path.join(isolinuxdir, "..", "repo_mirror"))
            if not os.path.exists(repodir):
                os.makedirs(repodir)

            for repo_name in repo_names_to_copy:
                src = repo_names_to_copy[repo_name]
                dst = os.path.join(repodir, repo_name)
                self.logger.info(" - copying repo '" + repo_name + "' for airgapped ISO")

                ok = utils.rsync_files(src, dst, "--exclude=TRANS.TBL --exclude=cache/ --no-g",
                                       logger=self.logger, quiet=True)
                if not ok:
                    utils.die(self.logger, "rsync of repo '" + repo_name + "' failed")

        # copy distro files last, since they take the most time
        cmd = "rsync -rlptgu --exclude=boot.cat --exclude=TRANS.TBL --exclude=isolinux/ %s/ %s/../" % (filesource, isolinuxdir)
        self.logger.info("- copying distro %s files (%s)" % (distname, cmd))
        rc = utils.subprocess_call(self.logger, cmd, shell=True)
        if rc:
            utils.die(self.logger, "rsync of distro files failed")
Exemplo n.º 23
    def run(self,
            cobbler_master=None,
            port: str = "80",
            distro_patterns=None,
            profile_patterns=None,
            system_patterns=None,
            repo_patterns=None,
            image_patterns=None,
            mgmtclass_patterns=None,
            package_patterns=None,
            file_patterns=None,
            prune: bool = False,
            omit_data=False,
            sync_all: bool = False,
            use_ssl: bool = False):
        """
        Get remote profiles and distros and sync them locally

        :param cobbler_master: The remote url of the master server.
        :param port: The remote port of the master server.
        :param distro_patterns: The pattern of distros to sync.
        :param profile_patterns: The pattern of profiles to sync.
        :param system_patterns: The pattern of systems to sync.
        :param repo_patterns: The pattern of repositories to sync.
        :param image_patterns: The pattern of images to sync.
        :param mgmtclass_patterns: The pattern of management classes to sync.
        :param package_patterns: The pattern of packages to sync.
        :param file_patterns: The pattern of files to sync.
        :param prune: Whether the local server should be pruned before copying.
        :param omit_data: Whether the data behind images etc. should be omitted.
        :param sync_all: Whether everything should be synced (in which case the patterns are ignored).
        :param use_ssl: Whether to use HTTPS instead of HTTP.
        """

        self.port = str(port)
        self.distro_patterns = distro_patterns.split()
        self.profile_patterns = profile_patterns.split()
        self.system_patterns = system_patterns.split()
        self.repo_patterns = repo_patterns.split()
        self.image_patterns = image_patterns.split()
        self.mgmtclass_patterns = mgmtclass_patterns.split()
        self.package_patterns = package_patterns.split()
        self.file_patterns = file_patterns.split()
        self.omit_data = omit_data
        self.prune = prune
        self.sync_all = sync_all
        self.use_ssl = use_ssl

        if self.use_ssl:
            protocol = 'https'
        else:
            protocol = 'http'

        if cobbler_master is not None:
            self.master = cobbler_master
        elif len(self.settings.cobbler_master) > 0:
            self.master = self.settings.cobbler_master
        else:
            utils.die(self.logger,
                      'No Cobbler master specified, try --master.')

        self.uri = '%s://%s:%s/cobbler_api' % (protocol, self.master,
                                               self.port)

        self.logger.info("cobbler_master      = %s" % cobbler_master)
        self.logger.info("port                = %s" % self.port)
        self.logger.info("distro_patterns     = %s" % self.distro_patterns)
        self.logger.info("profile_patterns    = %s" % self.profile_patterns)
        self.logger.info("system_patterns     = %s" % self.system_patterns)
        self.logger.info("repo_patterns       = %s" % self.repo_patterns)
        self.logger.info("image_patterns      = %s" % self.image_patterns)
        self.logger.info("mgmtclass_patterns  = %s" % self.mgmtclass_patterns)
        self.logger.info("package_patterns    = %s" % self.package_patterns)
        self.logger.info("file_patterns       = %s" % self.file_patterns)
        self.logger.info("omit_data           = %s" % self.omit_data)
        self.logger.info("sync_all            = %s" % self.sync_all)
        self.logger.info("use_ssl             = %s" % self.use_ssl)

        self.logger.info("XMLRPC endpoint: %s" % self.uri)
        self.logger.debug("test ALPHA")
        self.remote = xmlrpc.client.Server(self.uri)
        self.logger.debug("test BETA")
        self.remote.ping()
        self.local = xmlrpc.client.Server("http://127.0.0.1:%s/cobbler_api" %
                                          self.settings.http_port)
        self.local.ping()

        self.replicate_data()
        self.link_distros()
        self.logger.info("Syncing")
        self.api.sync(logger=self.logger)
        self.logger.info("Done")
Exemplo n.º 24
    def gen_config_data(self) -> dict:
        """
        Generate configuration data for repos, files and packages.

        :return: A dict which has all config data in it.
        """
        config_data = {
            'repo_data': self.handle.get_repo_config_for_system(self.system),
            'repos_enabled': self.get_cobbler_resource('repos_enabled'),
        }
        package_set = set()
        file_set = set()

        for mgmtclass in self.mgmtclasses:
            _mgmtclass = self.handle.find_mgmtclass(name=mgmtclass)
            for package in _mgmtclass.packages:
                package_set.add(package)
            for file in _mgmtclass.files:
                file_set.add(file)

        # Generate Package data
        pkg_data = {}
        for package in package_set:
            _package = self.handle.find_package(name=package)
            if _package is None:
                raise CX('%s package resource is not defined' % package)
            else:
                pkg_data[package] = {}
                pkg_data[package]['action'] = self.resolve_resource_var(
                    _package.action)
                pkg_data[package]['installer'] = _package.installer
                pkg_data[package]['version'] = self.resolve_resource_var(
                    _package.version)
                if pkg_data[package]['version'] != "":
                    pkg_data[package]["install_name"] = "%s-%s" % (
                        package, pkg_data[package]['version'])
                else:
                    pkg_data[package]["install_name"] = package
        config_data['packages'] = pkg_data

        # Generate File data
        file_data = {}
        for file in file_set:
            _file = self.handle.find_file(name=file)

            if _file is None:
                raise CX('%s file resource is not defined' % file)

            file_data[file] = {}
            file_data[file]['is_dir'] = _file.is_dir
            file_data[file]['action'] = self.resolve_resource_var(_file.action)
            file_data[file]['group'] = self.resolve_resource_var(_file.group)
            file_data[file]['mode'] = self.resolve_resource_var(_file.mode)
            file_data[file]['owner'] = self.resolve_resource_var(_file.owner)
            file_data[file]['path'] = self.resolve_resource_var(_file.path)

            if not _file.is_dir:
                file_data[file]['template'] = self.resolve_resource_var(
                    _file.template)
                try:
                    t = template_api.CobblerTemplate(
                        file=file_data[file]['template'],
                        searchList=[self.host_vars])
                    file_data[file]['content'] = t.respond()
                except:
                    utils.die(
                        self.logger,
                        "Missing template for this file resource %s" %
                        (file_data[file]))

        config_data['files'] = file_data
        return config_data
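
# Purely illustrative sketch of the dictionary shape gen_config_data() builds; the
# package and file names, versions and paths are invented, the keys follow the code.
example_config_data = {
    "repo_data": [],                       # whatever get_repo_config_for_system() returns
    "repos_enabled": True,
    "packages": {
        "ntp": {"action": "create", "installer": "yum",
                "version": "4.2.8", "install_name": "ntp-4.2.8"},
    },
    "files": {
        "motd": {"is_dir": False, "action": "create", "group": "root",
                 "mode": "0644", "owner": "root", "path": "/etc/motd",
                 "template": "/var/lib/cobbler/templates/motd.template",
                 "content": "rendered template text\n"},
    },
}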
Exemplo n.º 25
    def _power(self,
               system,
               power_operation,
               user=None,
               password=None,
               logger=None):
        """
        Performs a power operation on a system.
        Internal method

        @param System system Cobbler system
        @param str power_operation power operation. Valid values: on, off, status.
                Rebooting is implemented as a set of 2 operations (off and on) in
                a higher level method.
        @param str user power management user. If user and password are not
                supplied, environment variables COBBLER_POWER_USER and
                COBBLER_POWER_PASS will be used.
        @param str password power management password
        @param Logger logger logger
        @return bool/None if power operation is 'status', return if system is on;
                otherwise, return None
        @raise CX if there are errors
        """

        if logger is None:
            logger = self.logger

        power_command = get_power_command(system.power_type)
        if not power_command:
            utils.die(logger, "no power type set for system")

        meta = utils.blender(self.api, False, system)
        meta["power_mode"] = power_operation

        logger.info("cobbler power configuration is:")
        logger.info("      type   : %s" % system.power_type)
        logger.info("      address: %s" % system.power_address)
        logger.info("      user   : %s" % system.power_user)
        logger.info("      id     : %s" % system.power_id)
        logger.info("      options: %s" % system.power_options)
        logger.info("identity_file: %s" % system.power_identity_file)

        # if no username/password data, check the environment
        if not system.power_user and not user:
            user = os.environ.get("COBBLER_POWER_USER", "")
        if not system.power_pass and not password:
            password = os.environ.get("COBBLER_POWER_PASS", "")

        power_input = self._get_power_input(system, power_operation, logger,
                                            user, password)

        logger.info("power command: %s" % power_command)
        logger.info("power command input: %s" % power_input)

        for x in range(0, POWER_RETRIES):
            output, rc = utils.subprocess_sp(logger,
                                             power_command,
                                             shell=False,
                                             input=power_input)
            # fencing agent returns 2 if the system is powered off
            if rc == 0 or (rc == 2 and power_operation == 'status'):
                # If the desired state is actually a query for the status
                # return different information than command return code
                if power_operation == 'status':
                    match = re.match(r'^(Status:|.+power\s=)\s(on|off)$',
                                     output, re.IGNORECASE | re.MULTILINE)
                    if match:
                        power_status = match.groups()[1]
                        if power_status.lower() == 'on':
                            return True
                        else:
                            return False
                    error_msg = "command succeeded (rc=%s), but output ('%s') was not understood" % (
                        rc, output)
                    utils.die(logger, error_msg)
                    raise CX(error_msg)
                return None
            else:
                time.sleep(2)

        if rc != 0:
            error_msg = "command failed (rc=%s), please validate the physical setup and cobbler config" % rc
            utils.die(logger, error_msg)
            raise CX(error_msg)
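
# Hedged sketch of the status parsing in _power(); the sample fence agent outputs
# are invented, the regular expression is the one used above.
import re

status_re = re.compile(r'^(Status:|.+power\s=)\s(on|off)$', re.IGNORECASE | re.MULTILINE)
for sample in ("Status: ON", "Chassis Power = off", "unexpected output"):
    match = status_re.match(sample)
    if match:
        print("%s -> powered %s" % (sample, match.groups()[1].lower()))
    else:
        print("%s -> not understood" % sample)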