Example #1
    def add_objects_not_on_local(self, obj_type):
        """
        Add objects locally which are not present on the slave but on the master.

        :param obj_type:
        """
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_sort_by_key(self.remote_data[obj_type], "depth")

        for rdata in remotes:

            # do not add the system if it is not on the transfer list
            if rdata["name"] not in self.must_include[obj_type]:
                continue

            if rdata["uid"] not in locals:
                creator = getattr(self.api, "new_%s" % obj_type)
                newobj = creator()
                newobj.from_dict(utils.revert_strip_none(rdata))
                try:
                    self.logger.info("adding %s %s" %
                                     (obj_type, rdata["name"]))
                    if not self.api.add_item(
                            obj_type, newobj, logger=self.logger):
                        self.logger.error("failed to add %s %s" %
                                          (obj_type, rdata["name"]))
                except Exception:
                    utils.log_exc(self.logger)
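
The utils.lod_to_dod and utils.lod_sort_by_key helpers used above are not shown on this page. A minimal sketch of what they are assumed to do (re-index and sort a list of dicts):

def lod_to_dod(list_of_dicts, key):
    # Re-index a list of dicts into a dict of dicts keyed by one field,
    # e.g. [{"uid": 1, ...}] -> {1: {"uid": 1, ...}}.
    return {item[key]: item for item in list_of_dicts}

def lod_sort_by_key(list_of_dicts, key):
    # Return the list sorted by the given field; sorting by "depth" above
    # ensures parent objects are handled before their children.
    return sorted(list_of_dicts, key=lambda item: item[key])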
Example #2
    def signature_update(self, logger):
        try:
            url = self.settings().signature_url
            dlmgr = download_manager.DownloadManager(self._collection_mgr,
                                                     self.logger)
            # write temp json file
            tmpfile = tempfile.NamedTemporaryFile()
            sigjson = dlmgr.urlread(url)
            tmpfile.write(sigjson.text.encode())
            tmpfile.flush()
            logger.debug("Successfully got file from %s" %
                         self.settings().signature_url)
            # test the import without caching it
            try:
                utils.load_signatures(tmpfile.name, cache=False)
            except Exception:
                logger.error(
                    "Downloaded signatures failed test load (tempfile = %s)" %
                    tmpfile.name)
                # do not overwrite the live signature file with bad data
                return

            # rewrite the real signature file and import it for real
            with open(self.settings().signature_path, "w") as f:
                f.write(sigjson.text)

            utils.load_signatures(self.settings().signature_path)
        except Exception:
            utils.log_exc(logger)
Example #3
    def signature_update(self, logger):
        try:
            tmpfile = tempfile.NamedTemporaryFile()
            proxies = {}
            proxies['http'] = self.settings().proxy_url_ext
            response = urlgrabber.grabber.urlopen(self.settings().signature_url, proxies=proxies)
            sigjson = response.read()
            tmpfile.write(sigjson)
            tmpfile.flush()

            logger.debug("Successfully got file from %s" % self.settings().signature_url)
            # test the import without caching it
            try:
                utils.load_signatures(tmpfile.name, cache=False)
            except Exception:
                logger.error("Downloaded signatures failed test load (tempfile = %s)" % tmpfile.name)
                # do not overwrite the live signature file with bad data
                return

            # rewrite the real signature file and import it for real
            with open(self.settings().signature_path, "w") as f:
                f.write(sigjson)

            utils.load_signatures(self.settings().signature_path)
        except Exception:
            utils.log_exc(logger)
Example #4
    def createrepo_walker(self, repo, dirname, fnames):
        """
        Used to run createrepo on a copied Yum mirror.

        :param repo: The repository object to run for.
        :param dirname: The directory to run in.
        :param fnames: The list of files in ``dirname``; cleared in place at the end so the walk does not descend further.
        """
        if os.path.exists(dirname) or repo['breed'] == 'rsync':
            utils.remove_yum_olddata(dirname)

            # add any repo metadata we can use
            mdoptions = []
            if os.path.isfile("%s/.origin/repodata/repomd.xml" % (dirname)):
                if HAS_LIBREPO:
                    rd = self.librepo_getinfo("%s/.origin" % (dirname))
                elif HAS_YUM:
                    rmd = yum.repoMDObject.RepoMD(
                        '', "%s/.origin/repodata/repomd.xml" % (dirname))
                    rd = rmd.repoData
                else:
                    utils.die(self.logger,
                              "yum/librepo is required to use this feature")

                if "group" in rd:
                    if HAS_LIBREPO:
                        groupmdfile = rd['group']['location_href']
                    else:
                        groupmdfile = rmd.getData("group").location[1]
                    mdoptions.append("-g %s" % groupmdfile)
                if "prestodelta" in rd:
                    # need createrepo >= 0.9.7 to add deltas
                    if utils.get_family() in ("redhat", "suse"):
                        cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                        createrepo_ver = utils.subprocess_get(self.logger, cmd)
                        if not createrepo_ver[0:1].isdigit():
                            cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo_c"
                            createrepo_ver = utils.subprocess_get(
                                self.logger, cmd)
                        if utils.compare_versions_gt(createrepo_ver, "0.9.7"):
                            mdoptions.append("--deltas")
                        else:
                            self.logger.error(
                                "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through Cobbler."
                            )

            blended = utils.blender(self.api, False, repo)
            flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
            try:
                cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags,
                                               pipes.quote(dirname))
                utils.subprocess_call(self.logger, cmd)
            except Exception:
                utils.log_exc(self.logger)
                self.logger.error("createrepo failed.")
            del fnames[:]  # we're in the right place
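
The (repo, dirname, fnames) signature follows the old os.path.walk() visitor convention: the walker is handed over as a callback and receives the extra argument plus each directory and its file list, and clearing fnames in place ("del fnames[:]") prunes the walk so it does not descend further. os.path.walk() was removed in Python 3; a small shim illustrating the assumed calling convention:

import os

def walk(top, visit, arg):
    # Minimal stand-in for the removed os.path.walk(): call
    # visit(arg, dirname, names) for each directory and honor in-place
    # edits of names (e.g. del names[:]) to prune the descent.
    names = os.listdir(top)
    visit(arg, top, names)
    for name in names:
        path = os.path.join(top, name)
        if os.path.isdir(path) and not os.path.islink(path):
            walk(path, visit, arg)

# hypothetical usage, mirroring the walker above:
#   walk(repo_mirror_path, self.createrepo_walker, repo)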
Example #5
    def createrepo_walker(self, repo, dirname: str, fnames):
        """
        Used to run createrepo on a copied Yum mirror.

        :param repo: The repository object to run for.
        :param dirname: The directory to run in.
        :param fnames: The list of files in ``dirname``; cleared in place at the end so the walk does not descend further.
        """
        if os.path.exists(dirname) or repo.breed == RepoBreeds.RSYNC:
            utils.remove_yum_olddata(dirname)

            # add any repo metadata we can use
            mdoptions = []
            origin_path = os.path.join(dirname, ".origin")
            repodata_path = os.path.join(origin_path, "repodata")

            if os.path.isfile(os.path.join(repodata_path, "repomd.xml")):
                rd = self.librepo_getinfo(origin_path)

                if "group" in rd:
                    groupmdfile = rd['group']['location_href']
                    mdoptions.append("-g %s" %
                                     os.path.join(origin_path, groupmdfile))
                if "prestodelta" in rd:
                    # need createrepo >= 0.9.7 to add deltas
                    if utils.get_family() in ("redhat", "suse"):
                        cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                        createrepo_ver = utils.subprocess_get(cmd)
                        if not createrepo_ver[0:1].isdigit():
                            cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo_c"
                            createrepo_ver = utils.subprocess_get(cmd)
                        if utils.compare_versions_gt(createrepo_ver, "0.9.7"):
                            mdoptions.append("--deltas")
                        else:
                            self.logger.error(
                                "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 "
                                "first and then need to resync the repo through Cobbler."
                            )

            blended = utils.blender(self.api, False, repo)
            flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
            try:
                cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags,
                                               pipes.quote(dirname))
                utils.subprocess_call(cmd)
            except Exception:
                utils.log_exc()
                self.logger.error("createrepo failed.")
            del fnames[:]  # we're in the right place
Example #6
    def remove_objects_not_on_master(self, obj_type):
        """
        Remove objects on this slave which are not on the master.

        :param obj_type: The type of object which should be synchronized.
        """
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")

        for (luid, ldata) in list(locals.items()):
            if luid not in remotes:
                try:
                    self.logger.info("removing %s %s" %
                                     (obj_type, ldata["name"]))
                    self.api.remove_item(obj_type,
                                         ldata["name"],
                                         recursive=True,
                                         logger=self.logger)
                except Exception:
                    utils.log_exc(self.logger)
Example #7
    def remove_objects_not_on_master(self, obj_type):
        """
        Remove objects on this slave which are not on the master.

        :param obj_type: The type of object which should be synchronized.
        """
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")

        for (luid, ldata) in list(locals.items()):
            if luid not in remotes:
                try:
                    self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                    self.api.remove_item(obj_type, ldata["name"], recursive=True)
                except Exception:
                    utils.log_exc()
Example #8
    def make_grub(self):
        """
        Create symlink of the GRUB 2 bootloader in case it is available on the system. Additionally build the loaders
        for other architectures if the modules to do so are available.
        """
        symlink(
            pathlib.Path("/usr/share/efi/x86_64/grub.efi"),
            self.bootloaders_dir.joinpath(pathlib.Path("grub/grub.efi")),
            skip_existing=True
        )

        if not utils.command_existing("grub2-mkimage"):
            self.logger.info("grub2-mkimage command not available. Bailing out of GRUB2 generation!")
            return

        for image_format, options in self.boot_loaders_formats.items():
            bl_mod_dir = options.get("mod_dir", image_format)
            mod_dir = self.grub2_mod_dir.joinpath(bl_mod_dir)
            if not mod_dir.exists():
                self.logger.info(
                    'GRUB2 modules directory for arch "%s" did not exist. Skipping GRUB2 creation',
                    image_format
                )
                continue
            try:
                mkimage(
                    image_format,
                    self.bootloaders_dir.joinpath("grub", options["binary_name"]),
                    self.modules + options.get("extra_modules", []),
                )
            except subprocess.CalledProcessError:
                self.logger.info('grub2-mkimage failed for arch "%s"! Maybe you forgot to install the GRUB '
                                 'modules for the architecture?', image_format)
                utils.log_exc()
                # don't create module symlinks if grub2-mkimage is unsuccessful
                continue
            self.logger.info('Successfully built bootloader for arch "%s"!', image_format)

            # Create a symlink for GRUB 2 modules
            # assumes a single GRUB can be used to boot all kinds of distros
            # if this assumption turns out incorrect, individual "grub" subdirectories are needed
            symlink(
                mod_dir,
                self.bootloaders_dir.joinpath("grub", bl_mod_dir),
                skip_existing=True
            )
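
The symlink and mkimage helpers come from elsewhere in the package; mkimage presumably shells out to grub2-mkimage. A minimal sketch under that assumption:

import pathlib
import subprocess

def mkimage(image_format: str, image_filename: pathlib.Path, modules: list):
    # Hypothetical wrapper: build a standalone GRUB 2 image for the given
    # target format (e.g. "x86_64-efi") with the listed modules baked in.
    subprocess.run(
        ["grub2-mkimage",
         "--format", image_format,
         "--output", str(image_filename),
         *modules],
        check=True,  # raises CalledProcessError, as handled above
    )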
Example #9
def __check_auth_token(xmlrpc_client, api_handle, username, password):
    """
    This checks if the auth token is valid.
    :param xmlrpc_client: The xmlrpc client to check access for.
    :param api_handle: The api instance to retrieve settings of.
    :param username: The username to try.
    :param password: The password to try.
    :return: In any error case this will return False. Otherwise the return value of the API, which should be 1.
    """
    # If the token is not a valid token this will raise an exception rather than return an integer.
    try:
        return xmlrpc_client.auth.checkAuthToken(username, password)
    except Error:
        logger = api_handle.logger
        logger.error("Error while checking authentication token.")
        log_exc(logger)
        return False
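
A hedged usage sketch: xmlrpc_client is assumed to be a standard xmlrpc.client.ServerProxy bound to the Spacewalk/Uyuni API endpoint; the helper and URL below are illustrative, not part of the original module:

import xmlrpc.client

def check_token_for(api_handle, username: str, token: str) -> bool:
    # Hypothetical caller living in the same module as __check_auth_token;
    # the real endpoint URL would come from the Cobbler settings.
    client = xmlrpc.client.ServerProxy("https://spacewalk.example.com/rpc/api")
    return bool(__check_auth_token(client, api_handle, username, token))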
Example #10
def __import_module(module_path, modname, logger):
    try:
        blip = __import__("cobbler.modules.%s" % modname, globals(), locals(), [modname])
        if not hasattr(blip, "register"):
            if not modname.startswith("__init__"):
                errmsg = _("%(module_path)s/%(modname)s is not a proper module")
                print(errmsg % {'module_path': module_path, 'modname': modname})
            return None
        category = blip.register()
        if category:
            MODULE_CACHE[modname] = blip
        if category not in MODULES_BY_CATEGORY:
            MODULES_BY_CATEGORY[category] = {}
        MODULES_BY_CATEGORY[category][modname] = blip
    except Exception:
        logger.info('Exception raised when loading module %s' % modname)
        log_exc(logger)
Example #11
def __check_user_login(xmlrpc_client, api_handle, user_enabled: bool, username, password) -> bool:
    """
    This actually performs the login to spacewalk.

    :param xmlrpc_client: The xmlrpc client bound to the target spacewalk instance.
    :param api_handle: The api instance to retrieve settings of.
    :param user_enabled: Whether we allow Spacewalk users to log in or not.
    :param username: The username to log in.
    :param password: The password to log in.
    :return: True if users are allowed to log in and the user has the ``config_admin`` or ``org_admin`` role.
    """
    try:
        session = xmlrpc_client.auth.login(username, password)
        # login success by username, role must also match and user_enabled needs to be true.
        roles = xmlrpc_client.user.listRoles(session, username)
        if user_enabled and ("config_admin" in roles or "org_admin" in roles):
            return True
    except Error:
        logger = api_handle.logger
        logger.error("Error while checking user authentication data.")
        log_exc(logger)
    return False
Example #12
def __import_module(module_path: str, modname: str):
    """
    Import a module which is not part of the core functionality of Cobbler.

    :param module_path: The path to the module.
    :param modname: The name of the module.
    """
    try:
        blip = import_module("cobbler.modules.%s" % modname)
        if not hasattr(blip, "register"):
            if not modname.startswith("__init__"):
                errmsg = "%(module_path)s/%(modname)s is not a proper module"
                print(errmsg % {'module_path': module_path, 'modname': modname})
            return None
        category = blip.register()
        if category:
            MODULE_CACHE[modname] = blip
        if category not in MODULES_BY_CATEGORY:
            MODULES_BY_CATEGORY[category] = {}
        MODULES_BY_CATEGORY[category][modname] = blip
    except Exception:
        logger.info('Exception raised when loading module %s' % modname)
        log_exc()
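
Each loadable module is expected to expose a register() function that returns the category string under which it is filed in MODULES_BY_CATEGORY. A hypothetical minimal module satisfying that contract:

# cobbler/modules/example.py (hypothetical)

def register() -> str:
    # The returned category decides which MODULES_BY_CATEGORY bucket
    # this module ends up in.
    return "example/category"

def do_something(api_handle) -> bool:
    # Module-specific payload, looked up later through its category.
    return True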
Example #13
    def signature_update(self, logger):
        try:
            tmpfile = tempfile.NamedTemporaryFile()
            response = urllib2.urlopen(self.settings().signature_url)
            sigjson = response.read()
            tmpfile.write(sigjson)
            tmpfile.flush()

            logger.debug("Successfully got file from %s" % self.settings().signature_url)
            # test the import without caching it
            if not utils.load_signatures(tmpfile.name, cache=False):
                logger.error("Downloaded signatures failed test load (tempfile = %s)" % tmpfile.name)
                return False

            # rewrite the real signature file and import it for real
            with open(self.settings().signature_path, "w") as f:
                f.write(sigjson)

            return utils.load_signatures(self.settings().signature_path)
        except Exception:
            utils.log_exc(logger)
            return False
Example #14
    def replace_objects_newer_on_remote(self, obj_type):
        """
        Replace objects which are newer on the remote master than on the local slave.

        :param obj_type: The type of object to synchronize.
        """
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")

        for (ruid, rdata) in list(remotes.items()):
            # do not add the system if it is not on the transfer list
            if rdata["name"] not in self.must_include[obj_type]:
                continue

            if ruid in locals:
                ldata = locals[ruid]
                if ldata["mtime"] < rdata["mtime"]:

                    if ldata["name"] != rdata["name"]:
                        self.logger.info("removing %s %s" %
                                         (obj_type, ldata["name"]))
                        self.api.remove_item(obj_type,
                                             ldata["name"],
                                             recursive=True,
                                             logger=self.logger)
                    creator = getattr(self.api, "new_%s" % obj_type)
                    newobj = creator()
                    newobj.from_dict(rdata)
                    try:
                        self.logger.info("updating %s %s" %
                                         (obj_type, rdata["name"]))
                        if not self.api.add_item(obj_type, newobj):
                            self.logger.error("failed to update %s %s" %
                                              (obj_type, rdata["name"]))
                    except Exception:
                        utils.log_exc(self.logger)
Example #15
    def signature_update(self, logger):
        try:
            url = self.settings().signature_url
            dlmgr = download_manager.DownloadManager(self._collection_mgr, self.logger)
            # write temp json file
            tmpfile = tempfile.NamedTemporaryFile()
            sigjson = dlmgr.urlread(url)
            tmpfile.write(sigjson)
            tmpfile.flush()
            logger.debug("Successfully got file from %s" % self.settings().signature_url)
            # test the import without caching it
            try:
                utils.load_signatures(tmpfile.name, cache=False)
            except Exception:
                logger.error("Downloaded signatures failed test load (tempfile = %s)" % tmpfile.name)
                # do not overwrite the live signature file with bad data
                return

            # rewrite the real signature file and import it for real
            with open(self.settings().signature_path, "w") as f:
                f.write(sigjson)

            utils.load_signatures(self.settings().signature_path)
        except Exception:
            utils.log_exc(logger)
Example #16
    def run(self, name=None, verbose=True):
        """
        Syncs the current repo configuration file with the filesystem.
        """

        self.logger.info("run, reposync, run!")

        try:
            self.tries = int(self.tries)
        except (ValueError, TypeError):
            utils.die(self.logger, "retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:
            if name is not None and repo.name != name:
                # invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)

            if not os.path.isdir(repo_path) and \
                    not repo.mirror.lower().startswith("rhn://"):
                os.makedirs(repo_path)

            # set the environment keys specified for this repo
            # save the old ones if they modify an existing variable

            env = repo.environment
            old_env = {}

            for k in list(env.keys()):
                self.logger.debug("setting repo environment: %s=%s" %
                                  (k, env[k]))
                if env[k] is not None:
                    # save any existing value so it can be restored afterwards
                    if os.getenv(k):
                        old_env[k] = os.getenv(k)
                    os.environ[k] = env[k]

            # which may actually NOT reposync if the repo is set to not mirror locally
            # but that's a technicality

            success = False
            for x in range(self.tries + 1, 1, -1):
                try:
                    self.sync(repo)
                    success = True
                    break
                except Exception:
                    utils.log_exc(self.logger)
                    self.logger.warning("reposync failed, tries left: %s" %
                                        (x - 2))

            # cleanup/restore any environment variables that were
            # added or changed above

            for k in list(env.keys()):
                if env[k] is not None:
                    if k in old_env:
                        self.logger.debug("resetting repo environment: %s=%s" %
                                          (k, old_env[k]))
                        os.environ[k] = old_env[k]
                    else:
                        self.logger.debug("removing repo environment: %s=%s" %
                                          (k, env[k]))
                        del os.environ[k]

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die(
                        self.logger,
                        "reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error(
                        "reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(
                self.logger,
                "overall reposync failed, at least one repo failed to synchronize"
            )
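
The countdown loop above, range(self.tries + 1, 1, -1), simply attempts self.sync(repo) up to self.tries times; an equivalent, more direct formulation of the same retry logic would be:

success = False
for attempt in range(self.tries):
    try:
        self.sync(repo)
        success = True
        break
    except Exception:
        utils.log_exc(self.logger)
        self.logger.warning("reposync failed, tries left: %s" %
                            (self.tries - attempt - 1))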
Example #17
    def yum_process_comps_file(self, comps_path: str,
                               distribution: distro.Distro):
        """
        When importing Fedora/EL certain parts of the install tree can also be used as yum repos containing packages
        that might not yet be available via updates in yum. This code identifies those areas. Existing repodata will be
        used as-is, but repodata is created for earlier, non-yum based, installers.

        :param comps_path: The path under which to search for a comps XML file.
        :param distribution: The distribution to check.
        """

        if os.path.exists(os.path.join(comps_path, "repodata")):
            keeprepodata = True
            masterdir = "repodata"
        else:
            # older distros...
            masterdir = "base"
            keeprepodata = False

        # figure out what our comps file is ...
        self.logger.info("looking for %(p1)s/%(p2)s/*comps*.xml" % {
            "p1": comps_path,
            "p2": masterdir
        })
        files = glob.glob("%s/%s/*comps*.xml" % (comps_path, masterdir))
        if len(files) == 0:
            self.logger.info("no comps found here: %s" %
                             os.path.join(comps_path, masterdir))
            return  # no comps xml file found

        # pull the filename from the longer part
        comps_file = files[0].split("/")[-1]

        try:
            # Store the yum configs on the filesystem so we can use them later. And configure them in the automated
            # installation file post section, etc.

            counter = len(distribution.source_repos)

            # find path segment for yum_url (changing filesystem path to http:// trailing fragment)
            seg = comps_path.rfind("distro_mirror")
            urlseg = comps_path[(seg + len("distro_mirror") + 1):]

            fname = os.path.join(self.settings.webdir, "distro_mirror",
                                 "config",
                                 "%s-%s.repo" % (distribution.name, counter))

            repo_url = "http://@@http_server@@/cobbler/distro_mirror/config/%s-%s.repo" % (
                distribution.name, counter)
            repo_url2 = "http://@@http_server@@/cobbler/distro_mirror/%s" % urlseg

            distribution.source_repos.append([repo_url, repo_url2])

            config_dir = os.path.dirname(fname)
            if not os.path.exists(config_dir):
                os.makedirs(config_dir)

            # NOTE: the following file is now a Cheetah template, so it can be remapped during sync, that's why we have
            # the @@http_server@@ left as templating magic.
            # repo_url2 is actually no longer used. (?)

            with open(fname, "w+") as config_file:
                config_file.write("[core-%s]\n" % counter)
                config_file.write("name=core-%s\n" % counter)
                config_file.write(
                    "baseurl=http://@@http_server@@/cobbler/distro_mirror/%s\n"
                    % urlseg)
                config_file.write("enabled=1\n")
                config_file.write("gpgcheck=0\n")
                config_file.write("priority=$yum_distro_priority\n")

            # Don't run createrepo twice -- this can happen easily for Xen and PXE, when they'll share same repo files.
            if keeprepodata:
                self.logger.info("Keeping repodata as-is :%s/repodata" %
                                 comps_path)
                self.found_repos[comps_path] = 1

            elif comps_path not in self.found_repos:
                utils.remove_yum_olddata(comps_path)
                cmd = "createrepo %s --groupfile %s %s" % (
                    self.settings.createrepo_flags,
                    os.path.join(comps_path, masterdir,
                                 comps_file), comps_path)
                utils.subprocess_call(cmd, shell=True)
                self.found_repos[comps_path] = 1
                # For older distros, if we have a "base" dir parallel with "repodata", we need to copy comps.xml up
                # one...
                p1 = os.path.join(comps_path, "repodata", "comps.xml")
                p2 = os.path.join(comps_path, "base", "comps.xml")
                if os.path.exists(p1) and os.path.exists(p2):
                    shutil.copyfile(p1, p2)
        except Exception:
            self.logger.error(
                "error launching createrepo (not installed?), ignoring")
            utils.log_exc()
Example #18
    def yum_process_comps_file(self, comps_path, distro):
        """
        When importing Fedora/EL certain parts of the install tree can also be used
        as yum repos containing packages that might not yet be available via updates
        in yum.  This code identifies those areas. Existing repodata will be used as-is,
        but repodata is created for earlier, non-yum based, installers.
        """

        if os.path.exists(os.path.join(comps_path, "repodata")):
            keeprepodata = True
            masterdir = "repodata"
        else:
            # older distros...
            masterdir = "base"
            keeprepodata = False

        # figure out what our comps file is ...
        self.logger.info("looking for %(p1)s/%(p2)s/*comps*.xml" % {"p1": comps_path, "p2": masterdir})
        files = glob.glob("%s/%s/*comps*.xml" % (comps_path, masterdir))
        if len(files) == 0:
            self.logger.info("no comps found here: %s" % os.path.join(comps_path, masterdir))
            return      # no comps xml file found

        # pull the filename from the longer part
        comps_file = files[0].split("/")[-1]

        try:
            # store the yum configs on the filesystem so we can use them later.
            # and configure them in the automated installation file post section,
            # etc

            counter = len(distro.source_repos)

            # find path segment for yum_url (changing filesystem path to http:// trailing fragment)
            seg = comps_path.rfind("distro_mirror")
            urlseg = comps_path[(seg + len("distro_mirror") + 1):]

            fname = os.path.join(self.settings.webdir, "distro_mirror", "config", "%s-%s.repo" % (distro.name, counter))

            repo_url = "http://@@http_server@@/cobbler/distro_mirror/config/%s-%s.repo" % (distro.name, counter)
            repo_url2 = "http://@@http_server@@/cobbler/distro_mirror/%s" % (urlseg)

            distro.source_repos.append([repo_url, repo_url2])

            config_dir = os.path.dirname(fname)
            if not os.path.exists(config_dir):
                os.makedirs(config_dir)

            # NOTE: the following file is now a Cheetah template, so it can be remapped
            # during sync, that's why we have the @@http_server@@ left as templating magic.
            # repo_url2 is actually no longer used. (?)

            with open(fname, "w+") as config_file:
                config_file.write("[core-%s]\n" % counter)
                config_file.write("name=core-%s\n" % counter)
                config_file.write("baseurl=http://@@http_server@@/cobbler/distro_mirror/%s\n" % (urlseg))
                config_file.write("enabled=1\n")
                config_file.write("gpgcheck=0\n")
                config_file.write("priority=$yum_distro_priority\n")

            # don't run createrepo twice -- this can happen easily for Xen and PXE, when
            # they'll share same repo files.
            if keeprepodata:
                self.logger.info("Keeping repodata as-is :%s/repodata" % comps_path)
                self.found_repos[comps_path] = 1

            elif comps_path not in self.found_repos:
                utils.remove_yum_olddata(comps_path)
                cmd = "createrepo %s --groupfile %s %s" % (self.settings.createrepo_flags, os.path.join(comps_path, masterdir, comps_file), comps_path)
                utils.subprocess_call(self.logger, cmd, shell=True)
                self.found_repos[comps_path] = 1
                # for older distros, if we have a "base" dir parallel with "repodata", we need to copy comps.xml up one...
                p1 = os.path.join(comps_path, "repodata", "comps.xml")
                p2 = os.path.join(comps_path, "base", "comps.xml")
                if os.path.exists(p1) and os.path.exists(p2):
                    shutil.copyfile(p1, p2)

        except Exception:
            self.logger.error("error launching createrepo (not installed?), ignoring")
            utils.log_exc(self.logger)
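
For reference, the .repo template written above renders to something like the following before sync-time Cheetah substitution of @@http_server@@ and $yum_distro_priority (urlseg shown as a placeholder):

[core-0]
name=core-0
baseurl=http://@@http_server@@/cobbler/distro_mirror/<urlseg>
enabled=1
gpgcheck=0
priority=$yum_distro_priority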