Example No. 1
    def replicate_data(self):

        # distros
        self.logger.info("Copying Distros")
        local_distros = self.api.distros()
        try:
            remote_distros = self.remote.get_distros()
        except:
            utils.die(self.logger, "Failed to contact remote server")

        if self.sync_all or self.sync_trees:
            self.logger.info("Rsyncing Distribution Trees")
            self.rsync_it(os.path.join(self.settings.webdir, "ks_mirror"), self.settings.webdir)

        for distro in remote_distros:
            self.logger.info("Importing remote distro %s." % distro["name"])
            if os.path.exists(distro["kernel"]):
                remote_mtime = distro["mtime"]
                if self.should_add_or_replace(distro, "distros"):
                    new_distro = self.api.new_distro()
                    new_distro.from_datastruct(distro)
                    try:
                        self.api.add_distro(new_distro)
                        self.logger.info("Copied distro %s." % distro["name"])
                    except Exception, e:
                        utils.log_exc(self.logger)
                        self.logger.error("Failed to copy distro %s" % distro["name"])
                else:
                    # FIXME: force logic
                    self.logger.info("Not copying distro %s, sufficiently new mtime" % distro["name"])
            else:
                self.logger.error("Failed to copy distro %s, content not here yet." % distro["name"])
Example No. 2
    def replace_objects_newer_on_remote(self, obj_type):
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")

        for (ruid, rdata) in remotes.iteritems():
            # do not add the system if it is not on the transfer list
            if not rdata["name"] in self.must_include[obj_type]:
                continue

            if ruid in locals:
                ldata = locals[ruid]
                if ldata["mtime"] < rdata["mtime"]:

                    if ldata["name"] != rdata["name"]:
                        self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                        self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                    creator = getattr(self.api, "new_%s" % obj_type)
                    newobj = creator()
                    newobj.from_dict(rdata)
                    try:
                        self.logger.info("updating %s %s" % (obj_type, rdata["name"]))
                        if not self.api.add_item(obj_type, newobj):
                            self.logger.error("failed to update %s %s" % (obj_type, rdata["name"]))
                    except Exception:
                        utils.log_exc(self.logger)
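For reference, utils.lod_to_dod indexes a list of dicts by one field so that local and remote objects can be matched by uid; a minimal sketch of the assumed behavior:

    def lod_to_dod(lod, indexkey):
        # list-of-dicts -> dict-of-dicts, keyed on the given field
        results = {}
        for item in lod:
            results[item[indexkey]] = item
        return results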
Example No. 3
    def replace_objects_newer_on_remote(self, obj_type):
        locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
        remotes = utils.loh_to_hoh(self.remote_data[obj_type], "uid")

        for (ruid, rdata) in remotes.iteritems():

            # do not add the system if it is not on the transfer list
            if not self.must_include[obj_type].has_key(rdata["name"]):
                continue

            if locals.has_key(ruid):
                ldata = locals[ruid]
                if ldata["mtime"] < rdata["mtime"]:

                    if ldata["name"] != rdata["name"]:
                        self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                        self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                    creator = getattr(self.api, "new_%s" % obj_type)
                    newobj = creator()
                    newobj.from_datastruct(rdata)
                    try:
                        self.logger.info("updating %s %s" % (obj_type, rdata["name"]))
                        self.api.add_item(obj_type, newobj)
                    except Exception, e:
                        utils.log_exc(self.logger)
Example No. 4
    def replace_objects_newer_on_remote(self, obj_type):
        locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
        remotes = utils.loh_to_hoh(self.remote_data[obj_type], "uid")

        for (ruid, rdata) in remotes.iteritems():

            # do not add the system if it is not on the transfer list
            if not self.must_include[obj_type].has_key(rdata["name"]):
                continue

            if locals.has_key(ruid):
                ldata = locals[ruid]
                if ldata["mtime"] < rdata["mtime"]:

                    if ldata["name"] != rdata["name"]:
                        self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                        self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                    creator = getattr(self.api, "new_%s" % obj_type)
                    newobj = creator()
                    if obj_type == 'distro':
                        rdata = self.fix_distro(rdata)
                    newobj.from_datastruct(rdata)
                    try:
                        self.logger.info("updating %s %s" % (obj_type, rdata["name"]))
                        self.api.add_item(obj_type, newobj)
                    except Exception, e:
                        utils.log_exc(self.logger)
Example No. 5
    def generate_kickstart_for_profile(self,g):

        g = self.api.find_profile(name=g)
        if g is None:
           return "# profile not found"

        distro = g.get_conceptual_parent()
        meta = utils.blender(self.api, False, g)
        if distro is None:
           raise CX(_("profile %(profile)s references missing distro %(distro)s") % { "profile" : g.name, "distro" : g.distro })
        kickstart_path = utils.find_kickstart(meta["kickstart"])
        if kickstart_path is not None and os.path.exists(kickstart_path):
            # the input is an *actual* file, hence we have to copy it
            try:
                meta = utils.blender(self.api, False, g)
                ksmeta = meta["ks_meta"]
                del meta["ks_meta"]
                meta.update(ksmeta) # make available at top level
                meta["yum_repo_stanza"] = self.generate_repo_stanza(g,True)
                meta["yum_config_stanza"] = self.generate_config_stanza(g,True)
                meta["kickstart_done"]  = self.generate_kickstart_signal(0, g, None)
                meta["kickstart_start"] = self.generate_kickstart_signal(1, g, None)
                meta["kernel_options"] = utils.hash_to_string(meta["kernel_options"])
                kfile = open(kickstart_path)
                data = self.templar.render(kfile, meta, None, g)
                kfile.close()
                return data
            except:
                utils.log_exc(self.api.logger)
                raise

        elif kickstart_path is not None and not os.path.exists(kickstart_path):
            if kickstart_path.find("http://") == -1 and kickstart_path.find("ftp://") == -1 and kickstart_path.find("nfs:") == -1:
                return "# Error, cannot find %s" % kickstart_path
        return "# kickstart is sourced externally, or is missing, and cannot be displayed here: %s" % meta["kickstart"]
Example No. 6
    def remove_objects_not_on_master(self, obj_type):
        locals = utils.loh_to_hoh(self.local_data[obj_type],"uid")
        remotes = utils.loh_to_hoh(self.remote_data[obj_type],"uid")

        for (luid, ldata) in locals.iteritems():
            if not remotes.has_key(luid):
                try:
                    self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                    self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                except Exception, e:
                    utils.log_exc(self.logger)
Example No. 7
    def remove_objects_not_on_master(self, obj_type):
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")

        for (luid, ldata) in locals.iteritems():
            if luid not in remotes:
                try:
                    self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                    self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                except Exception:
                    utils.log_exc(self.logger)
Example No. 8
def load_modules(module_path=mod_path, blacklist=None):
    logger = clogger.Logger()

    # identify candidate module files
    filenames = glob.glob("%s/*.py" % module_path)
    filenames += glob.glob("%s/*.pyc" % module_path)
    filenames += glob.glob("%s/*.pyo" % module_path)

    mods = set()

    # skip __init__.py and duplicate module names
    for fn in filenames:
        basename = os.path.basename(fn)
        if basename == "__init__.py":
            continue
        if basename[-3:] == ".py":
            modname = basename[:-3]
        elif basename[-4:] in [".pyc", ".pyo"]:
            modname = basename[:-4]

        # No need to try importing the same module over and over if
        # we have a .py, .pyc, and .pyo
        if modname in mods:
            continue
        mods.add(modname)

        try:
            # load the module
            blip = __import__("modules.%s" % (modname), globals(), locals(),
                              [modname])
            if not hasattr(blip, "register"):
                if not modname.startswith("__init__"):
                    # unexpected file that is not a proper module; report it
                    errmsg = _(
                        "%(module_path)s/%(modname)s is not a proper module")
                    print errmsg % {
                        'module_path': module_path,
                        'modname': modname
                    }
                continue
            # register the module
            category = blip.register()
            if category:
                MODULE_CACHE[modname] = blip
            if category not in MODULES_BY_CATEGORY:
                MODULES_BY_CATEGORY[category] = {}
            MODULES_BY_CATEGORY[category][modname] = blip
        except Exception:
            logger.info('Exception raised when loading module %s' % modname)
            log_exc(logger)

    # return all loaded modules, plus the same modules grouped by category
    return (MODULE_CACHE, MODULES_BY_CATEGORY)
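A module loadable by this scanner only needs a module-level register() function returning its category; a hypothetical plugin illustrating the contract (all names below are made up):

    # modules/authn_example.py (hypothetical)
    def register():
        # a non-empty category string places this module in
        # MODULE_CACHE and MODULES_BY_CATEGORY[category]
        return "authn"

    def authenticate(api_handle, username, password):
        # module-specific entry points are looked up by the caller
        return username == "admin"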
Example No. 9
    def process(self, message):
        # find principal
        from_hdr = parseaddr(message['From'])[1].lower()
        try:
            principal = UserModel().objects.get(email=from_hdr)
        except UserModel().DoesNotExist:
            # member not found
            raise MailInException('Member not found: %s' % from_hdr)

        # deliver message
        try:
            self.processRecipient(principal, message)
        except Exception, e:
            log_exc()
            raise MailInException(e)
Example No. 10
    def process(self, message):
        # find principal
        from_hdr = parseaddr(message["From"])[1].lower()
        try:
            principal = UserModel().objects.get(email=from_hdr)
        except UserModel().DoesNotExist:
            # member not found
            raise MailInException("Member not found: %s" % from_hdr)

        # deliver message
        try:
            self.processRecipient(principal, message)
        except Exception, e:
            log_exc()
            raise MailInException(e)
Example No. 11
class MailInTransport(object):
    def __call__(self, request, *args, **kw):
        mail = request.POST.get('mail') or request.GET.get('mail')
        if not mail:
            return HttpResponse('failed', status=500)

        # convert mail
        try:
            msg = message_from_string(mail.encode('utf-8'))
        except:
            log_exc('Error parsing email')
            return HttpResponse('failed on parsing', status=500)

        # check message for loops, wrong mta hosts, etc
        try:
            config_instance.checkMessage(msg, mail, request)
        except MailInException, msg:
            log(str(msg))
            return HttpResponse('failed on checking', status=500)

        # process message
        try:
            config_instance.process(msg)
        except MailInException, msg:
            log_exc('Error processing email')
            return HttpResponse('failed on processing', status=500)
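Note: "except MailInException, msg" rebinds msg, shadowing the parsed message object created earlier in the method. A distinct name avoids the shadowing (a sketch, using the Python 2.6+ spelling):

        try:
            config_instance.checkMessage(msg, mail, request)
        except MailInException as err:
            log(str(err))
            return HttpResponse('failed on checking', status=500)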
Example No. 12
def load_modules(module_path=mod_path, blacklist=None):
    logger = clogger.Logger()

    filenames = glob.glob("%s/*.py" % module_path)
    filenames += glob.glob("%s/*.pyc" % module_path)
    filenames += glob.glob("%s/*.pyo" % module_path)

    mods = set()

    for fn in filenames:
        basename = os.path.basename(fn)
        if basename == "__init__.py":
            continue
        if basename[-3:] == ".py":
            modname = basename[:-3]
        elif basename[-4:] in [".pyc", ".pyo"]:
            modname = basename[:-4]

        # No need to try importing the same module over and over if
        # we have a .py, .pyc, and .pyo
        if modname in mods:
            continue
        mods.add(modname)

        try:
            blip = __import__("modules.%s" % (modname), globals(), locals(),
                              [modname])
            if not hasattr(blip, "register"):
                if not modname.startswith("__init__"):
                    errmsg = _(
                        "%(module_path)s/%(modname)s is not a proper module")
                    print errmsg % {
                        'module_path': module_path,
                        'modname': modname
                    }
                continue
            category = blip.register()
            if category:
                MODULE_CACHE[modname] = blip
            if category not in MODULES_BY_CATEGORY:
                MODULES_BY_CATEGORY[category] = {}
            MODULES_BY_CATEGORY[category][modname] = blip
        except Exception:
            logger.info('Exception raised when loading module %s' % modname)
            log_exc(logger)

    return (MODULE_CACHE, MODULES_BY_CATEGORY)
Example No. 13
    def __call__(self, request, *args, **kw):
        mail = request.POST.get('mail') or request.GET.get('mail')
        if not mail:
            return HttpResponse('failed', status=500)

        # convert mail
        try:
            msg = message_from_string(mail.encode('utf-8'))
        except:
            log_exc('Error parsing email')
            return HttpResponse('failed on parsing', status=500)

        # check message for loops, wrong mta hosts, etc
        try:
            config_instance.checkMessage(msg, mail, request)
        except MailInException, msg:
            log(str(msg))
            return HttpResponse('failed on checking', status=500)
Example No. 14
    def createrepo_walker(self, repo, dirname, fnames):
        """
        Used to run createrepo on a copied Yum mirror.
        """
        if os.path.exists(dirname) or repo['breed'] == 'rsync':
            utils.remove_yum_olddata(dirname)

            # add any repo metadata we can use
            mdoptions = []
            if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
                if not HAS_YUM:
                    utils.die(self.logger,
                              "yum is required to use this feature")

                rmd = yum.repoMDObject.RepoMD(
                    '', "%s/.origin/repomd.xml" % (dirname))
                if rmd.repoData.has_key("group"):
                    groupmdfile = rmd.getData("group").location[1]
                    mdoptions.append("-g %s" % groupmdfile)
                if rmd.repoData.has_key("prestodelta"):
                    # need createrepo >= 0.9.7 to add deltas
                    if utils.check_dist() in ("redhat", "fedora", "centos",
                                              "scientific linux", "suse",
                                              "opensuse"):
                        cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                        createrepo_ver = utils.subprocess_get(self.logger, cmd)
                        if createrepo_ver >= "0.9.7":
                            mdoptions.append("--deltas")
                        else:
                            self.logger.error(
                                "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler."
                            )

            blended = utils.blender(self.api, False, repo)
            flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
            try:
                # BOOKMARK
                cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags,
                                               dirname)
                utils.subprocess_call(self.logger, cmd)
            except:
                utils.log_exc(self.logger)
                self.logger.error("createrepo failed.")
            del fnames[:]  # we're in the right place
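Note: createrepo_ver >= "0.9.7" is a plain string comparison, so a hypothetical "0.10.1" would sort before "0.9.7". A safer comparison (a sketch, assuming purely numeric dotted versions):

    def version_tuple(v):
        # "0.9.7" -> (0, 9, 7)
        return tuple(int(part) for part in v.split("."))

    if version_tuple(createrepo_ver) >= version_tuple("0.9.7"):
        mdoptions.append("--deltas")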
Example No. 15
    def render(self, request, compress=True):
        request = self._get_request(request)

        if self.resource:
            return self.resource(request).GET()

        path = self.path

        resource = traverse(path, request)
        if resource is None:
            return u''

        gresource = IResource(resource, None)
        if gresource is not None:
            try:
                return gresource.render(request)
            except Exception, err:
                log_exc(str(err))
                raise
Example No. 16
    def add_objects_not_on_local(self, obj_type):
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_sort_by_key(self.remote_data[obj_type], "depth")

        for rdata in remotes:

            # do not add the system if it is not on the transfer list
            if not rdata["name"] in self.must_include[obj_type]:
                continue

            if not rdata["uid"] in locals:
                creator = getattr(self.api, "new_%s" % obj_type)
                newobj = creator()
                newobj.from_dict(rdata)
                try:
                    self.logger.info("adding %s %s" % (obj_type, rdata["name"]))
                    if not self.api.add_item(obj_type, newobj, logger=self.logger):
                        self.logger.error("failed to add %s %s" % (obj_type, rdata["name"]))
                except Exception:
                    utils.log_exc(self.logger)
Example No. 17
    def add_objects_not_on_local(self, obj_type):
        locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
        remotes = utils.loh_sort_by_key(self.remote_data[obj_type], "depth")
        remotes2 = utils.loh_to_hoh(self.remote_data[obj_type], "depth")

        for rdata in remotes:

            # do not add the system if it is not on the transfer list
            if not self.must_include[obj_type].has_key(rdata["name"]):
                continue

            if not locals.has_key(rdata["uid"]):
                creator = getattr(self.api, "new_%s" % obj_type)
                newobj = creator()
                newobj.from_datastruct(rdata)
                try:
                    self.logger.info("adding %s %s" % (obj_type, rdata["name"]))
                    self.api.add_item(obj_type, newobj)
                except Exception, e:
                    utils.log_exc(self.logger)
Example No. 18
    def add_objects_not_on_local(self, obj_type):
        locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
        remotes = utils.loh_sort_by_key(self.remote_data[obj_type], "depth")
        remotes2 = utils.loh_to_hoh(self.remote_data[obj_type], "depth")
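        # note: remotes2 above is computed but never read in this revision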

        for rdata in remotes:

            # do not add the system if it is not on the transfer list
            if not self.must_include[obj_type].has_key(rdata["name"]):
                continue

            if not locals.has_key(rdata["uid"]):
                creator = getattr(self.api, "new_%s" % obj_type)
                newobj = creator()
                newobj.from_datastruct(rdata)
                try:
                    self.logger.info("adding %s %s" %
                                     (obj_type, rdata["name"]))
                    self.api.add_item(obj_type, newobj)
                except Exception, e:
                    utils.log_exc(self.logger)
Example No. 19
    def createrepo_walker(self, repo, dirname, fnames):
        """
        Used to run createrepo on a copied Yum mirror.
        """
        if os.path.exists(dirname) or repo["breed"] == "rsync":
            utils.remove_yum_olddata(dirname)

            # add any repo metadata we can use
            mdoptions = []
            if os.path.isfile("%s/repodata/repomd.xml" % (dirname)):
                if not HAS_YUM:
                    utils.die(self.logger, "yum is required to use this feature")

                rmd = yum.repoMDObject.RepoMD("", "%s/repodata/repomd.xml" % (dirname))
                if rmd.repoData.has_key("group"):
                    groupmdfile = rmd.getData("group").location[1]
                    mdoptions.append("-g %s" % groupmdfile)
                if rmd.repoData.has_key("prestodelta"):
                    # need createrepo >= 0.9.7 to add deltas
                    if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
                        cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
                        createrepo_ver = utils.subprocess_get(self.logger, cmd)
                        if createrepo_ver >= "0.9.7":
                            mdoptions.append("--deltas")
                        else:
                            utils.die(
                                self.logger,
                                "this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.",
                            )

            blended = utils.blender(self.api, False, repo)
            flags = blended.get("createrepo_flags", "(ERROR: FLAGS)")
            try:
                # BOOKMARK
                cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
                utils.subprocess_call(self.logger, cmd)
            except:
                utils.log_exc(self.logger)
                self.logger.error("createrepo failed.")
            del fnames[:]  # we're in the right place
Example No. 20
    def remove_objects_not_on_master(self, obj_type):
        locals = utils.lod_to_dod(self.local_data[obj_type], "uid")
        remotes = utils.lod_to_dod(self.remote_data[obj_type], "uid")

        obj_pattern = getattr(self, "%s_patterns" % obj_type)
        if obj_pattern and self.prune:
            self.logger.info("Found pattern for %s. Pruning non-matching items" % obj_type)
            keep_obj = {}
            remote_names = utils.loh_to_hoh(self.remote_data[obj_type], "name")
            for name in remote_names.keys():
                if name in self.must_include[obj_type] and remote_names[name]["uid"] in remotes:
                    self.logger.info("Adding %s:%s to keep list" % (name, remote_names[name]["uid"]))
                    keep_obj[remote_names[name]["uid"]] = remotes[remote_names[name]["uid"]]
            remotes = keep_obj

        for (luid, ldata) in locals.iteritems():
            if luid not in remotes:
                try:
                    self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
                    self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
                except Exception:
                    utils.log_exc(self.logger)
Example No. 21
    def signature_update(self, logger):
        try:
            tmpfile = tempfile.NamedTemporaryFile()
            response = urllib2.urlopen(self.settings().signature_url)
            sigjson = response.read()
            tmpfile.write(sigjson)
            tmpfile.flush()

            logger.debug("Successfully got file from %s" % self.settings().signature_url)
            # test the import without caching it
            if not utils.load_signatures(tmpfile.name, cache=False):
                logger.error("Downloaded signatures failed test load (tempfile = %s)" % tmpfile.name)
                return False

            # rewrite the real signature file and import it for real
            f = open(self.settings().signature_path,"w")
            f.write(sigjson)
            f.close()

            return utils.load_signatures(self.settings().signature_path)
        except:
            utils.log_exc(logger)
            return False
Example No. 22
    def process(self, message):
        recipient = IRecipient(self.context, None)
        if recipient is None:
            raise MailInException('Recipient not found.')

        # find principal
        from_hdr = parseaddr(message['From'])[1].lower()
        try:
            principal = getPrincipalByEMail(from_hdr)
        except PrincipalLookupError:
            if IAnonymousSupport.providedBy(recipient):
                principal = getUtility(IUnauthenticatedPrincipal)
            else:
                # member not found
                raise MailInException('Member not found: %s'%from_hdr)

        # set security context
        interaction = queryInteraction()
        if interaction is not None:
            request = copy.copy(interaction.participations[0])
        else:
            request = TestRequest()

        request.setPrincipal(principal)
        request.interaction = None

        endInteraction()
        newInteraction(request)

        # deliver message
        try:
            recipient.process(message)
        except:
            log_exc()

        # restore old security context
        restoreInteraction()
Example No. 23
    def yum_process_comps_file(self, comps_path, distro):
        """
        When importing Fedora/EL certain parts of the install tree can also be used
        as yum repos containing packages that might not yet be available via updates
        in yum.  This code identifies those areas. Existing repodata will be used as-is,
        but repodata is created for earlier, non-yum-based installers.
        """

        if os.path.exists(os.path.join(comps_path, "repodata")):
            keeprepodata = True
            masterdir = "repodata"
        else:
            # older distros...
            masterdir = "base"
            keeprepodata = False

        # figure out what our comps file is ...
        self.logger.info("looking for %(p1)s/%(p2)s/*comps*.xml" % {
            "p1": comps_path,
            "p2": masterdir
        })
        files = glob.glob("%s/%s/*comps*.xml" % (comps_path, masterdir))
        if len(files) == 0:
            self.logger.info("no comps found here: %s" %
                             os.path.join(comps_path, masterdir))
            return  # no comps xml file found

        # pull the filename from the longer part
        comps_file = files[0].split("/")[-1]

        try:
            # store the yum configs on the filesystem so we can use them later.
            # and configure them in the kickstart post, etc

            counter = len(distro.source_repos)

            # find path segment for yum_url (changing filesystem path to http:// trailing fragment)
            seg = comps_path.rfind("ks_mirror")
            urlseg = comps_path[seg + 10:]

            fname = os.path.join(self.settings.webdir, "ks_mirror", "config",
                                 "%s-%s.repo" % (distro.name, counter))

            repo_url = "http://@@http_server@@/cobbler/ks_mirror/config/%s-%s.repo" % (
                distro.name, counter)
            repo_url2 = "http://@@http_server@@/cobbler/ks_mirror/%s" % (
                urlseg)

            distro.source_repos.append([repo_url, repo_url2])

            # NOTE: the following file is now a Cheetah template, so it can be remapped
            # during sync, that's why we have the @@http_server@@ left as templating magic.
            # repo_url2 is actually no longer used. (?)

            config_file = open(fname, "w+")
            config_file.write("[core-%s]\n" % counter)
            config_file.write("name=core-%s\n" % counter)
            config_file.write(
                "baseurl=http://@@http_server@@/cobbler/ks_mirror/%s\n" %
                (urlseg))
            config_file.write("enabled=1\n")
            config_file.write("gpgcheck=0\n")
            config_file.write("priority=$yum_distro_priority\n")
            config_file.close()

            # don't run createrepo twice -- this can happen easily for Xen and PXE, when
            # they'll share the same repo files.
            if keeprepodata:
                self.logger.info("Keeping repodata as-is :%s/repodata" %
                                 comps_path)
                self.found_repos[comps_path] = 1

            elif comps_path not in self.found_repos:
                utils.remove_yum_olddata(comps_path)
                cmd = "createrepo %s --groupfile %s %s" % (
                    self.settings.createrepo_flags,
                    os.path.join(comps_path, masterdir,
                                 comps_file), comps_path)
                utils.subprocess_call(self.logger, cmd, shell=True)
                self.found_repos[comps_path] = 1
                # for older distros, if we have a "base" dir parallel with "repodata", we need to copy comps.xml up one...
                p1 = os.path.join(comps_path, "repodata", "comps.xml")
                p2 = os.path.join(comps_path, "base", "comps.xml")
                if os.path.exists(p1) and os.path.exists(p2):
                    shutil.copyfile(p1, p2)

        except:
            self.logger.error(
                "error launching createrepo (not installed?), ignoring")
            utils.log_exc(self.logger)
Example No. 24
    def run(self, name=None, verbose=True):
        """
        Syncs the current repo configuration file with the filesystem.
        """
            
        self.logger.info("run, reposync, run!")
        
        try:
            self.tries = int(self.tries)
        except:
            utils.die(self.logger,"retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:

            env = repo.environment

            for k in env.keys():
                self.logger.info("environment: %s=%s" % (k,env[k]))
                if env[k] is not None:
                    os.putenv(k,env[k])

            if name is not None and repo.name != name:
                # invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)
            mirror = repo.mirror

            if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
                os.makedirs(repo_path)

            # which may actually NOT reposync if the repo is set to not mirror locally
            # but that's a technicality

            for x in range(self.tries+1,1,-1):
                success = False
                try:
                    self.sync(repo) 
                    success = True
                    break
                except:
                    utils.log_exc(self.logger)
                    self.logger.warning("reposync failed, tries left: %s" % (x-2))

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die(self.logger,"reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error("reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")

        return True
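One caveat in this revision: os.putenv updates the environment seen by child processes but does not touch os.environ, so the values can neither be read back with os.getenv nor deleted afterwards; the later revisions below switch to os.environ for exactly that reason. A small illustration (the variable name is made up):

    import os

    os.putenv("REPO_PROXY", "http://proxy.example:3128")  # children only
    print(os.getenv("REPO_PROXY"))   # None: os.environ was not updated

    os.environ["REPO_PROXY"] = "http://proxy.example:3128"
    print(os.getenv("REPO_PROXY"))   # "http://proxy.example:3128"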
Example No. 25
    def yum_process_comps_file(self, comps_path, distro):
        """
        When importing Fedora/EL certain parts of the install tree can also be used
        as yum repos containing packages that might not yet be available via updates
        in yum.  This code identifies those areas. Existing repodata will be used as-is,
        but repodata is created for earlier, non-yum-based installers.
        """

        if os.path.exists(os.path.join(comps_path, "repodata")):
            keeprepodata = True
            masterdir = "repodata"
        else:
            # older distros...
            masterdir = "base"
            keeprepodata = False

        # figure out what our comps file is ...
        self.logger.info("looking for %(p1)s/%(p2)s/*comps*.xml" % {"p1": comps_path, "p2": masterdir})
        files = glob.glob("%s/%s/*comps*.xml" % (comps_path, masterdir))
        if len(files) == 0:
            self.logger.info("no comps found here: %s" % os.path.join(comps_path, masterdir))
            return      # no comps xml file found

        # pull the filename from the longer part
        comps_file = files[0].split("/")[-1]

        try:
            # store the yum configs on the filesystem so we can use them later.
            # and configure them in the automated installation file post section,
            # etc

            counter = len(distro.source_repos)

            # find path segment for yum_url (changing filesystem path to http:// trailing fragment)
            seg = comps_path.rfind("distro_mirror")
            urlseg = comps_path[(seg + len("distro_mirror") + 1):]

            fname = os.path.join(self.settings.webdir, "distro_mirror", "config", "%s-%s.repo" % (distro.name, counter))

            repo_url = "http://@@http_server@@/cobbler/distro_mirror/config/%s-%s.repo" % (distro.name, counter)
            repo_url2 = "http://@@http_server@@/cobbler/distro_mirror/%s" % (urlseg)

            distro.source_repos.append([repo_url, repo_url2])

            # NOTE: the following file is now a Cheetah template, so it can be remapped
            # during sync, that's why we have the @@http_server@@ left as templating magic.
            # repo_url2 is actually no longer used. (?)

            config_file = open(fname, "w+")
            config_file.write("[core-%s]\n" % counter)
            config_file.write("name=core-%s\n" % counter)
            config_file.write("baseurl=http://@@http_server@@/cobbler/distro_mirror/%s\n" % (urlseg))
            config_file.write("enabled=1\n")
            config_file.write("gpgcheck=0\n")
            config_file.write("priority=$yum_distro_priority\n")
            config_file.close()

            # don't run createrepo twice -- this can happen easily for Xen and PXE, when
            # they'll share the same repo files.
            if keeprepodata:
                self.logger.info("Keeping repodata as-is :%s/repodata" % comps_path)
                self.found_repos[comps_path] = 1

            elif comps_path not in self.found_repos:
                utils.remove_yum_olddata(comps_path)
                cmd = "createrepo %s --groupfile %s %s" % (self.settings.createrepo_flags, os.path.join(comps_path, masterdir, comps_file), comps_path)
                utils.subprocess_call(self.logger, cmd, shell=True)
                self.found_repos[comps_path] = 1
                # for older distros, if we have a "base" dir parallel with "repodata", we need to copy comps.xml up one...
                p1 = os.path.join(comps_path, "repodata", "comps.xml")
                p2 = os.path.join(comps_path, "base", "comps.xml")
                if os.path.exists(p1) and os.path.exists(p2):
                    shutil.copyfile(p1, p2)

        except:
            self.logger.error("error launching createrepo (not installed?), ignoring")
            utils.log_exc(self.logger)
Example No. 26
    def run(self, name=None, verbose=True):
        """
        Syncs the current repo configuration file with the filesystem.
        """

        self.logger.info("run, reposync, run!")

        try:
            self.tries = int(self.tries)
        except:
            utils.die(self.logger, "retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:
            if name is not None and repo.name != name:
                # invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)

            if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
                os.makedirs(repo_path)

            # set the environment keys specified for this repo
            # save the old ones if they modify an existing variable

            env = repo.environment
            old_env = {}

            for k in env.keys():
                self.logger.debug("setting repo environment: %s=%s" % (k, env[k]))
                if env[k] is not None:
                    if os.getenv(k):
                        old_env[k] = os.getenv(k)
                    else:
                        os.environ[k] = env[k]

            # which may actually NOT reposync if the repo is set to not mirror locally
            # but that's a technicality

            for x in range(self.tries + 1, 1, -1):
                success = False
                try:
                    self.sync(repo)
                    success = True
                    break
                except:
                    utils.log_exc(self.logger)
                    self.logger.warning("reposync failed, tries left: %s" % (x - 2))

            # cleanup/restore any environment variables that were
            # added or changed above

            for k in env.keys():
                if env[k] is not None:
                    if k in old_env:
                        self.logger.debug("resetting repo environment: %s=%s" % (k, old_env[k]))
                        os.environ[k] = old_env[k]
                    else:
                        self.logger.debug("removing repo environment: %s=%s" % (k, env[k]))
                        del os.environ[k]

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die(self.logger, "reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error("reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(self.logger, "overall reposync failed, at least one repo failed to synchronize")
Example No. 27
    def run(self, name=None, verbose=True):
        """
        Syncs the current repo configuration file with the filesystem.
        """

        self.logger.info("run, reposync, run!")

        try:
            self.tries = int(self.tries)
        except:
            utils.die(self.logger, "retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:

            env = repo.environment

            for k in env.keys():
                self.logger.info("environment: %s=%s" % (k, env[k]))
                if env[k] is not None:
                    os.putenv(k, env[k])

            if name is not None and repo.name != name:
                # invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)
            mirror = repo.mirror

            if not os.path.isdir(repo_path) and not repo.mirror.lower(
            ).startswith("rhn://"):
                os.makedirs(repo_path)

            # which may actually NOT reposync if the repo is set to not mirror locally
            # but that's a technicality

            for x in range(self.tries + 1, 1, -1):
                success = False
                try:
                    self.sync(repo)
                    success = True
                    break  # stop retrying after a successful sync
                except:
                    utils.log_exc(self.logger)
                    self.logger.warning("reposync failed, tries left: %s" %
                                        (x - 2))

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die(
                        self.logger,
                        "reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error(
                        "reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(
                self.logger,
                "overall reposync failed, at least one repo failed to synchronize"
            )

        return True
Example No. 28
    def run(self, name=None, verbose=True):
        """
        Syncs the current repo configuration file with the filesystem.
        """

        self.logger.info("run, reposync, run!")

        try:
            self.tries = int(self.tries)
        except:
            utils.die(self.logger, "retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:
            if name is not None and repo.name != name:
                # invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)

            if not os.path.isdir(repo_path) and not repo.mirror.lower(
            ).startswith("rhn://"):
                os.makedirs(repo_path)

            # set the environment keys specified for this repo
            # save the old ones if they modify an existing variable

            env = repo.environment
            old_env = {}

            for k in env.keys():
                self.logger.debug("setting repo environment: %s=%s" %
                                  (k, env[k]))
                if env[k] is not None:
                    if os.getenv(k):
                        old_env[k] = os.getenv(k)
                    else:
                        os.environ[k] = env[k]

            # which may actually NOT reposync if the repo is set to not mirror locally
            # but that's a technicality

            for x in range(self.tries + 1, 1, -1):
                success = False
                try:
                    self.sync(repo)
                    success = True
                    break
                except:
                    utils.log_exc(self.logger)
                    self.logger.warning("reposync failed, tries left: %s" %
                                        (x - 2))

            # cleanup/restore any environment variables that were
            # added or changed above

            for k in env.keys():
                if env[k] is not None:
                    if k in old_env:
                        self.logger.debug("resetting repo environment: %s=%s" %
                                          (k, old_env[k]))
                        os.environ[k] = old_env[k]
                    else:
                        self.logger.debug("removing repo environment: %s=%s" %
                                          (k, env[k]))
                        del os.environ[k]

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die(
                        self.logger,
                        "reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error(
                        "reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(
                self.logger,
                "overall reposync failed, at least one repo failed to synchronize"
            )
Example No. 29
        # workaround for profile inheritance, must load in order
        def __depth_sort(a, b):
            return cmp(a["depth"], b["depth"])

        remote_profiles.sort(__depth_sort)
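        # (Python 3 note: cmp() and comparator-style sort() are gone; the
        # equivalent is remote_profiles.sort(key=lambda p: p["depth"]).)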

        for profile in remote_profiles:
            self.logger.info("Importing remote profile %s" % profile["name"])
            if self.should_add_or_replace(profile, "profiles"):
                new_profile = self.api.new_profile()
                new_profile.from_datastruct(profile)
                try:
                    self.api.add_profile(new_profile)
                    self.logger.info("Copied profile %s." % profile["name"])
                except Exception, e:
                    utils.log_exc(self.logger)
                    self.logger.error("Failed to copy profile %s." % profile["name"])
            else:
                self.logger.info("Not copying profile %s, sufficiently new mtime" % profile["name"])

        # images
        self.logger.info("Copying Images")
        remote_images = self.remote.get_images()
        for image in remote_images:
            self.logger.info("Importing remote image %s" % image["name"])
            if self.should_add_or_replace(image, "images"):
                new_image = self.api.new_image()
                new_image.from_datastruct(image)
                try:
                    self.api.add_image(new_image)
                    self.logger.info("Copied image %s." % image["name"])