Example 1
def splitJobList(jobList,
                 src,
                 targetSet,
                 hidden=False,
                 callback=ChangesetCallback()):
    log.debug("Changeset Key conflict detected; splitting job further...")
    jobs = {}
    for job in jobList:
        name = job[0]
        if ':' in name:
            name = name.split(':')[0]
        l = jobs.setdefault(name, [])
        l.append(job)
    i = 0
    for smallJobList in jobs.itervalues():
        (outFd, tmpName) = util.mkstemp()
        os.close(outFd)
        log.debug(
            "jobsplit %d of %d %s" %
            (i + 1, len(jobs), displayBundle([(0, x) for x in smallJobList])))
        src.createChangeSetFile(smallJobList,
                                tmpName,
                                recurse=False,
                                callback=callback,
                                mirrorMode=True)
        for target in targetSet:
            target.commitChangeSetFile(tmpName,
                                       hidden=hidden,
                                       callback=callback)
        os.unlink(tmpName)
        callback.done()
        i += 1
    return
Example 2
def buildBundles(sourceRepos, target, troveList, absolute=False):
    bundles = []
    log.debug("grouping %d troves based on version and flavor", len(troveList))
    groupList = groupTroves(troveList)
    log.debug("building grouped job list")
    bundles = buildJobList(sourceRepos, target.repo, groupList, absolute)
    return bundles
Example 3
def recurseTrove(sourceRepos,
                 name,
                 version,
                 flavor,
                 callback=ChangesetCallback()):
    global recursedGroups
    assert (trove.troveIsGroup(name))
    # there's nothing much we can recurse from the source
    if name.endswith(":source"):
        return []
    # avoid grabbing the same group multiple times
    if (name, version, flavor) in recursedGroups:
        return []
    log.debug("recursing group trove: %s=%s[%s]" % (name, version, flavor))
    groupCs = sourceRepos.createChangeSet([(name, (None, None),
                                            (version, flavor), True)],
                                          withFiles=False,
                                          withFileContents=False,
                                          recurse=False,
                                          callback=callback)
    recursedGroups.add((name, version, flavor))
    ret = []
    for troveCs in groupCs.iterNewTroveList():
        for name, ops in troveCs.iterChangedTroves(True, True):
            for oper, version, flavor, byDefault in ops:
                if oper != '-':
                    ret.append((name, version, flavor))
    return ret
Example 5
 def _createDirs(self):
     for dir, mode, uid, gid in self.dirsToAdd:
         dir = self.root + dir
         log.debug("creating chroot:%s", dir)
         util.mkdirChain(dir)
         if mode:
             os.chmod(dir, mode)
         if (uid or gid) and not os.getuid():
             os.chown(dir, uid, gid)
Example 7
 def addTroveList(self, tl):
     # Filter out troves which are already in the local repository. Since
     # the marks aren't distinct (they increase, but not monotonically), it's
     # possible that something new got committed with the same mark we
     # last updated to, so we have to look again at all of the troves in the
     # source repository with the last mark which made it into our target.
     present = self.repo.hasTroves([x[1] for x in tl], hidden=True)
     ret = [x for x in tl if not present[x[1]]]
     log.debug("%s found %d troves not present", self.name, len(ret))
     return ret
Example 9
 def mirrorGPG(self, src, host):
     if self.__gpg.has_key(host):
         return
     keyList = src.getNewPGPKeys(host, -1)
     self.__gpg[host] = keyList
     if not len(keyList):
         return
     log.debug("%s adding %d gpg keys", self.name, len(keyList))
     if self.test:
         return
     self.repo.addPGPKeyList(self.cfg.host, keyList)
Example 11
 def _compare(src, dst):
     srcName, srcSet = src
     dstName, dstSet = dst
     counter = 0
     for x in srcSet.difference(dstSet):
         log.debug(" - %s %s " % (srcName, x))
         counter += 1
     for x in dstSet.difference(srcSet):
         log.debug(" + %s %s" % (dstName, x))
         counter += 1
     return counter
Example 13
def mainWorkflow(cfg = None, callback=ChangesetCallback(),
                 test=False, sync=False, infoSync=False,
                 checkSync=False, fastSync=False):
    import fcntl
    if cfg.lockFile:
        try:
            log.debug('checking for lock file')
            lock = open(cfg.lockFile, 'w')
            fcntl.lockf(lock, fcntl.LOCK_EX|fcntl.LOCK_NB)
        except IOError:
            log.warning('lock held by another process, exiting')
            return

    # need to make sure we have a 'source' section
    if not cfg.hasSection('source'):
        log.debug("ERROR: mirror configuration file is missing a [source] section")
        raise RuntimeError("Mirror configuration file is missing a [source] section")
    sourceRepos = _getMirrorClient(cfg, 'source')

    # Optional reference repository
    if cfg.hasSection('reference'):
        refRepos = _getMirrorClient(cfg, 'reference')
    else:
        refRepos = sourceRepos

    # we need to build a target repo client for each of the "target*"
    # sections in the config file
    targets = []
    for name in cfg.iterSectionNames():
        if not name.startswith("target"):
            continue
        target = _getMirrorClient(cfg, name)
        target = TargetRepository(target, cfg, name, test=test)
        targets.append(target)
    # checkSync is a special operation...
    if checkSync:
        return checkSyncRepos(cfg, refRepos, targets)
    # we pass in the sync flag only the first time around, because after
    # that we need the targetRepos mark to advance accordingly after being
    # reset to -1
    callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                 test = test, sync = sync,
                                 syncSigs = infoSync,
                                 callback = callback,
                                 fastSync = fastSync,
                                 referenceRepos=refRepos,
                                 )
    while callAgain:
        callAgain = mirrorRepository(sourceRepos, targets, cfg,
                                     test = test, callback = callback,
                                     fastSync = fastSync,
                                     referenceRepos=refRepos,
                                     )
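For context, the lock-file check at the top of mainWorkflow is a standard non-blocking fcntl pattern; here is a minimal standalone sketch of it (the lock path is illustrative, not taken from cfg.lockFile):

import fcntl

lock = open('/tmp/mirror.lock', 'w')   # illustrative path, not from the mirror config
try:
    # LOCK_EX | LOCK_NB: fail immediately with IOError instead of blocking
    # if another process already holds the exclusive lock.
    fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    print("lock held by another process, exiting")
else:
    print("lock acquired, safe to run the mirror loop")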
Example 14
 def addUser(self, name, uid, gid=None, home=None, shell='/bin/bash'):
     """
         Adds user that must be in /etc/passwd when root is instantiated
     """
     if gid is None:
         gid = uid
     if gid == uid:
         self.groupsToSupport.append((name, gid, []))
     if home is None:
         home = '/home/%s' % name
     log.debug("adding user %s (%s,%s) home=%s", name, uid, gid, home)
     self.usersToSupport.append((name, uid, gid, home, shell))
Example 15
 def _copyFiles(self):
     for (sourceFile, targetFile, mode) in self.filesToCopy:
         log.debug("copying file %s into chroot:%s", sourceFile, targetFile)
         try:
             target = self.root + targetFile
             target = os.path.realpath(target)
             util.mkdirChain(os.path.dirname(target))
             shutil.copy(sourceFile, target)
             if mode is not None:
                 os.chmod(target, mode)
         except (IOError, OSError), e:
             raise errors.OpenError("Could not copy in file %s to %s: %s" % (sourceFile, targetFile, e))
Example 16
 def addUser(self, name, uid, gid=None, home=None, shell="/bin/bash"):
     """
         Adds user that must be in /etc/passwd when root is instantiated
     """
     if gid is None:
         gid = uid
     if gid == uid:
         self.groupsToSupport.append((name, gid, []))
     if home is None:
         home = "/home/%s" % name
     log.debug("adding user %s (%s,%s) home=%s", name, uid, gid, home)
     self.usersToSupport.append((name, uid, gid, home, shell))
Example 17
 def _copyFiles(self):
     for (sourceFile, targetFile, mode) in self.filesToCopy:
         log.debug("copying file %s into chroot:%s", sourceFile, targetFile)
         try:
             target = self.root + targetFile
             target = os.path.realpath(target)
             util.mkdirChain(os.path.dirname(target))
             shutil.copy(sourceFile, target)
             if mode is not None:
                 os.chmod(target, mode)
         except (IOError, OSError), e:
             raise errors.OpenError('Could not copy in file %s to %s: %s' %
                                    (sourceFile, targetFile, e))
Example 18
 def commitChangeSetFile(self, filename, hidden, callback):
     if self.test:
         return 0
     callback.setPrefix(self.name + ": ")
     t1 = time.time()
     ret = self.repo.commitChangeSetFile(filename, mirror=True, hidden=hidden,
                                         callback=callback)
     t2 = time.time()
     callback.done()
     hstr = ""
     if hidden: hstr = "hidden "
     log.debug("%s %scommit (%.2f sec)", self.name, hstr, t2-t1)
     return ret
Example 19
def mirrorRemoved(sourceRepos, targetRepos, troveSet, test = False, callback = ChangesetCallback()):
    if not troveSet:
        return 0
    log.debug("checking on %d removed troves", len(troveSet))
    # these removed troves better exist on the target
    present = targetRepos.hasTroves(list(troveSet))
    missing = [ x for x in troveSet if not present[x] ]
    # we can not have any "missing" troves while we mirror removals
    for t in missing:
        log.warning("Mirroring removed trove: valid trove not found on target: %s", t)
        troveSet.remove(t)
    # for the remaining removed troves, are any of them already mirrored?
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    cs = targetRepos.createChangeSet(jobList, recurse=False, withFiles=False,
                                     withFileContents=False, callback=callback)
    for trvCs in cs.iterNewTroveList():
        if trvCs.getType() == trove.TROVE_TYPE_REMOVED:
            troveSet.remove(trvCs.getNewNameVersionFlavor())
    log.debug("mirroring %d removed troves", len(troveSet))
    if not troveSet:
        return 0
    jobList = [ (name, (None, None), (version, flavor), True) for
                (name, version, flavor) in troveSet ]
    log.debug("mirroring removed troves %s" % (displayJobList(jobList),))
    # grab the removed troves changeset
    cs = sourceRepos.createChangeSet(jobList, recurse = False,
                                     withFiles = False, withFileContents = False,
                                     callback = callback)
    log.debug("committing")
    targetRepos.commitChangeSet(cs, mirror = True, callback = callback)
    callback.done()
    return len(jobList)
Example 21
 def commitChangeSetFile(self, filename, hidden, callback):
     if self.test:
         return 0
     callback = copy.copy(callback)
     callback.setPrefix(self.name + ": ")
     t1 = time.time()
     ret = self.repo.commitChangeSetFile(filename, mirror=True, hidden=hidden,
                                         callback=callback)
     t2 = time.time()
     callback.done()
     hstr = ""
     if hidden: hstr = "hidden "
     log.debug("%s %scommit (%.2f sec)", self.name, hstr, t2-t1)
     return ret
Example 22
    def _copyDirs(self):
        for (sourceDir, targetDir) in self.dirsToCopy:
            targetDir = self.root + targetDir
            if os.path.exists(targetDir):
                if os.path.islink(targetDir):
                    os.unlink(targetDir)
                else:
                    util.rmtree(targetDir)

            util.mkdirChain(os.path.realpath(os.path.dirname(targetDir)))
            log.debug("copying dir %s into chroot:%s", sourceDir, targetDir)
            try:
                shutil.copytree(sourceDir, os.path.realpath(targetDir))
            except shutil.Error, e:
                errorList = "\n".join("cannot copy %s to %s: %s" % x for x in e.args[0])
                raise errors.OpenError("Could not copy in directory %s:\n%s" % (sourceDir, errorList))
Example 23
    def __init__(self, client):
        log.debug("loading system model cache");

        troveCache = trovecache.TroveCache(None)
        troveCache.load(client.cfg.dbPath + '/modelcache')

        model = cml.CML(client.cfg)
        troveSet = client.cmlGraph(model)
        troveSet.g.realize(modelupdate.CMLActionData(troveCache,
                                              client.cfg.flavor[0],
                                              client.getRepos(), client.cfg))

        self.troveTups = set()
        for withFiles, trv in troveCache.cache.values():
            for nvf in trv.iterTroveList(strongRefs = True, weakRefs = True):
                self.troveTups.add(nvf)
Example 24
    def __init__(self, client):
        log.debug("loading system model cache")

        troveCache = trovecache.TroveCache(None)
        troveCache.load(client.cfg.dbPath + '/modelcache')

        model = cml.CML(client.cfg)
        troveSet = client.cmlGraph(model)
        troveSet.g.realize(
            modelupdate.CMLActionData(troveCache, client.cfg.flavor[0],
                                      client.getRepos(), client.cfg))

        self.troveTups = set()
        for withFiles, trv in troveCache.cache.values():
            for nvf in trv.iterTroveList(strongRefs=True, weakRefs=True):
                self.troveTups.add(nvf)
Example 25
def mirrorTroveInfo(src, targets, mark, cfg, resync=False):
    if resync:
        log.debug("performing a full trove info sync")
        infoList = _getAllInfo(src, cfg)
        infoList = [(mark, t, ti) for t, ti in infoList ]
    else:
        log.debug("getting new trove info entries")
        infoList = _getNewInfo(src, cfg, mark)
    log.debug("obtained %d trove info records for mirroring", len(infoList))
    infoList = [(m,t,ti) for (m,t,ti) in infoList if _filterTup(t, cfg)]
    if not len(infoList):
        log.debug("no troveinfo records need to be mirrored")
        return 0
    log.debug("mirroring %d changed trove info records" % len(infoList))
    updateCount = sum(_parallel(targets,
        TargetRepository.setTroveInfo, infoList))
    return updateCount
Example 26
    def _copyDirs(self):
        for (sourceDir, targetDir) in self.dirsToCopy:
            targetDir = self.root + targetDir
            if os.path.exists(targetDir):
                if os.path.islink(targetDir):
                    os.unlink(targetDir)
                else:
                    util.rmtree(targetDir)

            util.mkdirChain(os.path.realpath(os.path.dirname(targetDir)))
            log.debug("copying dir %s into chroot:%s", sourceDir, targetDir)
            try:
                shutil.copytree(sourceDir, os.path.realpath(targetDir))
            except shutil.Error, e:
                errorList = '\n'.join('cannot copy %s to %s: %s' % x
                                      for x in e.args[0])
                raise errors.OpenError('Could not copy in directory %s:\n%s' %
                                       (sourceDir, errorList))
Example 27
def checkSyncRepos(config, sourceRepos, targetRepos):
    checkConfig(config)
    targets = _makeTargets(config, targetRepos)
    log.setVerbosity(log.DEBUG)

    # retrieve the set of troves from a given repository
    def _getTroveSet(config, repo):
        def _flatten(troveSpec):
            l = []
            for name, versionD in troveSpec.iteritems():
                for version, flavorList in versionD.iteritems():
                    l += [(name, version, flavor) for flavor in flavorList]
            return set(l)

        troveSpecs = {}
        if config.labels:
            d = troveSpecs.setdefault(None, {})
            for l in config.labels:
                d[l] = ''
            t = repo.getTroveVersionsByLabel(
                troveSpecs, troveTypes=netclient.TROVE_QUERY_ALL)
        else:
            troveSpecs = {None: None}
            t = repo.getTroveVersionList(config.host,
                                         troveSpecs,
                                         troveTypes=netclient.TROVE_QUERY_ALL)
        return _flatten(t)

    # compare source with each target
    def _compare(src, dst):
        srcName, srcSet = src
        dstName, dstSet = dst
        counter = 0
        for x in srcSet.difference(dstSet):
            log.debug(" - %s %s " % (srcName, x))
            counter += 1
        for x in dstSet.difference(srcSet):
            log.debug(" + %s %s" % (dstName, x))
            counter += 1
        return counter

    log.debug("Retrieving list of troves from source %s" %
              str(sourceRepos.c.map))
    sourceSet = _getTroveSet(config, sourceRepos)
    hasDiff = 0
    for target in targets:
        log.debug("Retrieving list of troves from %s %s" %
                  (target.name, str(target.repo.c.map)))
        targetSet = _getTroveSet(config, target.repo)
        log.debug("Diffing source and %s" % target.name)
        hasDiff += _compare(("source", sourceSet), (target.name, targetSet))
    log.debug("Done")
    return hasDiff
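checkSyncRepos raises the log verbosity before diffing; as a minimal sketch (assuming the conary.lib.log module these snippets rely on), this is all that is needed for the log.debug calls shown throughout these examples to become visible:

from conary.lib import log

log.setVerbosity(log.DEBUG)            # same call used in checkSyncRepos above
log.debug("retrieved %d troves", 42)   # lazy %-style arguments, as in most examples
log.debug("diffing source and %s" % "target1")  # pre-formatted style, also seen above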
Example 28
 def _exceptionOccured(self, exc_info):
     etype, e, tb = exc_info
     # format the exception
     msg = "%s" % etype.__name__
     s = str(e)
     if s:
         msg += ": %s" % s
     # get the line info that raised the exception
     inner = tb.tb_next
     while inner.tb_next:
         inner = inner.tb_next
     filename = inner.tb_frame.f_code.co_filename
     linenum = inner.tb_frame.f_lineno
     log.warning("Unhandled exception occurred when invoking callback:\n" "%s:%s\n" " %s", filename, linenum, msg)
     # log the full traceback if debugging (--debug=all)
     log.debug("".join(traceback.format_exception(*exc_info)))
     if not hasattr(self, "exceptions"):
         self.exceptions = []
     self.exceptions.append(e)
Example 29
 def setTroveInfo(self, infoList):
     log.debug("%s checking what troveinfo needs to be mirrored", self.name)
     # Items whose mark is the same as currentMark might not have their trove
     # available on the server (it might be coming as part of this mirror
     # run).
     inQuestion = [ x[1] for x in infoList if str(long(x[0])) >= self.mark ]
     present = self.repo.hasTroves(inQuestion, hidden=True)
     # filter out the not present troves which will get mirrored in
     # the current mirror run
     infoList = [ (t, ti) for (m, t, ti) in infoList if present.get(t, True) ]
     # avoid busy work for troveinfos which are empty
     infoList = [ (t, ti) for (t, ti) in infoList if len(ti.freeze()) > 0 ]
     if self.test:
         return 0
     try:
         self.repo.setTroveInfo(infoList)
     except errors.InvalidServerVersion: # to older servers we can only transport sigs
         infoList = [ (t, ti.sigs.freeze()) for t, ti in infoList ]
         # only send up the troves that actually have a signature change
         infoList = [ x for x in infoList if len(x[1]) > 0 ]
         log.debug("%s pushing %d trove sigs...", self.name, len(infoList))
         self.repo.setTroveSigs(infoList)
     else:
         log.debug("%s uploaded %d info records", self.name, len(infoList))
     return len(infoList)
Example 30
def getTrovesByPath(repos, pathList, versionFilter, flavorFilter, labelPath,
                    defaultFlavor):
    if not pathList:
        return []

    if versionFilter == VERSION_FILTER_ALL:
        queryFn = repos.getTroveVersionsByPath
    elif versionFilter == VERSION_FILTER_LEAVES:
        queryFn = repos.getTroveLeavesByPath
    elif versionFilter == VERSION_FILTER_LATEST:
        queryFn = repos.getTroveLeavesByPath
    else:
        assert (0)

    allResults = {}
    for label in labelPath:
        try:
            results = queryFn(pathList, label)
        except errors.MethodNotSupported:
            log.debug('repository server for the %s label does not support '
                      'queries by path' % label)
            continue
        for path, tups in results.iteritems():
            allResults.setdefault(path, []).extend(tups)

    allResults = [allResults.get(x) for x in pathList]

    finalList = []
    for tupList in allResults:
        if not tupList:
            continue
        source = trovesource.SimpleTroveSource(tupList)
        source.searchAsRepository()
        troveNames = set(x[0] for x in tupList)
        # no affinity when searching by path.
        results = getTrovesToDisplay(source, troveNames, [], [], versionFilter,
                                     flavorFilter, labelPath, defaultFlavor,
                                     None)
        finalList.extend(results)
    return finalList
Example 31
def getTrovesByPath(repos, pathList, versionFilter, flavorFilter, labelPath,
                    defaultFlavor):
    if not pathList:
        return []

    if versionFilter == VERSION_FILTER_ALL:
        queryFn = repos.getTroveVersionsByPath
    elif versionFilter == VERSION_FILTER_LEAVES:
        queryFn = repos.getTroveLeavesByPath
    elif versionFilter == VERSION_FILTER_LATEST:
        queryFn = repos.getTroveLeavesByPath
    else:
        assert(0)

    allResults = {}
    for label in labelPath:
        try:
            results = queryFn(pathList, label)
        except errors.MethodNotSupported:
            log.debug('repository server for the %s label does not support '
                      'queries by path' %label)
            continue
        for path, tups in results.iteritems():
            allResults.setdefault(path, []).extend(tups)

    allResults = [ allResults.get(x) for x in pathList ]

    finalList = [ ]
    for tupList in allResults:
        if not tupList:
            continue
        source = trovesource.SimpleTroveSource(tupList)
        source.searchAsRepository()
        troveNames = set(x[0] for x in tupList)
        # no affinity when searching by path.
        results = getTrovesToDisplay(source, troveNames, [], [],
                                     versionFilter, flavorFilter, labelPath,
                                     defaultFlavor, None)
        finalList.extend(results)
    return finalList
Example 32
 def setTroveInfo(self, infoList):
     log.debug("%s checking what troveinfo needs to be mirrored", self.name)
     # Items whose mark is the same as currentMark might not have their trove
     # available on the server (it might be coming as part of this mirror
     # run).
     inQuestion = [x[1] for x in infoList if str(long(x[0])) >= self.mark]
     present = self.repo.hasTroves(inQuestion, hidden=True)
     # filter out the not present troves which will get mirrored in
     # the current mirror run
     infoList = [(t, ti) for (m, t, ti) in infoList if present.get(t, True)]
     # avoid busy work for troveinfos which are empty
     infoList = [(t, ti) for (t, ti) in infoList if len(ti.freeze()) > 0]
     if self.test:
         return 0
     try:
         self.repo.setTroveInfo(infoList)
     except errors.InvalidServerVersion:  # to older servers we can only transport sigs
         infoList = [(t, ti.sigs.freeze()) for t, ti in infoList]
         # only send up the troves that actually have a signature change
         infoList = [x for x in infoList if len(x[1]) > 0]
         log.debug("%s pushing %d trove sigs...", self.name, len(infoList))
         self.repo.setTroveSigs(infoList)
     else:
         log.debug("%s uploaded %d info records", self.name, len(infoList))
     return len(infoList)
Example 33
def parseUpdateList(updateList,
                    keepExisting,
                    updateByDefault=True,
                    withFrozenFlavor=False):
    # If keepExisting is true, we want our specifications to be relative
    # to nothing. If it's false, they should be absolute as updateChangeSet
    # interperts absolute jobs as ones which should be rooted (if there is
    # anything available to root them to).

    areAbsolute = not keepExisting

    applyList = []

    if type(updateList) is str:
        updateList = (updateList, )

    for updateStr in updateList:
        if os.path.exists(updateStr) and os.path.isfile(updateStr):
            applyList.append(_getChangeSet(updateStr))
            continue
        else:
            troveSpec = parseTroveSpec(updateStr,
                                       withFrozenFlavor=withFrozenFlavor)
            if troveSpec[0][0] == '-':
                applyList.append(
                    (troveSpec[0], troveSpec[1:], (None, None), False))
            elif troveSpec[0][0] == '+':
                applyList.append(
                    (troveSpec[0], (None, None), troveSpec[1:], areAbsolute))
            elif updateByDefault:
                applyList.append(
                    (troveSpec[0], (None, None), troveSpec[1:], areAbsolute))
            else:
                applyList.append(
                    (troveSpec[0], troveSpec[1:], (None, None), False))
            log.debug("will look for %s", applyList[-1])

    # dedup
    return set(applyList)
Example 34
 def _exceptionOccured(self, exc_info):
     etype, e, tb = exc_info
     # format the exception
     msg = '%s' % etype.__name__
     s = str(e)
     if s:
         msg += ': %s' % s
     # get the line info that raised the exception
     inner = tb.tb_next
     while inner.tb_next:
         inner = inner.tb_next
     filename = inner.tb_frame.f_code.co_filename
     linenum = inner.tb_frame.f_lineno
     log.warning(
         'Unhandled exception occurred when invoking callback:\n'
         '%s:%s\n'
         ' %s', filename, linenum, msg)
     # log the full traceback if debugging (--debug=all)
     log.debug(''.join(traceback.format_exception(*exc_info)))
     if not hasattr(self, 'exceptions'):
         self.exceptions = []
     self.exceptions.append(e)
Example 35
def _getAllInfo(src, cfg):
    log.debug("resync all trove info from source. This will take a while...")
    # grab the full list of all the trove versions and flavors in the src
    troveDict = src.getTroveVersionList(cfg.host, { None : None })
    troveList = []
    # filter out the stuff we don't need
    for name, versionD in troveDict.iteritems():
        for version, flavorList in versionD.iteritems():
            for flavor in flavorList:
                tup = (name, version, flavor)
                troveList.append(tup)
    del troveDict
    # retrieve the sigs and the metadata records to sync over
    sigList = src.getTroveSigs(troveList)
    metaList = src.getTroveInfo(trove._TROVEINFO_TAG_METADATA, troveList)
    infoList = []
    for t, s, ti in itertools.izip(troveList, sigList, metaList):
        if ti is None:
            ti = trove.TroveInfo()
        ti.sigs.thaw(s)
        infoList.append((t, ti))
    return infoList
Example 37
def copytree(sources, dest, symlinks=False, filemode=None, dirmode=None, fileowner=None, dirowner=None, callback=None):
    """
    Copies tree(s) from sources to dest, returning a list of
    the filenames that it has written.
    """
    sourcelist = []
    totalFiles = 0
    for source in util.braceGlob(sources):
        if os.path.isdir(source):
            if source[-1] == "/":
                source = source[:-1]
            thisdest = "%s%s%s" % (dest, os.sep, os.path.basename(source))
            log.debug("copying [tree] %s to %s", source, thisdest)
            _copytree(source, thisdest, symlinks, callback=callback)
            if dirmode:
                os.chmod(thisdest, dirmode)
            if dirowner:
                os.chown(thisdest, *dirowner)
            os.path.walk(
                source, _copyVisit, (sourcelist, len(source), thisdest, filemode, dirmode, fileowner, dirowner)
            )
        else:
            log.debug("copying [file] %s to %s", source, dest)
            shutil.copy2(source, dest)
            totalFiles += 1
            if callback:
                callback(totalFiles)

            if dest.endswith(os.sep):
                thisdest = dest + os.sep + os.path.basename(source)
            else:
                thisdest = dest
            if filemode:
                os.chmod(thisdest, filemode)
            if fileowner:
                os.chown(thisdest, *fileowner)
            sourcelist.append(thisdest)
    return sourcelist
Example 38
def splitJobList(jobList, src, targetSet, hidden = False, callback = ChangesetCallback()):
    log.debug("Changeset Key conflict detected; splitting job further...")
    jobs = {}
    for job in jobList:
        name = job[0]
        if ':' in name:
            name = name.split(':')[0]
        l = jobs.setdefault(name, [])
        l.append(job)
    i = 0
    for smallJobList in jobs.itervalues():
        (outFd, tmpName) = util.mkstemp()
        os.close(outFd)
        log.debug("jobsplit %d of %d %s" % (
            i + 1, len(jobs), displayBundle([(0,x) for x in smallJobList])))
        src.createChangeSetFile(smallJobList, tmpName, recurse = False,
                                callback = callback, mirrorMode = True)
        for target in targetSet:
            target.commitChangeSetFile(tmpName, hidden = hidden, callback = callback)
        os.unlink(tmpName)
        callback.done()
        i += 1
    return
Example 39
def recurseTrove(sourceRepos, name, version, flavor,
                 callback = ChangesetCallback()):
    global recursedGroups
    assert(trove.troveIsGroup(name))
    # there's nothing much we can recurse from the source
    if name.endswith(":source"):
        return []
    # avoid grabbing the same group multiple times
    if (name, version, flavor) in recursedGroups:
        return []
    log.debug("recursing group trove: %s=%s[%s]" % (name, version, flavor))
    groupCs = sourceRepos.createChangeSet(
        [(name, (None, None), (version, flavor), True)],
        withFiles=False, withFileContents=False, recurse=False,
        callback = callback)
    recursedGroups.add((name, version, flavor))
    ret = []
    for troveCs in groupCs.iterNewTroveList():
        for name, ops in troveCs.iterChangedTroves(True, True):
            for oper, version, flavor, byDefault in ops:
                if oper != '-':
                    ret.append((name, version, flavor))
    return ret
Example 40
def parseUpdateList(updateList, keepExisting, updateByDefault=True,
        withFrozenFlavor=False):
    # If keepExisting is true, we want our specifications to be relative
    # to nothing. If it's false, they should be absolute as updateChangeSet
    # interprets absolute jobs as ones which should be rooted (if there is
    # anything available to root them to).

    areAbsolute = not keepExisting

    applyList = []

    if type(updateList) is str:
        updateList = ( updateList, )

    for updateStr in updateList:
        if os.path.exists(updateStr) and os.path.isfile(updateStr):
            applyList.append(_getChangeSet(updateStr))
            continue
        else:
            troveSpec = parseTroveSpec(updateStr,
                withFrozenFlavor=withFrozenFlavor)
            if troveSpec[0][0] == '-':
                applyList.append((troveSpec[0], troveSpec[1:],
                                  (None, None), False))
            elif troveSpec[0][0] == '+':
                applyList.append((troveSpec[0], (None, None),
                                  troveSpec[1:], areAbsolute))
            elif updateByDefault:
                applyList.append((troveSpec[0], (None, None),
                                  troveSpec[1:], areAbsolute))
            else:
                applyList.append((troveSpec[0], troveSpec[1:],
                                  (None, None), False))
            log.debug("will look for %s", applyList[-1])

    # dedup
    return set(applyList)
Example 41
def checkSyncRepos(config, sourceRepos, targetRepos):
    checkConfig(config)
    targets = _makeTargets(config, targetRepos)
    log.setVerbosity(log.DEBUG)

    # retrieve the set of troves from a given repository
    def _getTroveSet(config, repo):
        def _flatten(troveSpec):
            l = []
            for name, versionD in troveSpec.iteritems():
                for version, flavorList in versionD.iteritems():
                    l += [ (name, version, flavor) for flavor in flavorList ]
            return set(l)
        troveSpecs = {}
        if config.labels:
            d = troveSpecs.setdefault(None, {})
            for l in config.labels:
                d[l] = ''
            t = repo.getTroveVersionsByLabel(troveSpecs, troveTypes = netclient.TROVE_QUERY_ALL)
        else:
            troveSpecs = {None : None}
            t = repo.getTroveVersionList(config.host, troveSpecs,
                                         troveTypes = netclient.TROVE_QUERY_ALL)
        return _flatten(t)
    # compare source with each target
    def _compare(src, dst):
        srcName, srcSet = src
        dstName, dstSet = dst
        counter = 0
        for x in srcSet.difference(dstSet):
            log.debug(" - %s %s " % (srcName, x))
            counter += 1
        for x in dstSet.difference(srcSet):
            log.debug(" + %s %s" % (dstName, x))
            counter += 1
        return counter
    log.debug("Retrieving list of troves from source %s" % str(sourceRepos.c.map))
    sourceSet = _getTroveSet(config, sourceRepos)
    hasDiff = 0
    for target in targets:
        log.debug("Retrieving list of troves from %s %s" % (target.name, str(target.repo.c.map)))
        targetSet = _getTroveSet(config, target.repo)
        log.debug("Diffing source and %s" % target.name)
        hasDiff += _compare( ("source", sourceSet), (target.name, targetSet) )
    log.debug("Done")
    return hasDiff
Example 42
def _getNewSigs(src, cfg, mark):
    # talking to an old source server. We do the best we can and get the sigs out
    sigList = src.getNewSigList(cfg.host, str(mark))
    log.debug("obtained %d changed trove sigs", len(sigList))
    sigList = [ x for x in sigList if _filterTup(x[1], cfg) ]
    log.debug("%d changed sigs after label and match filtering", len(sigList))
    # protection against duplicate items returned in the list by some servers
    sigList = list(set(sigList))
    sigList.sort(lambda a,b: cmp(a[0], b[0]))
    log.debug("downloading %d signatures from source repository", len(sigList))
    # XXX: we could also get the metadata in here, but getTroveInfo
    # would use a getChangeSet call against older repos, severely
    # impacting performance
    sigs = src.getTroveSigs([ x[1] for x in sigList ])
    # need to convert the sigs into TroveInfo instances
    def _sig2info(sig):
        ti = trove.TroveInfo()
        ti.sigs.thaw(sig)
        return ti
    sigs = [ _sig2info(s) for s in sigs]
    # we're gonna iterate repeatedly over the returned set, no itertools can do
    return [(m, t, ti) for (m,t),ti in itertools.izip(sigList, sigs) ]
Example 44
def mirrorRepository(
        sourceRepos,
        targetRepos,
        cfg,
        test=False,
        sync=False,
        syncSigs=False,
        callback=ChangesetCallback(),
        fastSync=False,
        referenceRepos=None,
):
    if referenceRepos is None:
        referenceRepos = sourceRepos
    checkConfig(cfg)
    targets = _makeTargets(cfg, targetRepos, test)
    log.debug("-" * 20 + " start loop " + "-" * 20)

    hidden = len(targets) > 1 or cfg.useHiddenCommits
    if hidden:
        log.debug("will use hidden commits to synchronize target mirrors")

    if sync:
        currentMark = -1
    else:
        marks = [t.getMirrorMark() for t in targets]
        # we use the oldest mark as a starting point (since we have to
        # get stuff from source for that oldest one anyway)
        currentMark = min(marks)
    log.debug("using common mirror mark %s", currentMark)
    # reset mirror mark to the lowest common denominator
    for t in targets:
        if t.getMirrorMark() != currentMark:
            t.setMirrorMark(currentMark)
    # mirror gpg signatures from the src into the targets
    for t in targets:
        t.mirrorGPG(referenceRepos, cfg.host)
    # mirror changed trove information for troves already mirrored
    if fastSync:
        updateCount = 0
        log.debug("skip trove info records sync because of fast-sync")
    else:
        updateCount = mirrorTroveInfo(referenceRepos, targets, currentMark,
                                      cfg, syncSigs)
    newMark, troveList = getTroveList(referenceRepos, cfg, currentMark)
    if not troveList:
        if newMark > currentMark:  # something was returned, but filtered out
            for t in targets:
                t.setMirrorMark(newMark)
            return -1  # call again
        return 0
    # prepare a new max mark to be used when we need to break out of a loop
    crtMaxMark = max(long(x[0]) for x in troveList)
    if currentMark > 0 and crtMaxMark == currentMark:
        # if we're hung on the current max then we need to
        # forcibly advance the mark in case we're stuck
        crtMaxMark += 1  # only used if we filter out all troves below
    initTLlen = len(troveList)

    # removed troves are a special blend - we keep them separate
    removedSet = set(
        [x[1] for x in troveList if x[2] == trove.TROVE_TYPE_REMOVED])
    troveList = [(x[0], x[1]) for x in troveList
                 if x[2] != trove.TROVE_TYPE_REMOVED]

    # figure out if we need to recurse the group-troves
    if cfg.recurseGroups:
        # avoid adding duplicates
        troveSetList = set([x[1] for x in troveList])
        for mark, (name, version, flavor) in troveList:
            if trove.troveIsGroup(name):
                recTroves = recurseTrove(referenceRepos,
                                         name,
                                         version,
                                         flavor,
                                         callback=callback)

                # add sources here:
                if cfg.includeSources:
                    troveInfo = referenceRepos.getTroveInfo(
                        trove._TROVEINFO_TAG_SOURCENAME, recTroves)
                    sourceComps = set()
                    for nvf, source in itertools.izip(recTroves, troveInfo):
                        sourceComps.add((source(), nvf[1].getSourceVersion(),
                                         parseFlavor('')))
                    recTroves.extend(sourceComps)

                # add the results at the end with the current mark
                for (n, v, f) in recTroves:
                    if (n, v, f) not in troveSetList:
                        troveList.append((mark, (n, v, f)))
                        troveSetList.add((n, v, f))
        log.debug("after group recursion %d troves are needed", len(troveList))
        # we need to make sure we mirror the GPG keys of any newly added troves
        newHosts = set(
            [x[1].getHost() for x in troveSetList.union(removedSet)])
        for host in newHosts.difference(set([cfg.host])):
            for t in targets:
                t.mirrorGPG(referenceRepos, host)

    # we check which troves from the troveList are needed on each
    # target and we split the troveList into separate lists depending
    # on how many targets require each
    byTarget = {}
    targetSetList = []
    if len(troveList):
        byTrove = {}
        for i, target in enumerate(targets):
            for t in target.addTroveList(troveList):
                bt = byTrove.setdefault(t, set())
                bt.add(i)
        # invert the dict by target now
        for trv, ts in byTrove.iteritems():
            targetSet = [targets[i] for i in ts]
            try:
                targetIdx = targetSetList.index(targetSet)
            except ValueError:
                targetSetList.append(targetSet)
                targetIdx = len(targetSetList) - 1
            bt = byTarget.setdefault(targetIdx, [])
            bt.append(trv)
        del byTrove
    # if we were returned troves, but we filtered them all out, advance the
    # mark and signal "try again"
    if len(byTarget) == 0 and len(removedSet) == 0 and initTLlen:
        # we had troves and now we don't
        log.debug("no troves found for our label %s" % cfg.labels)
        for t in targets:
            t.setMirrorMark(crtMaxMark)
        # try again
        return -1

    # now we get each section of the troveList for each targetSet. We
    # start off mirroring by those required by fewer targets, using
    # the assumption that those troves are what is required for the
    # targets to catch up to a common set
    if len(byTarget) > 1:
        log.debug("split %d troves into %d chunks by target", len(troveList),
                  len(byTarget))
    # sort the targetSets by length
    targetSets = list(enumerate(targetSetList))
    targetSets.sort(lambda a, b: cmp(len(a[1]), len(b[1])))
    bundlesMark = 0
    for idx, targetSet in targetSets:
        troveList = byTarget[idx]
        if not troveList:  # XXX: should not happen...
            continue
        log.debug("mirroring %d troves into %d targets", len(troveList),
                  len(targetSet))
        # since these troves are required for all targets, we can use
        # the "first" one to build the relative changeset requests
        target = list(targetSet)[0]
        bundles = buildBundles(sourceRepos, target, troveList,
                               cfg.absoluteChangesets)
        for i, bundle in enumerate(bundles):
            jobList = [x[1] for x in bundle]
            # XXX it's a shame we can't give a hint as to what server to use
            # to avoid having to open the changeset and read in bits of it
            if test:
                log.debug("test mode: not mirroring (%d of %d) %s" %
                          (i + 1, len(bundles), jobList))
                updateCount += len(bundle)
                continue
            (outFd, tmpName) = util.mkstemp()
            os.close(outFd)
            log.debug("getting (%d of %d) %s" %
                      (i + 1, len(bundles), displayBundle(bundle)))
            try:
                sourceRepos.createChangeSetFile(jobList,
                                                tmpName,
                                                recurse=False,
                                                callback=callback,
                                                mirrorMode=True)
            except changeset.ChangeSetKeyConflictError:
                splitJobList(jobList,
                             sourceRepos,
                             targetSet,
                             hidden=hidden,
                             callback=callback)
            else:
                for target in targetSet:
                    target.commitChangeSetFile(tmpName,
                                               hidden=hidden,
                                               callback=callback)
            try:
                os.unlink(tmpName)
            except OSError:
                pass
            callback.done()
        updateCount += len(bundle)
        # compute the max mark of the bundles we committed
        mark = max([min([x[0] for x in bundle]) for bundle in bundles])
        if mark > bundlesMark:
            bundlesMark = mark
    else:  # only when we're all done looping advance mark to the new max
        if bundlesMark == 0 or bundlesMark <= currentMark:
            bundlesMark = crtMaxMark  # avoid repeating the same query...
        for target in targets:
            if hidden:  # if we've hidden the last commits, show them now
                target.presentHiddenTroves()
            target.setMirrorMark(bundlesMark)
    # mirroring removed troves requires one by one processing
    for target in targets:
        copySet = removedSet.copy()
        updateCount += mirrorRemoved(referenceRepos,
                                     target.repo,
                                     copySet,
                                     test=test,
                                     callback=callback)
    # if this was a noop because the removed troves were already mirrored
    # we need to keep going
    if updateCount == 0 and len(removedSet):
        for target in targets:
            target.setMirrorMark(crtMaxMark)
        return -1
    return updateCount
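The grouping step in the middle of mirrorRepository (byTrove inverted into byTarget) is easier to follow on toy data; a self-contained sketch with made-up trove names and two targets:

# which troves each target still needs (normally the result of addTroveList)
needed = [{"foo:runtime", "bar:runtime"},   # target 0
          {"bar:runtime"}]                  # target 1

byTrove = {}
for i, wanted in enumerate(needed):
    for trv in wanted:
        byTrove.setdefault(trv, set()).add(i)

# invert: group troves by the exact set of targets that require them
byTarget = {}
targetSetList = []
for trv, ts in byTrove.items():
    targetSet = sorted(ts)
    if targetSet not in targetSetList:
        targetSetList.append(targetSet)
    byTarget.setdefault(targetSetList.index(targetSet), []).append(trv)

# foo:runtime is needed only by target 0, bar:runtime by both, so they land
# in different chunks and are mirrored to different target sets.
print(targetSetList)   # e.g. [[0], [0, 1]]
print(byTarget)        # e.g. {0: ['foo:runtime'], 1: ['bar:runtime']}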
Example 45
 def dbg(self, *args, **kwargs):
     args = self._addClassName(args)
     log.debug(*args, **kwargs)
Example 46
def getTroveList(src, cfg, mark):
    # FIXME: getNewTroveList should accept and only return troves on
    # the labels we're interested in
    log.debug("looking for new troves")
    # make sure we always treat the mark as an integer
    troveList = [(long(m), (n, v, f), t)
                 for m, (n, v,
                         f), t in src.getNewTroveList(cfg.host, str(mark))]
    if not len(troveList):
        # this should be the end - no more troves to look at
        log.debug("no new troves found")
        return (mark, [])
    # we need to protect ourselves from duplicate items in the troveList
    l = len(troveList)
    troveList = list(set(troveList))
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after duplicate elimination %d troves are left",
                  len(troveList))
    # if we filter out the entire list of troves we have been
    # returned, we need to tell the caller what was the highest mark
    # we had so it can continue asking for more
    maxMark = max([x[0] for x in troveList])
    # filter out troves on labels and parse through matchTroves
    troveList = [x for x in troveList if _filterTup(x[1], cfg)]
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after label filtering and matchTroves %d troves are left",
                  l)
        if not troveList:
            return (maxMark, [])
    # sort deterministically by mark, version, flavor, reverse name
    troveList.sort(lambda a, b: cmp(a[0], b[0]) or cmp(a[1][1], b[1][1]) or
                   cmp(a[1][2], b[1][2]) or cmp(b[1][0], a[1][0]))
    log.debug("%d new troves returned", len(troveList))
    # We cut off the last troves that have the same flavor, version to
    # avoid committing an incomplete trove. This could happen if the
    # server side only listed some of a trove's components due to
    # server side limits on how many results it can return on each query
    lastIdx = len(troveList) - 1
    # compare with the last one
    ml, (nl, vl, fl), tl = troveList[-1]
    while lastIdx >= 0:
        lastIdx -= 1
        m, (n, v, f), t = troveList[lastIdx]
        if v == vl and f == fl:
            continue
        lastIdx += 1
        break
    # the min mark of the troves we skip has to be higher than max
    # mark of troves we'll commit or otherwise we'll skip them for good...
    if lastIdx >= 0:
        firstMark = max([x[0] for x in troveList[:lastIdx]])
        lastMark = min([x[0] for x in troveList[lastIdx:]])
        if lastMark > firstMark:
            troveList = troveList[:lastIdx]
            log.debug("reduced new trove list to %d to avoid partial commits",
                      len(troveList))
    # since we're returning at least one trove, the caller will make the next mark decision
    return (mark, troveList)
Example 47
def mirrorRepository(sourceRepos, targetRepos, cfg,
                     test = False, sync = False, syncSigs = False,
                     callback = ChangesetCallback(),
                     fastSync = False,
                     referenceRepos=None,
                     ):
    if referenceRepos is None:
        referenceRepos = sourceRepos
    checkConfig(cfg)
    targets = _makeTargets(cfg, targetRepos, test)
    log.debug("-" * 20 + " start loop " + "-" * 20)

    hidden = len(targets) > 1 or cfg.useHiddenCommits
    if hidden:
        log.debug("will use hidden commits to synchronize target mirrors")

    if sync:
        currentMark = -1
    else:
        marks = [ t.getMirrorMark() for t in targets ]
        # we use the oldest mark as a starting point (since we have to
        # get stuff from source for that oldest one anyway)
        currentMark = min(marks)
    log.debug("using common mirror mark %s", currentMark)
    # reset mirror mark to the lowest common denominator
    for t in targets:
        if t.getMirrorMark() != currentMark:
            t.setMirrorMark(currentMark)
    # mirror gpg signatures from the src into the targets
    for t in targets:
        t.mirrorGPG(referenceRepos, cfg.host)
    # mirror changed trove information for troves already mirrored
    if fastSync:
        updateCount = 0
        log.debug("skip trove info records sync because of fast-sync")
    else:
        updateCount = mirrorTroveInfo(referenceRepos, targets, currentMark,
                cfg, syncSigs)
    newMark, troveList = getTroveList(referenceRepos, cfg, currentMark)
    if not troveList:
        if newMark > currentMark: # something was returned, but filtered out
            for t in targets:
                t.setMirrorMark(newMark)
            return -1 # call again
        return 0
    # prepare a new max mark to be used when we need to break out of a loop
    crtMaxMark = max(long(x[0]) for x in troveList)
    if currentMark > 0 and crtMaxMark == currentMark:
        # if we're hung on the current max then we need to
        # forcibly advance the mark in case we're stuck
        crtMaxMark += 1 # only used if we filter out all troves below
    initTLlen = len(troveList)

    # removed troves are a special blend - we keep them separate
    removedSet  = set([ x[1] for x in troveList if x[2] == trove.TROVE_TYPE_REMOVED ])
    troveList = [ (x[0], x[1]) for x in troveList if x[2] != trove.TROVE_TYPE_REMOVED ]

    # figure out if we need to recurse the group-troves
    if cfg.recurseGroups:
        # avoid adding duplicates
        troveSetList = set([x[1] for x in troveList])
        for mark, (name, version, flavor) in troveList:
            if trove.troveIsGroup(name):
                recTroves = recurseTrove(referenceRepos, name,
                        version, flavor, callback=callback)

                # add sources here:
                if cfg.includeSources:
                    troveInfo = referenceRepos.getTroveInfo(
                        trove._TROVEINFO_TAG_SOURCENAME, recTroves)
                    sourceComps = set()
                    for nvf, source in itertools.izip(recTroves, troveInfo):
                        sourceComps.add((source(), nvf[1].getSourceVersion(),
                                         parseFlavor('')))
                    recTroves.extend(sourceComps)

                # add the results at the end with the current mark
                for (n, v, f) in recTroves:
                    if (n, v, f) not in troveSetList:
                        troveList.append((mark, (n, v, f)))
                        troveSetList.add((n, v, f))
        log.debug("after group recursion %d troves are needed", len(troveList))
        # we need to make sure we mirror the GPG keys of any newly added troves
        newHosts = set([x[1].getHost() for x in troveSetList.union(removedSet)])
        for host in newHosts.difference(set([cfg.host])):
            for t in targets:
                t.mirrorGPG(referenceRepos, host)

    # we check which troves from the troveList are needed on each
    # target and we split the troveList into separate lists depending
    # on how many targets require each
    byTarget = {}
    targetSetList = []
    if len(troveList):
        byTrove = {}
        for i, target in enumerate(targets):
            for t in target.addTroveList(troveList):
                bt = byTrove.setdefault(t, set())
                bt.add(i)
        # invert the dict by target now
        for trv, ts in byTrove.iteritems():
            targetSet = [ targets[i] for i in ts ]
            try:
                targetIdx = targetSetList.index(targetSet)
            except ValueError:
                targetSetList.append(targetSet)
                targetIdx = len(targetSetList)-1
            bt = byTarget.setdefault(targetIdx, [])
            bt.append(trv)
        del byTrove
    # if we were returned troves, but we filtered them all out, advance the
    # mark and signal "try again"
    if len(byTarget) == 0 and len(removedSet) == 0 and initTLlen:
        # we had troves and now we don't
        log.debug("no troves found for our label %s" % cfg.labels)
        for t in targets:
            t.setMirrorMark(crtMaxMark)
        # try again
        return -1

    # now we get each section of the troveList for each targetSet. We
    # start off mirroring by those required by fewer targets, using
    # the assumption that those troves are what is required for the
    # targets to catch up to a common set
    if len(byTarget) > 1:
        log.debug("split %d troves into %d chunks by target", len(troveList), len(byTarget))
    # sort the targetSets by length
    targetSets = list(enumerate(targetSetList))
    targetSets.sort(lambda a,b: cmp(len(a[1]), len(b[1])))
    bundlesMark = 0
    for idx, targetSet in targetSets:
        troveList = byTarget[idx]
        if not troveList: # XXX: should not happen...
            continue
        log.debug("mirroring %d troves into %d targets", len(troveList), len(targetSet))
        # since these troves are required for all targets, we can use
        # the "first" one to build the relative changeset requests
        target = list(targetSet)[0]
        bundles = buildBundles(sourceRepos, target, troveList, cfg.absoluteChangesets)
        for i, bundle in enumerate(bundles):
            jobList = [ x[1] for x in bundle ]
            # XXX it's a shame we can't give a hint as to what server to use
            # to avoid having to open the changeset and read in bits of it
            if test:
                log.debug("test mode: not mirroring (%d of %d) %s" % (i + 1, len(bundles), jobList))
                updateCount += len(bundle)
                continue
            (outFd, tmpName) = util.mkstemp()
            os.close(outFd)
            log.debug("getting (%d of %d) %s" % (i + 1, len(bundles), displayBundle(bundle)))
            try:
                sourceRepos.createChangeSetFile(jobList, tmpName, recurse = False,
                                                callback = callback, mirrorMode = True)
            except changeset.ChangeSetKeyConflictError:
                splitJobList(jobList, sourceRepos, targetSet, hidden=hidden,
                             callback=callback)
            else:
                for target in targetSet:
                    target.commitChangeSetFile(tmpName, hidden=hidden, callback=callback)
            try:
                os.unlink(tmpName)
            except OSError:
                pass
            callback.done()
            updateCount += len(bundle)
        # compute the max mark of the bundles we committed
        mark = max([min([x[0] for x in bundle]) for bundle in bundles])
        if mark > bundlesMark:
            bundlesMark = mark
    else: # only when we're all done looping, advance the mark to the new max
        if bundlesMark == 0 or bundlesMark <= currentMark:
            bundlesMark = crtMaxMark # avoid repeating the same query...
        for target in targets:
            if hidden: # if we've hidden the last commits, show them now
                target.presentHiddenTroves()
            target.setMirrorMark(bundlesMark)
    # mirroring removed troves requires one-by-one processing
    for target in targets:
        copySet = removedSet.copy()
        updateCount += mirrorRemoved(referenceRepos, target.repo, copySet,
                                     test=test, callback=callback)
    # if this was a noop because the removed troves were already mirrored
    # we need to keep going
    if updateCount == 0 and len(removedSet):
        for target in targets:
            target.setMirrorMark(crtMaxMark)
        return -1
    return updateCount
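Read together, the returns above give mirrorRepository a simple contract: -1 means the mark advanced but nothing was committed (call it again), 0 means there is nothing left to mirror, and any other value is the number of items committed in this pass. A minimal driver sketch under that assumption follows; runMirror and its arguments are placeholders for illustration, not the mirror script's actual entry point:

def runMirror(sourceRepos, targetRepos, cfg, callback=ChangesetCallback()):
    # keep calling mirrorRepository until it reports nothing left to do
    total = 0
    while True:
        count = mirrorRepository(sourceRepos, targetRepos, cfg,
                                 callback=callback)
        if count == -1:
            # the mark advanced but nothing was committed; ask again
            continue
        if count == 0:
            break
        total += count
    return total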
Example n. 48
0
def getTroveList(src, cfg, mark):
    # FIXME: getNewTroveList should accept and only return troves on
    # the labels we're interested in
    log.debug("looking for new troves")
    # make sure we always treat the mark as an integer
    troveList = [(long(m), (n,v,f), t) for m, (n,v,f), t in
                  src.getNewTroveList(cfg.host, str(mark))]
    if not len(troveList):
        # this should be the end - no more troves to look at
        log.debug("no new troves found")
        return (mark, [])
    # we need to protect ourselves from duplicate items in the troveList
    l = len(troveList)
    troveList = list(set(troveList))
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after duplicate elimination %d troves are left", len(troveList))
    # if we filter out the entire list of troves we have been
    # returned, we need to tell the caller what was the highest mark
    # we had so it can continue asking for more
    maxMark = max([x[0] for x in troveList])
    # filter out troves on labels and parse through matchTroves
    troveList = [ x for x in troveList if _filterTup(x[1],cfg) ]
    if len(troveList) < l:
        l = len(troveList)
        log.debug("after label filtering and matchTroves %d troves are left", l)
        if not troveList:
            return (maxMark, [])
    # sort deterministically by mark, version, flavor, reverse name
    troveList.sort(lambda a,b: cmp(a[0], b[0]) or
                   cmp(a[1][1], b[1][1]) or
                   cmp(a[1][2], b[1][2]) or
                   cmp(b[1][0], a[1][0]) )
    log.debug("%d new troves returned", len(troveList))
    # We cut off the last troves that have the same flavor, version to
    # avoid committing an incomplete trove. This could happen if the
    # server side only listed some of a trove's components due to
    # server side limits on how many results it can return on each query
    lastIdx = len(troveList)-1
    # compare with the last one
    ml, (nl,vl,fl), tl = troveList[-1]
    while lastIdx >= 0:
        lastIdx -= 1
        m, (n,v,f), t = troveList[lastIdx]
        if v == vl and f == fl:
            continue
        lastIdx += 1
        break
    # the min mark of the troves we skip has to be higher than max
    # mark of troves we'll commit or otherwise we'll skip them for good...
    if lastIdx >= 0:
        firstMark = max([x[0] for x in troveList[:lastIdx]])
        lastMark = min([x[0] for x in troveList[lastIdx:]])
        if lastMark > firstMark:
            troveList = troveList[:lastIdx]
            log.debug("reduced new trove list to %d to avoid partial commits", len(troveList))
    # since we're returning at least one trove, the caller will make the next mark decision
    return (mark, troveList)
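To make the partial-commit protection concrete, here is a small self-contained sketch of just the tail-trimming decision, using made-up marks and plain strings standing in for conary's version and flavor objects; trimPartialTail is a hypothetical helper for illustration only:

def trimPartialTail(troveList):
    # entries mimic the (mark, (name, version, flavor), troveType) shape
    lastIdx = len(troveList) - 1
    ml, (nl, vl, fl), tl = troveList[-1]
    while lastIdx >= 0:
        lastIdx -= 1
        m, (n, v, f), t = troveList[lastIdx]
        if v == vl and f == fl:
            continue
        lastIdx += 1
        break
    if lastIdx >= 0:
        firstMark = max([x[0] for x in troveList[:lastIdx]])
        lastMark = min([x[0] for x in troveList[lastIdx:]])
        if lastMark > firstMark:
            return troveList[:lastIdx]
    return troveList

# the two 'bar' components share version 2-1 and carry the newest mark,
# so they are held back until the next pass returns the complete trove
sample = [
    (10, ('foo:runtime', '1-1', 'is: x86'), 0),
    (11, ('foo',         '1-1', 'is: x86'), 0),
    (12, ('bar:runtime', '2-1', 'is: x86'), 0),
    (12, ('bar',         '2-1', 'is: x86'), 0),
]
assert trimPartialTail(sample) == sample[:2]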
Example n. 49
0
 def presentHiddenTroves(self, newMark):
     log.debug("%s unhiding comitted troves", self.name)
     self.repo.presentHiddenTroves(self.cfg.host)
     self.setMirrorMark(newMark)
Example n. 50
0
 def presentHiddenTroves(self):
     log.debug("%s unhiding comitted troves", self.name)
     self.repo.presentHiddenTroves(self.cfg.host)
Example n. 51
0
    return trovetup.TroveSpec(specStr,
        allowEmptyName=allowEmptyName, withFrozenFlavor=withFrozenFlavor)

def _getChangeSet(path):
    try:
        cs = changeset.ChangeSetFromFile(path)
    except BadContainer, msg:
        # ensure that it is obvious that a file is being referenced
        if path[0] not in './':
            path = './' + path
        log.error("'%s' is not a valid conary changeset: %s" %
                  (path, msg))
        # XXX sys.exit is gross
        import sys
        sys.exit(1)
    log.debug("found changeset file %s" % path)
    return cs

def parseUpdateList(updateList, keepExisting, updateByDefault=True,
        withFrozenFlavor=False):
    # If keepExisting is true, we want our specifications to be relative
    # to nothing. If it's false, they should be absolute as updateChangeSet
    # interprets absolute jobs as ones which should be rooted (if there is
    # anything available to root them to).

    areAbsolute = not keepExisting

    applyList = []

    if type(updateList) is str:
        updateList = ( updateList, )
Example n. 52
0
    def filterSuggestions(self, depList, sugg, suggMap):
        """
            Given a list of several suggestions for one dependency,
            pick the dep that matches the best.
        """
        troves = set()

        for (troveTup, depSet) in depList:
            choicesBySolution = {}
            seen = set()
            if depSet in sugg:
                suggList = set()
                choicesAndDep = itertools.izip(sugg[depSet],
                                               depSet.iterDeps(sort=True))
                for choiceList, (depClass, dep) in choicesAndDep:
                    troveNames = set(x[0] for x in choiceList)

                    if self.db:
                        affTroveDict = \
                            dict((x, self.db.trovesByName(x))
                                 for x in troveNames)
                    else:
                        affTroveDict = dict.fromkeys(troveNames, {})

                    # iterate over flavorpath -- use suggestions
                    # from first flavor on flavorpath that gets a match
                    for installFlavor in self.flavor:
                        choice = self.selectResolutionTrove(troveTup, dep,
                                                            depClass,
                                                            choiceList,
                                                            installFlavor,
                                                            affTroveDict)
                        if choice:
                            suggList.add(choice)
                            l = suggMap.setdefault(troveTup, set())
                            l.add(choice)

                            if choice not in seen:
                                if choice not in choicesBySolution:
                                    d = deps.DependencySet()
                                    choicesBySolution[choice] = d
                                else:
                                    d = choicesBySolution[choice]
                                d.addDep(depClass, dep)
                            break

                if choicesBySolution:
                    for choice, depSet in sorted(choicesBySolution.iteritems()):
                        seen.add(choice)
                        depSet = str(depSet).split('\n')
                        if len(depSet) > 5:
                            depSet = depSet[0:5] + ['...']
                        depSet = '\n               '.join(depSet)
                        log.debug('Resolved:\n'
                                  '    %s=%s/%s[%s]\n'
                                  '    Required:  %s\n'
                                  '    Adding: %s=%s/%s[%s]',
                                  troveTup[0], troveTup[1].trailingLabel(),
                                  troveTup[1].trailingRevision(), troveTup[2],
                                  depSet, choice[0], choice[1].trailingLabel(),
                                  choice[1].trailingRevision(), choice[2])

                troves.update([ (x[0], (None, None), x[1:], True)
                                for x in suggList ])


        return troves
Example n. 53
0
 def setMirrorMark(self, mark):
     self.mark = str(long(mark))
     log.debug("%s setting mirror mark to %s", self.name, self.mark)
     if self.test:
         return
     self.repo.setMirrorMark(self.cfg.host, self.mark)
Example n. 54
0
 def setMirrorMark(self, mark):
     self.mark = str(long(mark))
     log.debug("%s setting mirror mark to %s", self.name, self.mark)
     if self.test:
         return
     self.repo.setMirrorMark(self.cfg.host, self.mark)
Example n. 55
0
 def repair(self):
     if not self._status:
         log.debug("no errors detected on check run, nothing to fix")
         return True
     return self.fix()
Example n. 56
0
 def presentHiddenTroves(self):
     log.debug("%s unhiding comitted troves", self.name)
     self.repo.presentHiddenTroves(self.cfg.host)