Example #1
def reorderChangesets(f, csPath, initialSizes, maxisosize, isoblocksize,
                      baseTrove):
    reservedTroves = []
    sizedList = []
    infoTroves = []
    baseTroves = []
    for line in f:
        csFile = line.split()[0]
        trvName = line.split()[1]
        if trvName in basicminimal:
            reservedTroves.append(line)
        spaceUsed = spaceused(join(csPath, csFile), isoblocksize)
        if trvName.startswith('info-'):
            infoTroves.append((spaceUsed, line))
        elif trvName == baseTrove:
            baseTroves.append((spaceUsed, line))
        else:
            sizedList.append((spaceUsed, line))
    sizedList = sorted(sizedList, reverse=True)

    reservedList = []
    if reservedTroves:
        trvNames = set()
        for trvLine in reservedTroves:
            trvNames.add(trvLine.split()[1])
            cs = changeset.ChangeSetFromFile(
                join(csPath, trvLine.split()[0]))
            trv = Trove([x for x in cs.iterNewTroveList()][0])
            for includedTrv in [x[0].split(':')[0] for x in
                                trv.iterTroveList(strongRefs=True,
                                                  weakRefs=True)]:
                trvNames.add(includedTrv)

        for size, line in sizedList[:]:
            if line.split()[1] in trvNames:
                reservedList.append((size, line))
                sizedList.remove((size, line))

    sizedList = infoTroves + reservedList + baseTroves + sizedList

    reOrdList = [[[], maxisosize - initialSizes[0]]]

    for size, line in sizedList:
        match = False
        for i, (changesets, limit) in enumerate(reOrdList):
            if maxisosize and (size <= limit):
                reOrdList[i][0].append(line)
                reOrdList[i][1] -= size
                match = True
                break
        if not match:
            reOrdList.append([[line], maxisosize - size - initialSizes[1]])

    csList = []
    for disc in [x[0] for x in reOrdList]:
        csList.extend(disc)
    return csList
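
The inner loop here is a first-fit packing pass: changesets are taken largest-first (after the info-, reserved, and base troves are pinned to the front) and placed on the first disc with room, and a new disc is opened only when nothing fits. A minimal self-contained sketch of that strategy, using plain integer sizes and hypothetical names:

def pack_first_fit_decreasing(sizes, capacity):
    # Each disc is [remaining_capacity, [sizes placed on it]].
    discs = []
    for size in sorted(sizes, reverse=True):
        for disc in discs:
            if size <= disc[0]:
                disc[0] -= size
                disc[1].append(size)
                break
        else:
            discs.append([capacity - size, [size]])
    return [disc[1] for disc in discs]

print(pack_first_fit_decreasing([700, 300, 400, 200, 100], 1000))
# -> [[700, 300], [400, 200, 100]]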
Example #2
def iter_new_troves(changeset, helper):
    '''
    Take a changeset and yield trove objects corresponding to the new
    versions of all troves in that changeset. This involves fetching old
    troves and applying the changeset to them to produce the new troves.
    '''

    # First get a list of all relative trove changesets
    old_troves = []
    for trove_cs in changeset.iterNewTroveList():
        if trove_cs.getOldVersion():
            old_troves.append(trove_cs.getOldNameVersionFlavor())

    # Now fetch trove objects corresponding to old versions
    old_dict = {}
    if old_troves:
        for old_trove in helper.getRepos().getTroves(old_troves):
            old_dict.setdefault(old_trove.getNameVersionFlavor(),
                                []).append(old_trove)

    # Iterate over changeset again, yielding new trove objects
    for trove_cs in changeset.iterNewTroveList():
        if trove_cs.getOldVersion():
            trv = old_dict[trove_cs.getOldNameVersionFlavor()].pop()
            trv.applyChangeSet(trove_cs)
            yield trv
        else:
            yield Trove(trove_cs)
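
The dict of lists keyed on (name, version, flavor) exists because several new troves may be deltas against the same old trove; one copy is fetched per request, and each copy is popped exactly once, since applyChangeSet mutates the trove it is applied to. A small illustration of that setdefault/pop pattern (the keys below are made up):

requests = [('foo:source', '1.0-1'), ('foo:source', '1.0-1'),
            ('bar:source', '2.0-1')]

# One fetched object per request, grouped by key.
fetched = {}
for key in requests:
    fetched.setdefault(key, []).append(object())

# Each consumer pops its own copy, so no object is mutated twice.
copies = [fetched[key].pop() for key in requests]
assert len(set(id(x) for x in copies)) == 3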
Example #3
    def _logDifferenceInPrebuiltReqs(self, trv, buildReqTups, preBuiltReqs):
        existsTrv = Trove('@update', versions.NewVersion(),
                          deps.Flavor(), None)
        availableTrv = Trove('@update', versions.NewVersion(),
                             deps.Flavor(), None)
        for troveNVF in preBuiltReqs:
            existsTrv.addTrove(*troveNVF)
        for troveNVF in buildReqTups:
            availableTrv.addTrove(*troveNVF)
        jobs = availableTrv.diff(existsTrv)[2]
        formatter = display.JobTupFormatter(affinityDb=None)
        formatter.dcfg.setTroveDisplay(fullVersions=True,
                                       fullFlavors=True,
                                       showComponents=True)
        formatter.dcfg.setJobDisplay(compressJobs=True)
        formatter.prepareJobLists([jobs])
        self.logger.info('Could count %s=%s[%s]{%s} as prebuilt - the'
                         ' following changes have been made in its'
                         ' buildreqs:'
                         % trv.getNameVersionFlavor(withContext=True))
        for line in formatter.formatJobTups(jobs):
            self.logger.info(line)
        self.logger.info('...Rebuilding')
Example #4
    def _fetchOldChangeSets(self):
        """
        Fetch old versions of each trove, where they can be found
        and are suitably sane.
        """
        versionSpecs = []
        latestSpecs = []
        targetLabel = self.helper.plan.getTargetLabel()
        for package in self.packages:
            version = macro.expand(package.getBaseVersion(), package)
            versionSpecs.append((package.getName(),
                '%s/%s' % (targetLabel, version), None))
            latestSpecs.append((package.getName(), str(targetLabel), None))

        # Pick the new version for each package by querying all existing
        # versions (including markremoved ones) with the same version.
        results = self.helper.getRepos().findTroves(None, versionSpecs,
            allowMissing=True, getLeaves=False,
            troveTypes=trovesource.TROVE_QUERY_ALL)
        for package, (recipeText, recipeObj), query in zip(
                self.packages, self.recipes, versionSpecs):
            newVersion = _createVersion(package, self.helper,
                    recipeObj.version)
            existingVersions = [x[1] for x in results.get(query, ())]
            while newVersion in existingVersions:
                newVersion.incrementSourceCount()
            package.nextVersion = newVersion
        # Grab the latest existing version so we can reuse autosources from it
        results = self.helper.getRepos().findTroves(None, latestSpecs,
                allowMissing=True)
        toGet = []
        oldVersions = []
        for package, query in zip(self.packages, latestSpecs):
            if not results.get(query):
                oldVersions.append(None)
                continue
            n, v, f = max(results[query])
            toGet.append((n, (None, None), (v, f), True))
            oldVersions.append((n, v, f))

        self.oldChangeSet = self.helper.createChangeSet(toGet)
        for package, oldVersion in zip(self.packages, oldVersions):
            if oldVersion:
                trvCs = self.oldChangeSet.getNewTroveVersion(*oldVersion)
                self.oldTroves.append(Trove(trvCs))
            else:
                self.oldTroves.append(None)
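
The while loop that settles on package.nextVersion simply bumps the source count until the candidate stops colliding with any existing version, including markremoved ones. An illustrative analogue with plain version strings (incrementSourceCount is assumed to bump the trailing -N in the same spirit):

def next_free_version(base, existing):
    # Bump a trailing counter until the candidate is unused.
    count = 1
    while '%s-%s' % (base, count) in existing:
        count += 1
    return '%s-%s' % (base, count)

print(next_free_version('1.0', {'1.0-1', '1.0-2'}))  # -> 1.0-3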
Example #5
File: shadow.py Project: sassoftware/bob
def _makeSourceTrove(package, helper):
    cs = ChangeSet()
    filesToAdd = {}
    ver = macro.expand(package.getBaseVersion(), package)
    version = _createVersion(package, helper, ver)
    latestSpec = (package.getName(), str(version.trailingLabel()), None)
    results = helper.getRepos().findTroves(None, [latestSpec],
                        allowMissing=True, getLeaves=False,
                        troveTypes=trovesource.TROVE_QUERY_ALL)
    if results:
        existingVersions = [x[1] for x in results.get(latestSpec, ())]
        while version in existingVersions:
            version.incrementSourceCount()

    new = Trove(package.name, version, deps.Flavor())
    new.setFactory(package.targetConfig.factory)
    message = "Temporary Source for %s" % version
    message = message.rstrip() + "\n"
    new.changeChangeLog(ChangeLog(name=helper.cfg.name,
                contact=helper.cfg.contact, message=message))
    for path, contents in package.recipeFiles.iteritems():
        isText = path == package.getRecipeName()
        pathId = hashlib.md5(path).digest()
        fileHelper = filetypes.RegularFile(contents=contents,
                    config=isText)
        fileStream = fileHelper.get(pathId)
        fileStream.flags.isSource(set=True)
        fileId = fileStream.fileId()
        fileVersion = new.getVersion()
        key = pathId + fileId
        filesToAdd[key] = (fileStream, fileHelper.contents, isText)
        new.addFile(pathId, path, fileVersion, fileId)
    new.invalidateDigests()
    new.computeDigests()
    for key, (fileObj, fileContents, cfgFile) in filesToAdd.items():
        cs.addFileContents(fileObj.pathId(), fileObj.fileId(),
                    ChangedFileTypes.file, fileContents, cfgFile)
        cs.addFile(None, fileObj.fileId(), fileObj.freeze())
    cs.newTrove(new.diff(None, absolute=True)[0])
    return new.getNameVersionFlavor(), cs
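
A sketch of how the return value might be consumed - the caller receives the new trove's (name, version, flavor) plus an absolute changeset that still has to be committed (package and helper are assumed to come from bob's plan setup):

nvf, cs = _makeSourceTrove(package, helper)
helper.getRepos().commitChangeSet(cs)
print('committed %s=%s' % (nvf[0], nvf[1]))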
Example #6
File: shadow.py Project: sassoftware/bob
    def _merge(self):
        changeSet = ChangeSet()
        deleteDirs = set()
        doCommit = False
        # If this is not None then all ephemeral sources will still be fetched
        # but will be placed in this directory instead.
        if self.helper.plan.ephemeralSourceDir:
            ephDir = self.helper.makeEphemeralDir()
        else:
            ephDir = None

        def _addFile(path, contents, isText):
            if path in oldFiles:
                # Always recycle pathId if available.
                pathId, _, oldFileId, oldFileVersion = oldFiles[path]
            else:
                pathId = hashlib.md5(path).digest()
                oldFileId = oldFileVersion = None

            fileHelper = filetypes.RegularFile(contents=contents,
                    config=isText)
            fileStream = fileHelper.get(pathId)
            fileStream.flags.isSource(set=True)
            fileId = fileStream.fileId()

            # If the fileId matches, recycle the fileVersion too.
            if fileId == oldFileId:
                fileVersion = oldFileVersion
            else:
                fileVersion = newTrove.getVersion()

            filesToAdd[fileId] = (fileStream, fileHelper.contents, isText)
            newTrove.addFile(pathId, path, fileVersion, fileId)

        for package, (recipeText, recipeObj), oldTrove in zip(
                self.packages, self.recipes, self.oldTroves):

            filesToAdd = {}
            oldFiles = {}
            if oldTrove is not None:
                for pathId, path, fileId, fileVer in oldTrove.iterFileList():
                    oldFiles[path] = (pathId, path, fileId, fileVer)
            newTrove = Trove(package.name, package.nextVersion, deps.Flavor())
            newTrove.setFactory(package.targetConfig.factory)

            # Add upstream files to new trove. Recycle pathids from the old
            # version.
            # LAZY: assume that everything other than the recipe is binary.
            # Conary has a magic module, but it only accepts filenames!
            for path, contents in package.recipeFiles.iteritems():
                isText = path == package.getRecipeName()
                _addFile(path, contents, isText)

            # Collect requested auto sources from recipe. Unknown recipe types
            # will not be loaded so recipeObj will be the class, so assume
            # these have no sources.
            if not inspect.isclass(recipeObj):
                recipeFiles = dict((os.path.basename(x.getPath()), x)
                    for x in recipeObj.getSourcePathList())
                newFiles = set(x[1] for x in newTrove.iterFileList())

                needFiles = set(recipeFiles) - newFiles
                for autoPath in needFiles:
                    source = recipeFiles[autoPath]
                    if (getattr(source, 'contents', None)
                            and not source.sourcename):
                        # Ignore trove scripts that have inline contents.
                        continue
                    if not autoPath:
                        log.error("bob does not support 'guessed' filenames; "
                                "please use a full path for source '%s' in "
                                "package %s", source.getPath(), package.name)
                        raise RuntimeError("Unsupported source action")
                    if (autoPath in oldFiles
                            and not self.helper.plan.refreshSources
                            and not source.ephemeral):
                        # File exists in old version.
                        pathId, path, fileId, fileVer = oldFiles[autoPath]
                        newTrove.addFile(pathId, path, fileVer, fileId)
                        continue

                    if source.ephemeral and not ephDir:
                        continue

                    # File doesn't exist; need to create it.
                    if source.ephemeral:
                        laUrl = lookaside.laUrl(source.getPath())
                        tempDir = joinPaths(ephDir,
                                os.path.dirname(laUrl.filePath()))
                        mkdirChain(tempDir)
                    else:
                        tempDir = tempfile.mkdtemp()
                        deleteDirs.add(tempDir)
                    snapshot = _getSnapshot(self.helper, package, source,
                            tempDir)

                    if not source.ephemeral and snapshot:
                        autoPathId = hashlib.md5(autoPath).digest()
                        autoObj = FileFromFilesystem(snapshot, autoPathId)
                        autoObj.flags.isAutoSource(set=True)
                        autoObj.flags.isSource(set=True)
                        autoFileId = autoObj.fileId()

                        autoContents = filecontents.FromFilesystem(snapshot)
                        filesToAdd[autoFileId] = (autoObj, autoContents, False)
                        newTrove.addFile(autoPathId, autoPath,
                            newTrove.getVersion(), autoFileId)

            # If the old and new troves are identical, just use the old one.
            if oldTrove and _sourcesIdentical(
                    oldTrove, newTrove, [self.oldChangeSet, filesToAdd]):
                package.setDownstreamVersion(oldTrove.getVersion())
                log.debug('Skipped %s=%s', oldTrove.getName(),
                        oldTrove.getVersion())
                continue

            # Add files and contents to changeset.
            for fileId, (fileObj, fileContents, cfgFile) in filesToAdd.items():
                changeSet.addFileContents(fileObj.pathId(), fileObj.fileId(),
                    ChangedFileTypes.file, fileContents, cfgFile)
                changeSet.addFile(None, fileObj.fileId(), fileObj.freeze())

            # Create a changelog entry.
            changeLog = ChangeLog(
                name=self.helper.cfg.name, contact=self.helper.cfg.contact,
                message=self.helper.plan.commitMessage + '\n')
            newTrove.changeChangeLog(changeLog)

            # Calculate trove digests and add the trove to the changeset
            newTrove.invalidateDigests()
            newTrove.computeDigests()
            newTroveCs = newTrove.diff(None, absolute=True)[0]
            changeSet.newTrove(newTroveCs)
            doCommit = True

            package.setDownstreamVersion(newTrove.getVersion())
            log.debug('Created %s=%s', newTrove.getName(), newTrove.getVersion())

        if doCommit:
            cook.signAbsoluteChangesetByConfig(changeSet, self.helper.cfg)
            f = tempfile.NamedTemporaryFile(dir=os.getcwd(), suffix='.ccs',
                    delete=False)
            f.close()
            changeSet.writeToFile(f.name)
            try:
                self.helper.getRepos().commitChangeSet(changeSet)
            except:
                log.error("Error committing changeset to repository, "
                        "failed changeset is saved at %s", f.name)
                raise
            else:
                os.unlink(f.name)

        for path in deleteDirs:
            shutil.rmtree(path)
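
The commit step above writes the changeset to a .ccs file in the working directory before committing, so a failed commit leaves a recoverable artifact behind; the file is unlinked only on success. The same pattern in isolation, with generic names rather than bob's API:

import os
import tempfile

def commit_with_fallback(payload, do_commit):
    f = tempfile.NamedTemporaryFile(dir=os.getcwd(), suffix='.ccs',
                                    delete=False)
    f.write(payload)
    f.close()
    try:
        do_commit(payload)
    except Exception:
        print('commit failed; payload saved at %s' % f.name)
        raise
    else:
        os.unlink(f.name)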
Example #7
File: commit.py Project: pombreda/rmake3
def commitJobs(conaryclient, jobList, reposName, message=None,
               commitOutdatedSources=False, sourceOnly=False,
               excludeSpecs=None, writeToFile=None):
    jobsToCommit = {}
    alreadyCommitted = []
    finalCs = changeset.ReadOnlyChangeSet()
    mapping = {}
    for job in jobList:
        if job.isCommitted():
            alreadyCommitted.append(job)
        else:
            jobsToCommit[job.jobId] = job
    jobsToCommit = jobsToCommit.values() # dedup job list

    if not jobsToCommit:
        err = 'Job(s) already committed'
        return False, err

    allTroves = []
    trovesByBranch = {}
    alreadyCommitted = False
    for job in jobsToCommit:
        mapping[job.jobId] = {}
        for trove in job.iterTroves():
            allTroves.append(trove)
            troveVersion = trove.getVersion()
            if troveVersion.getHost() == reposName:
                if not troveVersion.branch().hasParentBranch():
                    message = ('Cannot commit filesystem cook %s - '
                               ' nowhere to commit to!' % trove.getName())
                    return False, message
    assert(allTroves)
    source = trovesource.SimpleTroveSource()
    if excludeSpecs:
        excludeSpecsWithContext = {}
        troveMap = {}
        for excludeSpec in excludeSpecs:
            if len(excludeSpec) == 4:
                context = excludeSpec[3]
            else:
                context = None

            excludeSpecsWithContext.setdefault(
                                        excludeSpec[:3], []).append(context)
        excludeSpecs = [ x[:3] for x in excludeSpecs ]

        for trove in allTroves:
            troveTup = (trove.getName().split(':')[0],
                        trove.getVersion(),
                        trove.getFlavor())
            source.addTrove(*troveTup)
            troveMap.setdefault(troveTup, []).append(trove)

        source.searchAsDatabase()
        matches = source.findTroves(None, excludeSpecs, None, allowMissing=True)
        trvMatches = []
        for excludeSpec, matchList in matches.iteritems():
            contexts = excludeSpecsWithContext[excludeSpec]
            for match in matchList:
                for trv in troveMap[match]:
                    if trv.context in contexts or None in contexts:
                        trvMatches.append(trv)

        allTroves = [ x for x in allTroves if x not in trvMatches ]
        if not allTroves:
            message = ('All troves excluded - not committing')
            return False, message

    repos = conaryclient.getRepos()

    trovesByNBF = {}
    sourcesToCheck = []
    branchMap = {}
    trovesToClone = []
    for trove in allTroves:
        builtTroves = list(trove.iterBuiltTroves())
        if not builtTroves:
            continue
        if builtTroves[0][1].getHost() != reposName:
            alreadyCommitted = True
            for n,v,f in builtTroves:
                trovesByNBF[n, v.branch(), f] = (trove, v)
            continue

        troveVersion = trove.getVersion()
        if troveVersion.getHost() == reposName:
            sourceTup = (trove.getName(), troveVersion, Flavor())
            targetBranch = troveVersion.branch().parentBranch()
            branchMap[troveVersion.branch()] = targetBranch
            nbf = trove.getName(), targetBranch, Flavor()
            if nbf in trovesByNBF:
                if trovesByNBF[nbf][1] != troveVersion:
                    badVersion = trovesByNBF[nbf][1]
                    return False, ("Cannot commit two different versions of source component %s:"
                                   " %s and %s" % (trove.getName(), troveVersion, badVersion))
            trovesByNBF[nbf] = trove, troveVersion
            sourcesToCheck.append(sourceTup)
        if sourceOnly:
            continue

        for troveTup in builtTroves:
            branch = troveTup[1].branch()
            targetBranch = branch.parentBranch()
            # add mapping so that when the cloning is done
            # we can tell what commit resulted in what binaries.
            nbf = (troveTup[0], targetBranch, troveTup[2])
            if nbf in trovesByNBF:
                otherBinary = trovesByNBF[nbf][0].getBinaryTroves()[0]
                if otherBinary[1].branch() == targetBranch:
                    # this one's already committed.
                    break
                # discard the later of the two commits.
                if trovesByNBF[nbf][0].getVersion() > trove.getVersion():
                    # we're the earlier one
                    badTrove, badVersion = trovesByNBF[nbf]
                    newTrove = trove
                    newVersion = troveTup[1]
                else:
                    badTrove = trove
                    badVersion = troveTup[1]
                    newTrove, newVersion = trovesByNBF[nbf]
                name = nbf[0]
                flavor = nbf[2]

                skipped = []
                for badTroveTup in badTrove.iterBuiltTroves():
                    badNbf = (badTroveTup[0], targetBranch, badTroveTup[2])
                    if ':' not in badTroveTup[0]:
                        skipped.append(badTroveTup[0])

                    if badNbf in trovesByNBF and badTrove is trovesByNBF[badNbf][0]:
                        del trovesByNBF[badNbf]

                skipped = ', '.join(skipped)
                log.warning("Not committing %s on %s[%s]%s - overridden by"
                            " %s[%s]%s" % (skipped, badTroveTup[1],
                             badTroveTup[2],
                             badTrove.getContextStr(), newVersion, flavor,
                             newTrove.getContextStr()))
                if trove is badTrove:
                    break

            trovesByNBF[nbf] = trove, troveTup[1]
            branchMap[branch] = targetBranch

    for nbf, (trove, tupVersion) in trovesByNBF.items():
        if tupVersion.branch() != nbf[1]:
            trovesToClone.append((nbf[0], tupVersion, nbf[2]))

    if not trovesToClone:
        if sourceOnly:
            err = 'Could not find sources to commit'
        elif alreadyCommitted:
            log.warning('All built troves have already been committed')
            return True, {}
        else:
            err = 'Can only commit built troves, none found'
        return False, err
    if sourcesToCheck and not commitOutdatedSources:
        outdated = _checkOutdatedSources(repos, sourcesToCheck)
        if outdated:
            outdated = ('%s=%s (replaced by newer %s)'
                        % (name, builtVer, newVer.trailingRevision())
                        for (name, builtVer, newVer) in outdated)
            err = ('The following source troves are out of date:\n%s\n\n'
                   'Use --commit-outdated-sources to commit anyway' %
                   '\n'.join(outdated))
            return False, err

    # Only update build info if it is acceptable for some buildreqs
    # not to be updated.
    callback = callbacks.CloneCallback(conaryclient.cfg, message)
    passed, cs = conaryclient.createTargetedCloneChangeSet(
                                        branchMap, trovesToClone,
                                        updateBuildInfo=True,
                                        cloneSources=False,
                                        trackClone=False,
                                        callback=callback, fullRecurse=False)
    if passed:
        oldTroves = []
        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                oldTroves.append(troveCs.getOldNameVersionFlavor())
        if oldTroves:
            oldDict = {}
            for oldTrove in repos.getTroves(oldTroves):
                oldDict.setdefault(oldTrove.getNameVersionFlavor(),
                                    []).append(oldTrove)
        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                trv = oldDict[troveCs.getOldNameVersionFlavor()].pop()
                trv.applyChangeSet(troveCs)
            else:
                trv = Trove(troveCs)
            for _, childVersion, _ in trv.iterTroveList(strongRefs=True,
                                                        weakRefs=True):
                # make sure there are not any references to the internal
                # rmake repository - that would be a bad bug - easy to
                # do with the way we do cooking of groups.
                onRepos = childVersion.getHost() == reposName
                assert not onRepos, "Trove %s references repository" % trv
            n,v,f = troveCs.getNewNameVersionFlavor()
            trove, troveVersion = trovesByNBF[n, v.branch(), f]
            troveNVFC = trove.getNameVersionFlavor(withContext=True)
            # map jobId -> trove -> binaries
            mapping[trove.jobId].setdefault(troveNVFC, []).append((n,v,f))
    else:
        return False, 'Creating clone failed'

    signatureKey = conaryclient.cfg.signatureKey
    if writeToFile:
        cs.writeToFile(writeToFile)
    else:
        repos.commitChangeSet(cs, callback=callback)
    return True, mapping
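
commitJobs reports failure through its return value rather than by raising: (False, errorString) on failure, (True, mapping) on success, where mapping goes jobId -> trove NVFC -> committed binaries. A hypothetical call site:

ok, result = commitJobs(conaryclient, jobs, 'rmake.example.com',
                        message='automated commit')
if not ok:
    raise RuntimeError(result)
for jobId, troveMap in result.items():
    for troveNVFC, binaries in troveMap.items():
        print('%s: %s binaries committed' % (jobId, len(binaries)))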
Example #8
File: commit.py Project: pombreda/rmake
def commitJobs(conaryclient,
               jobList,
               reposName,
               message=None,
               commitOutdatedSources=False,
               sourceOnly=False,
               excludeSpecs=None,
               writeToFile=None):
    jobsToCommit = {}
    alreadyCommitted = []
    finalCs = changeset.ReadOnlyChangeSet()
    mapping = {}
    for job in jobList:
        if job.isCommitted():
            alreadyCommitted.append(job)
        else:
            jobsToCommit[job.jobId] = job
    jobsToCommit = jobsToCommit.values()  # dedup job list

    if not jobsToCommit:
        err = 'Job(s) already committed'
        return False, err

    allTroves = []
    trovesByBranch = {}
    alreadyCommitted = False
    for job in jobsToCommit:
        mapping[job.jobId] = {}
        for trove in job.iterTroves():
            allTroves.append(trove)
            troveVersion = trove.getVersion()
            if troveVersion.getHost() == reposName:
                if not troveVersion.branch().hasParentBranch():
                    message = ('Cannot commit filesystem cook %s - '
                               ' nowhere to commit to!' % trove.getName())
                    return False, message
    assert (allTroves)
    source = trovesource.SimpleTroveSource()
    if excludeSpecs:
        excludeSpecsWithContext = {}
        troveMap = {}
        for excludeSpec in excludeSpecs:
            if len(excludeSpec) == 4:
                context = excludeSpec[3]
            else:
                context = None

            excludeSpecsWithContext.setdefault(excludeSpec[:3],
                                               []).append(context)
        excludeSpecs = [x[:3] for x in excludeSpecs]

        for trove in allTroves:
            troveTup = (trove.getName().split(':')[0], trove.getVersion(),
                        trove.getFlavor())
            source.addTrove(*troveTup)
            troveMap.setdefault(troveTup, []).append(trove)

        source.searchAsDatabase()
        matches = source.findTroves(None,
                                    excludeSpecs,
                                    None,
                                    allowMissing=True)
        trvMatches = []
        for excludeSpec, matchList in matches.iteritems():
            contexts = excludeSpecsWithContext[excludeSpec]
            for match in matchList:
                for trv in troveMap[match]:
                    if trv.context in contexts or None in contexts:
                        trvMatches.append(trv)

        allTroves = [x for x in allTroves if x not in trvMatches]
        if not allTroves:
            message = ('All troves excluded - not committing')
            return False, message

    repos = conaryclient.getRepos()

    trovesByNBF = {}
    sourcesToCheck = []
    branchMap = {}
    trovesToClone = []
    for trove in allTroves:
        builtTroves = list(trove.iterBuiltTroves())
        if not builtTroves:
            continue
        if builtTroves[0][1].getHost() != reposName:
            alreadyCommitted = True
            for n, v, f in builtTroves:
                trovesByNBF[n, v.branch(), f] = (trove, v)
            continue

        troveVersion = trove.getVersion()
        if troveVersion.getHost() == reposName:
            sourceTup = (trove.getName(), troveVersion, Flavor())
            targetBranch = troveVersion.branch().parentBranch()
            branchMap[troveVersion.branch()] = targetBranch
            nbf = trove.getName(), targetBranch, Flavor()
            if nbf in trovesByNBF:
                if trovesByNBF[nbf][1] != troveVersion:
                    badVersion = trovesByNBF[nbf][1]
                    return False, (
                        "Cannot commit two different versions of source component %s:"
                        " %s and %s" %
                        (trove.getName(), troveVersion, badVersion))
            trovesByNBF[nbf] = trove, troveVersion
            sourcesToCheck.append(sourceTup)
        if sourceOnly:
            continue

        for troveTup in builtTroves:
            branch = troveTup[1].branch()
            targetBranch = branch.parentBranch()
            # add mapping so that when the cloning is done
            # we can tell what commit resulted in what binaries.
            nbf = (troveTup[0], targetBranch, troveTup[2])
            if nbf in trovesByNBF:
                otherBinary = trovesByNBF[nbf][0].getBinaryTroves()[0]
                if otherBinary[1].branch() == targetBranch:
                    # this one's already committed.
                    break
                # discard the later of the two commits.
                if trovesByNBF[nbf][0].getVersion() > trove.getVersion():
                    # we're the earlier one
                    badTrove, badVersion = trovesByNBF[nbf]
                    newTrove = trove
                    newVersion = troveTup[1]
                else:
                    badTrove = trove
                    badVersion = troveTup[1]
                    newTrove, newVersion = trovesByNBF[nbf]
                name = nbf[0]
                flavor = nbf[2]

                skipped = []
                for badTroveTup in badTrove.iterBuiltTroves():
                    badNbf = (badTroveTup[0], targetBranch, badTroveTup[2])
                    if ':' not in badTroveTup[0]:
                        skipped.append(badTroveTup[0])

                    if badNbf in trovesByNBF and badTrove is trovesByNBF[
                            badNbf][0]:
                        del trovesByNBF[badNbf]

                skipped = ', '.join(skipped)
                log.warning("Not committing %s on %s[%s]%s - overridden by"
                            " %s[%s]%s" %
                            (skipped, badTroveTup[1], badTroveTup[2],
                             badTrove.getContextStr(), newVersion, flavor,
                             newTrove.getContextStr()))
                if trove is badTrove:
                    break

            trovesByNBF[nbf] = trove, troveTup[1]
            branchMap[branch] = targetBranch

    for nbf, (trove, tupVersion) in trovesByNBF.items():
        if tupVersion.branch() != nbf[1]:
            trovesToClone.append((nbf[0], tupVersion, nbf[2]))

    if not trovesToClone:
        if sourceOnly:
            err = 'Could not find sources to commit'
        elif alreadyCommitted:
            log.warning('All built troves have already been committed')
            return True, {}
        else:
            err = 'Can only commit built troves, none found'
        return False, err
    if sourcesToCheck and not commitOutdatedSources:
        outdated = _checkOutdatedSources(repos, sourcesToCheck)
        if outdated:
            outdated = ('%s=%s (replaced by newer %s)'
                        % (name, builtVer, newVer.trailingRevision())
                        for (name, builtVer, newVer) in outdated)
            err = ('The following source troves are out of date:\n%s\n\n'
                   'Use --commit-outdated-sources to commit anyway' %
                   '\n'.join(outdated))
            return False, err

    # Only update build info if it is acceptable for some buildreqs
    # not to be updated.
    updateBuildInfo = compat.ConaryVersion().acceptsPartialBuildReqCloning()
    callback = callbacks.CloneCallback(conaryclient.cfg, message)
    passed, cs = conaryclient.createTargetedCloneChangeSet(
        branchMap,
        trovesToClone,
        updateBuildInfo=updateBuildInfo,
        cloneSources=False,
        trackClone=False,
        callback=callback,
        fullRecurse=False)
    if passed:
        oldTroves = []
        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                oldTroves.append(troveCs.getOldNameVersionFlavor())
        if oldTroves:
            oldDict = {}
            for oldTrove in repos.getTroves(oldTroves):
                oldDict.setdefault(oldTrove.getNameVersionFlavor(),
                                   []).append(oldTrove)
        for troveCs in cs.iterNewTroveList():
            if troveCs.getOldVersion():
                trv = oldDict[troveCs.getOldNameVersionFlavor()].pop()
                trv.applyChangeSet(troveCs)
            else:
                trv = Trove(troveCs)
            for _, childVersion, _ in trv.iterTroveList(strongRefs=True,
                                                        weakRefs=True):
                # make sure there are not any references to the internal
                # rmake repository - that would be a bad bug - easy to
                # do with the way we do cooking of groups.
                onRepos = childVersion.getHost() == reposName
                assert not onRepos, "Trove %s references repository" % trv
            n, v, f = troveCs.getNewNameVersionFlavor()
            trove, troveVersion = trovesByNBF[n, v.branch(), f]
            troveNVFC = trove.getNameVersionFlavor(withContext=True)
            # map jobId -> trove -> binaries
            mapping[trove.jobId].setdefault(troveNVFC, []).append((n, v, f))
    else:
        return False, 'Creating clone failed'

    signatureKey = conaryclient.cfg.signatureKey
    if signatureKey and compat.ConaryVersion().signAfterPromote():
        finalCs = signAbsoluteChangeset(cs, signatureKey)
    if writeToFile:
        cs.writeToFile(writeToFile)
    else:
        repos.commitChangeSet(cs, callback=callback)
    return True, mapping
Example #9
    def _merge(self):
        changeSet = ChangeSet()
        deleteDirs = set()
        doCommit = False
        # If this is not None then all ephemeral sources will still be fetched
        # but will be placed in this directory instead.
        if self.helper.plan.ephemeralSourceDir:
            ephDir = self.helper.makeEphemeralDir()
        else:
            ephDir = None

        def _addFile(path, contents, isText):
            if path in oldFiles:
                # Always recycle pathId if available.
                pathId, _, oldFileId, oldFileVersion = oldFiles[path]
            else:
                pathId = hashlib.md5(path).digest()
                oldFileId = oldFileVersion = None

            fileHelper = filetypes.RegularFile(contents=contents,
                    config=isText)
            fileStream = fileHelper.get(pathId)
            fileStream.flags.isSource(set=True)
            fileId = fileStream.fileId()

            # If the fileId matches, recycle the fileVersion too.
            if fileId == oldFileId:
                fileVersion = oldFileVersion
            else:
                fileVersion = newTrove.getVersion()

            filesToAdd[fileId] = (fileStream, fileHelper.contents, isText)
            newTrove.addFile(pathId, path, fileVersion, fileId)

        for package, (recipeText, recipeObj), oldTrove in zip(
                self.packages, self.recipes, self.oldTroves):

            filesToAdd = {}
            oldFiles = {}
            if oldTrove is not None:
                for pathId, path, fileId, fileVer in oldTrove.iterFileList():
                    oldFiles[path] = (pathId, path, fileId, fileVer)
            newTrove = Trove(package.name, package.nextVersion, deps.Flavor())
            newTrove.setFactory(package.targetConfig.factory)

            # Add upstream files to new trove. Recycle pathids from the old
            # version.
            # LAZY: assume that everything other than the recipe is binary.
            # Conary has a magic module, but it only accepts filenames!
            for path, contents in package.recipeFiles.iteritems():
                isText = path == package.getRecipeName()
                _addFile(path, contents, isText)

            # Collect requested auto sources from recipe. Unknown recipe types
            # will not be loaded so recipeObj will be the class, so assume
            # these have no sources.
            if not inspect.isclass(recipeObj):
                recipeFiles = dict((os.path.basename(x.getPath()), x)
                    for x in recipeObj.getSourcePathList())
                newFiles = set(x[1] for x in newTrove.iterFileList())

                needFiles = set(recipeFiles) - newFiles
                for autoPath in needFiles:
                    source = recipeFiles[autoPath]
                    if (autoPath in oldFiles
                            and not self.helper.plan.refreshSources
                            and not source.ephemeral):
                        # File exists in old version.
                        pathId, path, fileId, fileVer = oldFiles[autoPath]
                        newTrove.addFile(pathId, path, fileVer, fileId)
                        continue

                    if source.ephemeral and not ephDir:
                        continue

                    # File doesn't exist; need to create it.
                    if source.ephemeral:
                        laUrl = lookaside.laUrl(source.getPath())
                        tempDir = joinPaths(ephDir,
                                os.path.dirname(laUrl.filePath()))
                        mkdirChain(tempDir)
                    else:
                        tempDir = tempfile.mkdtemp()
                        deleteDirs.add(tempDir)
                    snapshot = _getSnapshot(self.helper, package, source,
                            tempDir)

                    if not source.ephemeral and snapshot:
                        autoPathId = hashlib.md5(autoPath).digest()
                        autoObj = FileFromFilesystem(snapshot, autoPathId)
                        autoObj.flags.isAutoSource(set=True)
                        autoObj.flags.isSource(set=True)
                        autoFileId = autoObj.fileId()

                        autoContents = filecontents.FromFilesystem(snapshot)
                        filesToAdd[autoFileId] = (autoObj, autoContents, False)
                        newTrove.addFile(autoPathId, autoPath,
                            newTrove.getVersion(), autoFileId)

            # If the old and new troves are identical, just use the old one.
            if oldTrove and _sourcesIdentical(
                    oldTrove, newTrove, [self.oldChangeSet, filesToAdd]):
                package.setDownstreamVersion(oldTrove.getVersion())
                log.debug('Skipped %s=%s', oldTrove.getName(),
                        oldTrove.getVersion())
                continue

            # Add files and contents to changeset.
            for fileId, (fileObj, fileContents, cfgFile) in filesToAdd.items():
                changeSet.addFileContents(fileObj.pathId(), fileObj.fileId(),
                    ChangedFileTypes.file, fileContents, cfgFile)
                changeSet.addFile(None, fileObj.fileId(), fileObj.freeze())

            # Create a changelog entry.
            changeLog = ChangeLog(
                name=self.helper.cfg.name, contact=self.helper.cfg.contact,
                message=self.helper.plan.commitMessage + '\n')
            newTrove.changeChangeLog(changeLog)

            # Calculate trove digests and add the trove to the changeset
            newTrove.invalidateDigests()
            newTrove.computeDigests()
            newTroveCs = newTrove.diff(None, absolute=True)[0]
            changeSet.newTrove(newTroveCs)
            doCommit = True

            package.setDownstreamVersion(newTrove.getVersion())
            log.debug('Created %s=%s', newTrove.getName(), newTrove.getVersion())

        if doCommit:
            cook.signAbsoluteChangesetByConfig(changeSet, self.helper.cfg)
            f = tempfile.NamedTemporaryFile(dir=os.getcwd(), suffix='.ccs',
                    delete=False)
            f.close()
            changeSet.writeToFile(f.name)
            try:
                self.helper.getRepos().commitChangeSet(changeSet)
            except:
                log.error("Error committing changeset to repository, "
                        "failed changeset is saved at %s", f.name)
                raise
            else:
                os.unlink(f.name)

        for path in deleteDirs:
            shutil.rmtree(path)