Code example #1 (score: 0)
    def testFileObjMissing(self):
        """Verify the repository rejects a changeset missing a file stream.

        The changeset carries the trove diff and the file *contents*, but
        the file stream itself is never added (cs.addFile() is deliberately
        skipped), producing an incomplete changeset that commitChangeSet()
        must refuse with errors.IntegrityError.
        """
        # create an absolute changeset
        cs = changeset.ChangeSet()

        # add a pkg diff
        flavor = deps.deps.parseFlavor('')
        v = versions.VersionFromString('/%s/1.0-1-1'
                                       %self.cfg.buildLabel.asString()).copy()
        v.resetTimeStamps()
        t = trove.Trove('test:test', v, flavor, None)
        # lay down a real file on disk so we have contents to add
        path = self.workDir + '/blah'
        f = open(path, 'w')
        f.write('hello, world!\n')
        f.close()
        pathId = sha1helper.md5String('/blah')
        f = files.FileFromFilesystem(path, pathId)
        # add the file, and SKIP including
        # the filestream by using cs.addFile().  This creates an
        # incomplete changeset
        t.addFile(pathId, '/blah', v, f.fileId())
        cs.addFileContents(pathId, f.fileId(), changeset.ChangedFileTypes.file,
                           filecontents.FromFilesystem(path),
                           f.flags.isConfig())

        t.computeDigests()
        diff = t.diff(None, absolute = 1)[0]
        cs.newTrove(diff)

        repos = self.openRepository()
        try:
            repos.commitChangeSet(cs)
            assert 0, "Did not raise IntegrityError"
        except errors.IntegrityError, e:
            # The error names the offending pathId (the md5 of '/blah')
            # so callers can identify which file stream is missing.
            assert(str(e).startswith("Incomplete changeset specified: missing pathId e806729b6a2b568fa7e77c3efa3a9684 fileId"))
Code example #2 (score: 0)
File: verify.py — Project: pombreda/conary-1
 def _addFile(self, cs, trv, path):
     """Capture *path* from the filesystem into trove *trv* and changeset *cs*.

     The pathId is derived from the trove-relative path, while the file
     itself is read from the corresponding location under the config root.
     Contents are attached only for file types that carry any.
     """
     digest = sha1helper.md5String(path)
     onDisk = self.cfg.root + path
     stream = files.FileFromFilesystem(onDisk, digest)
     streamId = stream.fileId()
     trv.addFile(digest, path, trv.getVersion(), streamId)
     cs.addFile(None, streamId, stream.freeze())
     if stream.hasContents:
         cs.addFileContents(digest, streamId, changeset.ChangedFileTypes.file,
                            filecontents.FromFilesystem(onDisk), False)
Code example #3 (score: 0)
File: sigtest.py — Project: tensor5/conary
    def testFileUpdateMissingKey(self):
        """Update from a signed changeset while the signing key is absent.

        Builds and signs an absolute changeset, then swaps in a fresh
        OpenPGP key cache whose pubring lives in a brand-new temp dir (so
        the signing key cannot be found locally) before running updatePkg.
        The original pubRing and key cache are restored in the finally
        block regardless of outcome.
        """
        # fingerprint of the key used to sign the changeset below
        fingerprint = '95B457D16843B21EA3FC73BBC7C32FC1F94E405E'

        # make a changeset with useful stuff that could be installed
        cs = changeset.ChangeSet()
        flavor = deps.parseFlavor('')
        v = versions.VersionFromString('/%s/1.0-1-1' %
                                       self.cfg.buildLabel.asString()).copy()
        v.resetTimeStamps()
        t = trove.Trove('test:test', v, flavor, None)

        # real on-disk file to provide contents for the trove
        path = self.workDir + '/blah'
        f = open(path, 'w')
        f.write('hello, world!\n')
        f.close()
        pathId = sha1helper.md5String('/blah')
        f = files.FileFromFilesystem(path, pathId)

        fileId = f.fileId()
        t.addFile(pathId, '/blah', v, fileId)
        cs.addFile(None, fileId, f.freeze())
        cs.addFileContents(pathId, fileId, changeset.ChangedFileTypes.file,
                           filecontents.FromFilesystem(path),
                           f.flags.isConfig())
        diff = t.diff(None, absolute=1)[0]
        cs.newTrove(diff)
        cs.addPrimaryTrove('test:test', v, flavor)

        # sign the changeset
        csPath = os.path.join(self.workDir, 'test-1.0-1.ccs')
        cs = cook.signAbsoluteChangeset(cs, fingerprint)
        cs.writeToFile(csPath)

        # NOTE(review): tmpPath is never removed by this test.
        tmpPath = mkdtemp()

        # remember the current cache so it can be restored afterwards
        keyCache = openpgpkey.getKeyCache()
        newKeyCache = openpgpkey.OpenPGPKeyFileCache()

        pubRing = self.cfg.pubRing

        # point at an empty keyring in the temp dir; the signing key is
        # therefore not available to the new cache
        self.cfg.pubRing = [tmpPath + '/pubring.gpg']
        keyCacheCallback = openpgpkey.KeyCacheCallback(None, self.cfg)
        newKeyCache.setCallback(keyCacheCallback)

        openpgpkey.setKeyCache(newKeyCache)

        try:
            self.updatePkg(self.rootDir, csPath)
        finally:
            # restore global key-cache state for subsequent tests
            self.cfg.pubRing = pubRing
            openpgpkey.setKeyCache(keyCache)
Code example #4 (score: 0)
    def getFileContentsObjects(self, server, fileList, callback, outF,
                               compressed):
        """Return file-contents objects for *fileList*, shim-aware.

        Non-shim servers are delegated to the regular network client
        implementation.  Shim servers hand back local file paths; each is
        wrapped either directly (compressed=True) or through a gzip reader
        so callers see the uncompressed stream.
        """
        proxy = self.c[server]
        if not isinstance(proxy, ShimServerProxy):
            return netclient.NetworkRepositoryClient.getFileContentsObjects(
                self, server, fileList, callback, outF, compressed)
        results = []
        for contentPath in proxy.getFileContents(fileList):
            if compressed:
                obj = filecontents.FromFilesystem(contentPath,
                                                  compressed = True)
            else:
                # contents are stored gzipped on disk; decompress on read
                obj = filecontents.FromFile(gzip.GzipFile(contentPath, "r"))
            results.append(obj)
        return results
Code example #5 (score: 0)
File: changesettest.py — Project: tensor5/conary
    def testChangeSetFromFile(self):
        """Round-trip an absolute changeset with a config file through disk.

        Guards against a regression where writing a changeset back out
        turned config-file contents into diffs: after two write/read
        cycles the contents must still be of type 'file'.
        """
        # lay down a contents file on disk
        cont = self.workDir + '/contents'
        fh = open(cont, 'w')
        fh.write('hello, world!\n')
        fh.close()
        pathId = sha1helper.md5FromString('0' * 32)
        fileObj = files.FileFromFilesystem(cont, pathId)
        fileObj.flags.isConfig(1)

        # build an absolute changeset holding a single trove diff
        cs = changeset.ChangeSet()
        v = versions.VersionFromString('/localhost@rpl:devel/1.0-1-1',
                                       timeStamps=[1.000])
        flavor = deps.parseFlavor('')
        trv = trove.Trove('test', v, flavor, None)
        trv.addFile(pathId, '/contents', v, fileObj.fileId())
        cs.newTrove(trv.diff(None, absolute=1)[0])

        # attach the file stream and its contents
        cs.addFile(None, fileObj.fileId(), fileObj.freeze())
        cs.addFileContents(pathId, fileObj.fileId(),
                           changeset.ChangedFileTypes.file,
                           filecontents.FromFilesystem(cont),
                           fileObj.flags.isConfig())

        # write, reread, rewrite and reread again
        cs.writeToFile(self.workDir + '/foo.ccs')
        cs2 = changeset.ChangeSetFromFile(self.workDir + '/foo.ccs')
        cs2.writeToFile(self.workDir + '/bar.ccs')
        cs3 = changeset.ChangeSetFromFile(self.workDir + '/bar.ccs')

        # verify that the file is a file, not a diff
        ctype, contents = cs3.getFileContents(pathId, fileObj.fileId())
        assert (ctype == changeset.ChangedFileTypes.file)
Code example #6 (score: 0)
 def getFileContentsFromTrove(self, n, v, f, pathList,
                              callback = None, compressed = False):
     """Return contents objects for the given paths of trove (n, v, f).

     Non-shim servers are delegated to the regular network client
     implementation.  For shim servers the repository returns local file
     paths; each one is wrapped directly (compressed=True) or through a
     gzip reader so callers receive the uncompressed stream.
     """
     server = v.trailingLabel().getHost()
     if not isinstance(self.c[server], ShimServerProxy):
         return netclient.NetworkRepositoryClient.getFileContentsFromTrove(
             self, n, v, f, pathList, callback = callback,
             compressed = compressed)
     # convert to wire representations before hitting the shim server
     pathList = [self.fromPath(x) for x in pathList]
     v = self.fromVersion(v)
     f = self.fromFlavor(f)
     filePaths = self.c[server].getFileContentsFromTrove(n,v,f,
                                                         pathList)
     fileObjList = []
     for path in filePaths:
         if compressed:
             fileObjList.append(
                 filecontents.FromFilesystem(path, compressed = True))
         else:
             # Bug fix: the gzip handle used to be bound to 'f',
             # silently clobbering the (already-converted) flavor
             # argument; use a dedicated name instead.
             gzFile = gzip.GzipFile(path, "r")
             fileObjList.append(filecontents.FromFile(gzFile))
     return fileObjList
Code example #7 (score: 0)
    def _merge(self):
        """Build and commit one source changeset covering all planned packages.

        For each (package, recipe, old trove) triple: construct a new
        source trove, recycling pathIds (and, where fileIds match, file
        versions) from the previous version; fetch any autosource files
        the recipe needs but the trove lacks; skip packages whose sources
        are unchanged from the old trove; finally sign and commit a single
        changeset for everything that changed, saving it to a .ccs file if
        the commit fails.
        """
        changeSet = ChangeSet()
        deleteDirs = set()
        doCommit = False
        # If this is not None then all ephemeral sources will still be fetched
        # but will be placed in this directory instead.
        if self.helper.plan.ephemeralSourceDir:
            ephDir = self.helper.makeEphemeralDir()
        else:
            ephDir = None

        def _addFile(path, contents, isText):
            # Closure over the per-package oldFiles/filesToAdd/newTrove
            # bindings below: adds one regular file to the new trove,
            # reusing identifiers from the old version where possible.
            if path in oldFiles:
                # Always recycle pathId if available.
                pathId, _, oldFileId, oldFileVersion = oldFiles[path]
            else:
                pathId = hashlib.md5(path).digest()
                oldFileId = oldFileVersion = None

            fileHelper = filetypes.RegularFile(contents=contents,
                    config=isText)
            fileStream = fileHelper.get(pathId)
            fileStream.flags.isSource(set=True)
            fileId = fileStream.fileId()

            # If the fileId matches, recycle the fileVersion too.
            if fileId == oldFileId:
                fileVersion = oldFileVersion
            else:
                fileVersion = newTrove.getVersion()

            filesToAdd[fileId] = (fileStream, fileHelper.contents, isText)
            newTrove.addFile(pathId, path, fileVersion, fileId)

        for package, (recipeText, recipeObj), oldTrove in zip(
                self.packages, self.recipes, self.oldTroves):

            # Per-package state consumed by the _addFile closure above.
            filesToAdd = {}
            oldFiles = {}
            if oldTrove is not None:
                for pathId, path, fileId, fileVer in oldTrove.iterFileList():
                    oldFiles[path] = (pathId, path, fileId, fileVer)
            newTrove = Trove(package.name, package.nextVersion, deps.Flavor())
            newTrove.setFactory(package.targetConfig.factory)

            # Add upstream files to new trove. Recycle pathids from the old
            # version.
            # LAZY: assume that everything other than the recipe is binary.
            # Conary has a magic module, but it only accepts filenames!
            for path, contents in package.recipeFiles.iteritems():
                isText = path == package.getRecipeName()
                _addFile(path, contents, isText)

            # Collect requested auto sources from recipe. Unknown recipe types
            # will not be loaded so recipeObj will be the class, so assume
            # these have no sources.
            if not inspect.isclass(recipeObj):
                recipeFiles = dict((os.path.basename(x.getPath()), x)
                    for x in recipeObj.getSourcePathList())
                newFiles = set(x[1] for x in newTrove.iterFileList())

                needFiles = set(recipeFiles) - newFiles
                for autoPath in needFiles:
                    source = recipeFiles[autoPath]
                    if (autoPath in oldFiles
                            and not self.helper.plan.refreshSources
                            and not source.ephemeral):
                        # File exists in old version.
                        pathId, path, fileId, fileVer = oldFiles[autoPath]
                        newTrove.addFile(pathId, path, fileVer, fileId)
                        continue

                    # Ephemeral sources are only fetched when a destination
                    # directory was configured (see ephDir above).
                    if source.ephemeral and not ephDir:
                        continue

                    # File doesn't exist; need to create it.
                    if source.ephemeral:
                        laUrl = lookaside.laUrl(source.getPath())
                        tempDir = joinPaths(ephDir,
                                os.path.dirname(laUrl.filePath()))
                        mkdirChain(tempDir)
                    else:
                        # temp dirs are cleaned up at the end of _merge
                        tempDir = tempfile.mkdtemp()
                        deleteDirs.add(tempDir)
                    snapshot = _getSnapshot(self.helper, package, source,
                            tempDir)

                    # Only non-ephemeral snapshots become trove files.
                    if not source.ephemeral and snapshot:
                        autoPathId = hashlib.md5(autoPath).digest()
                        autoObj = FileFromFilesystem(snapshot, autoPathId)
                        autoObj.flags.isAutoSource(set=True)
                        autoObj.flags.isSource(set=True)
                        autoFileId = autoObj.fileId()

                        autoContents = filecontents.FromFilesystem(snapshot)
                        filesToAdd[autoFileId] = (autoObj, autoContents, False)
                        newTrove.addFile(autoPathId, autoPath,
                            newTrove.getVersion(), autoFileId)

            # If the old and new troves are identical, just use the old one.
            if oldTrove and _sourcesIdentical(
                    oldTrove, newTrove, [self.oldChangeSet, filesToAdd]):
                package.setDownstreamVersion(oldTrove.getVersion())
                log.debug('Skipped %s=%s', oldTrove.getName(),
                        oldTrove.getVersion())
                continue

            # Add files and contents to changeset.
            for fileId, (fileObj, fileContents, cfgFile) in filesToAdd.items():
                changeSet.addFileContents(fileObj.pathId(), fileObj.fileId(),
                    ChangedFileTypes.file, fileContents, cfgFile)
                changeSet.addFile(None, fileObj.fileId(), fileObj.freeze())

            # Create a changelog entry.
            changeLog = ChangeLog(
                name=self.helper.cfg.name, contact=self.helper.cfg.contact,
                message=self.helper.plan.commitMessage + '\n')
            newTrove.changeChangeLog(changeLog)

            # Calculate trove digests and add the trove to the changeset
            newTrove.invalidateDigests()
            newTrove.computeDigests()
            newTroveCs = newTrove.diff(None, absolute=True)[0]
            changeSet.newTrove(newTroveCs)
            doCommit = True

            package.setDownstreamVersion(newTrove.getVersion())
            log.debug('Created %s=%s', newTrove.getName(), newTrove.getVersion())

        if doCommit:
            cook.signAbsoluteChangesetByConfig(changeSet, self.helper.cfg)
            # Keep a copy on disk so a failed commit can be inspected;
            # delete it only after a successful commit.
            f = tempfile.NamedTemporaryFile(dir=os.getcwd(), suffix='.ccs',
                    delete=False)
            f.close()
            changeSet.writeToFile(f.name)
            try:
                self.helper.getRepos().commitChangeSet(changeSet)
            except:
                log.error("Error committing changeset to repository, "
                        "failed changeset is saved at %s", f.name)
                raise
            else:
                os.unlink(f.name)

        for path in deleteDirs:
            shutil.rmtree(path)
Code example #8 (score: 0)
    def _addPhantomContents(self, changeSet, trv, header):
        """Fabricate file streams for every path listed in an RPM header.

        Stat metadata (mode, owner, sizes, mtimes, ...) comes from the
        header via a FakeStat shim rather than a real stat; contents are
        read from the filesystem under self.root.  Each fabricated file
        is added to *trv* and *changeSet*; config-file contents (or an
        empty placeholder when the file is missing) are included so they
        reach the database, while encapsulated contents are omitted.
        """
        for (path, owner, group, mode, size, rdev, flags, vflags, linkto,
             mtime) in itertools.izip(
                 header[rpmhelper.OLDFILENAMES],
                 header[rpmhelper.FILEUSERNAME],
                 header[rpmhelper.FILEGROUPNAME],
                 header[rpmhelper.FILEMODES],
                 header[rpmhelper.FILESIZES],
                 header[rpmhelper.FILERDEVS],
                 header[rpmhelper.FILEFLAGS],
                 header[rpmhelper.FILEVERIFYFLAGS],
                 header[rpmhelper.FILELINKTOS],
                 header[rpmhelper.FILEMTIMES],
             ):
            fullPath = util.joinPaths(self.root, path)
            # Synthesize stat results from header fields (inode 0, one link).
            fakestat = FakeStat(mode,
                                0,
                                None,
                                1,
                                owner,
                                group,
                                size,
                                mtime,
                                mtime,
                                mtime,
                                st_rdev=rdev,
                                linkto=linkto)
            # Fresh random pathId; these files have no prior trove identity.
            pathId = os.urandom(16)

            # Adapted from conary.build.source.addCapsule.doRPM
            # Classify the file as regular / config / initial-contents
            # based on the RPM file flags and verify flags.
            kind = 'regular'
            if flags & rpmhelper.RPMFILE_GHOST:
                kind = 'initial'
            elif flags & (rpmhelper.RPMFILE_CONFIG
                          | rpmhelper.RPMFILE_MISSINGOK
                          | rpmhelper.RPMFILE_NOREPLACE):
                if size:
                    kind = 'config'
                else:
                    kind = 'initial'
            elif vflags:
                # Files whose digest (or link target) is not verified are
                # treated as initial contents as well.
                if (stat.S_ISREG(mode)
                        and not (vflags & rpmhelper.RPMVERIFY_FILEDIGEST)
                        or (stat.S_ISLNK(mode)
                            and not (vflags & rpmhelper.RPMVERIFY_LINKTO))):
                    kind = 'initial'
            # Ignore failures trying to sha1 missing/inaccessible files as long
            # as those files are flagged initial contents (ghost)
            fileStream = files.FileFromFilesystem(fullPath,
                                                  pathId,
                                                  statBuf=fakestat,
                                                  sha1FailOk=True)
            if kind == 'config':
                fileStream.flags.isConfig(set=True)
            elif kind == 'initial':
                fileStream.flags.isInitialContents(set=True)
            else:
                assert kind == 'regular'

            # From conary.build.capsulepolicy.Payload
            if (isinstance(fileStream, files.RegularFile)
                    and not fileStream.flags.isConfig()
                    and not (fileStream.flags.isInitialContents()
                             and not fileStream.contents.size())):
                fileStream.flags.isEncapsulatedContent(set=True)

            fileId = fileStream.fileId()
            trv.addFile(pathId, path, trv.getVersion(), fileId)
            changeSet.addFile(None, fileId, fileStream.freeze())
            # Config file contents have to go into the database, so snag the
            # contents from the filesystem and put them in the changeset.
            if (fileStream.hasContents
                    and not fileStream.flags.isEncapsulatedContent()):
                if fileStream.contents.sha1() == sha1helper.sha1Empty:
                    # Missing/ghost config file. Hopefully it is supposed to be
                    # empty, but even if not then the fake SHA-1 will be the
                    # SHA-1 of the empty string since there's no hint of what
                    # it was supposed to be.
                    contents = filecontents.FromString('')
                else:
                    contents = filecontents.FromFilesystem(fullPath)
                changeSet.addFileContents(
                    pathId,
                    fileId,
                    contType=changeset.ChangedFileTypes.file,
                    contents=contents,
                    cfgFile=fileStream.flags.isConfig(),
                )
Code example #9 (score: 0)
    def install(self, flags, troveCs):
        """Plan filesystem restoration for installing an RPM-capsule trove.

        Walks every file in the new trove (not just the changed ones,
        because RPM installs all of the files), decides per path whether
        to restore, skip, or report a conflict, and records the outcome in
        self.fsJob.  Returns None when the parent class decides this
        troveCs should be ignored.
        """
        # per-file dispositions decided by the loop below
        ACTION_RESTORE = 1
        ACTION_SKIP = 2
        ACTION_CONFLICT = 3

        rc = SingleCapsuleOperation.install(self, flags, troveCs)
        if rc is None:
            # parent class thinks we should just ignore this troveCs; I'm
            # not going to argue with it (it's probably because the capsule
            # hasn't changed
            return None

        (oldTrv, trv) = rc
        trvInfo = troveCs.getNewNameVersionFlavor()
        oldTrvInfo = troveCs.getOldNameVersionFlavor()
        hasCapsule = troveCs.hasCapsule()

        # Updates the fsJob metadata for installing the current trove.
        # It assumes files are replaced on install, and complains if something
        # is in the way unless the appropriate flags are set. This is a very
        # much simplified version of FilesystemJob._singleTrove() which maps
        # out a complete install strategy for native packages. Note that
        # we walk all of the files in this trove, not just the new files
        # or the changed files, because RPM installs all of the files.
        toRestore = []

        changedByPathId = dict((x[0], x) for x in troveCs.getChangedFileList())

        # things which aren't change, new, or removed are unchanged
        unchangedByPathId = (set(x[0] for x in trv.iterFileList()) -
                             set(changedByPathId.iterkeys()) -
                             set(x[0] for x in troveCs.getNewFileList()) -
                             set(troveCs.getOldFileList()))

        # Build the (pathId, fileId, version) list for a batched database
        # lookup of the old file streams: changed files first, then
        # unchanged ones.
        l = []
        for oldFileInfo in troveCs.getChangedFileList():
            oldFileId, oldVersion = oldTrv.getFile(oldFileInfo[0])[1:3]
            l.append((oldFileInfo[0], oldFileId, oldVersion))

        for unchangedPathId in unchangedByPathId:
            unchangedFileId, unchangedFileVersion = \
                                    trv.getFile(unchangedPathId)[1:3]
            l.append((unchangedPathId, unchangedFileId, unchangedFileVersion))

        fileObjs = self.db.getFileVersions(l)
        fileObjsByPathId = dict([(x[0], y)
                                 for x, y in itertools.izip(l, fileObjs)])

        for fileInfo in trv.iterFileList():
            pathId, path, fileId, version = fileInfo

            if os.path.dirname(path) in self.netSharedPath:
                # we do nothing. really. nothing.
                #
                # we don't back it up. we don't mark it as removed in
                # our database. we don't look for conflicts. nothing.
                continue

            # Reconstruct the new file object for this path: merge the
            # stored stream with the diff for changed files, reuse the
            # stored stream for unchanged files, or thaw a fresh stream
            # for new files.
            if pathId in changedByPathId:
                oldFileId = oldTrv.getFile(pathId)[1]
                fileChange = self.changeSet.getFileChange(oldFileId, fileId)
                if (oldFileId == fileId):
                    # only the version number changed; we don't need
                    # to merge anything here
                    fileObj = fileObjsByPathId[pathId]
                elif fileChange[0] == '\x01':
                    # '\x01' marks a diff-format stream; apply it in place
                    fileObj = fileObjsByPathId[pathId]
                    fileObj.twm(fileChange, fileObj)
                else:
                    fileObj = files.ThawFile(fileChange, pathId)
            elif pathId in unchangedByPathId:
                fileObj = fileObjsByPathId[pathId]
            else:
                # if it's not changed and it's not unchanged, it must be new
                fileStream = self.changeSet.getFileChange(None, fileId)
                fileObj = files.ThawFile(fileStream, pathId)

            absolutePath = util.joinPaths(self.root, path)

            if (fileObj.flags.isCapsuleAddition()):
                # this was added to the package outside of the RPM; we don't
                # have any responsibility for it
                continue
            elif (trove.conaryContents(hasCapsule, pathId, fileObj)
                  and fileObj.lsTag != 'd'):
                # this content isn't part of the capsule; remember to put
                # it back when RPM is done
                self.preservePath(path, unlink=True)
                continue

            s = util.lstat(absolutePath)
            if not s:
                # there is nothing in the way, so there is nothing which
                # concerns us here. Track the file for later.
                toRestore.append((fileInfo, fileObj))
                continue

            # Something exists at this path; assume conflict until one of
            # the checks below proves the file can be restored or shared.
            action = ACTION_CONFLICT

            existingOwners = list(
                self.db.iterFindPathReferences(path,
                                               justPresent=True,
                                               withStream=True))

            if existingOwners:
                # Don't complain about files owned by the previous version
                # of this trove.
                l = [x for x in existingOwners if x[0:3] == oldTrvInfo]
                if l:
                    existingOwners.remove(l[0])

                if not existingOwners:
                    action = ACTION_RESTORE
            elif stat.S_ISDIR(s.st_mode) and fileObj.lsTag == 'd':
                # Don't let existing directories stop us from taking over
                # ownership of the directory
                action = ACTION_RESTORE
            elif fileObj.flags.isInitialContents():
                # Initial contents files may be restored on top of things
                # already in the filesystem. They're ghosts or config files
                # and RPM will get the contents right either way, and we
                # should remove them either way.
                action = ACTION_RESTORE

            if action == ACTION_CONFLICT and not existingOwners:
                # Check for "conflicts" that might just be a view across a
                # symlink.
                if self.fsJob.findAliasedRemovals(absolutePath):
                    action = ACTION_RESTORE

            if action == ACTION_CONFLICT and existingOwners:
                if fileId in [x[4] for x in existingOwners]:
                    # The files share metadata same. Whatever it looks like on
                    # disk, RPM is going to blow it away with the new one.
                    for info in existingOwners:
                        self.fsJob.sharedFile(info[0], info[1], info[2],
                                              info[3])
                    action = ACTION_RESTORE
                elif path.startswith('/usr/share/doc/'):
                    # Mirror badness Red Hat patches into RPM for rhel4
                    # and rhel5
                    action = ACTION_RESTORE
                else:
                    existingFiles = [
                        files.ThawFile(x[5], pathId) for x in existingOwners
                    ]

                    compatibility = [
                        1 for x in existingFiles if fileObj.compatibleWith(x)
                    ]

                    if 1 in compatibility:
                        # files can be shared even though the fileId's
                        # are different
                        for info in existingOwners:
                            self.fsJob.sharedFile(info[0], info[1], info[2],
                                                  info[3])
                        action = ACTION_RESTORE
                    elif 1 in [
                            files.rpmFileColorCmp(x, fileObj)
                            for x in existingFiles
                    ]:
                        # rpm file colors and the default rpm setting for
                        # file color policy make elf64 files silently replace
                        # elf32 files. follow that behavior here.
                        #
                        # no, i'm not making this up
                        #
                        # yes, really
                        action = ACTION_SKIP
                    elif (self._checkReplaceManagedFiles(flags, path) or 1 in [
                            files.rpmFileColorCmp(fileObj, x)
                            for x in existingFiles
                    ]):
                        # The files are different. Bail unless we're supposed
                        # to replace managed files.
                        existingFile = files.FileFromFilesystem(
                            absolutePath, pathId)
                        for info in existingOwners:
                            self.fsJob.userRemoval(
                                fileObj=existingFile,
                                content=filecontents.FromFilesystem(
                                    absolutePath),
                                *info[0:4])
                        action = ACTION_RESTORE
                    else:
                        # it's not up to us to decide if this is a true
                        # conflict; the database layer will do that for
                        # us (see checkPathConflicts)
                        action = ACTION_RESTORE
            elif flags.replaceUnmanagedFiles:
                # we don't own it, but it's on disk. RPM will just write over
                # it and we have the flag saying we're good with that
                action = ACTION_RESTORE

            if action == ACTION_RESTORE:
                # We may proceed, and RPM will replace this file for us. We
                # need to track that it's being restored to avoid conflicts
                # with other restorations though.
                toRestore.append((fileInfo, fileObj))
            elif action == ACTION_CONFLICT:
                # The file exists already, we can't share it, and we're not
                # allowed to overwrite it.
                self._error(
                    errors.FileInWayError(util.normpath(path),
                                          troveCs.getName(),
                                          troveCs.getNewVersion(),
                                          troveCs.getNewFlavor()))
            else:
                assert (action == ACTION_SKIP)
                self.preservePath(path, unlink=False)
                self.fsJob.userRemoval(trv.getName(), trv.getVersion(),
                                       trv.getFlavor(), pathId)

        # toRestore is the list of what is going to be restored. We need to get
        # the fileObjects which will be created so we can track them in the
        # filesystem job. This lets the filesystem job look for resolveable
        # conflicts within this update. We handle newly created files first
        # and files which have changed (so we have to look up the diff)
        # a bit later.
        for fileInfo, fileObj in toRestore:
            # NOTE(review): 'path' and 'fileId' below are leftovers from the
            # last iteration of the loop above rather than being unpacked
            # from 'fileInfo'; this looks like it should use fileInfo's own
            # path/fileId — confirm against the upstream source.
            fullPath = util.joinPaths(self.root, path)
            self.fsJob._restore(fileObj,
                                fullPath,
                                trvInfo,
                                "restoring %s from RPM",
                                restoreFile=False,
                                fileId=fileId)