Пример #1
0
    def testConfigFilesRaisePathIdsConflict(self):
        # test to make sure that one changeset's config cache doesn't
        # override another's
        cs1 = changeset.ChangeSet()
        cs2 = changeset.ChangeSet()
        mergeSet = changeset.ReadOnlyChangeSet()

        # build two changesets, both with config file diffs that have the same
        # pathid and fileid
        cs1.addFileContents('0' * 16, '0' * 20, changeset.ChangedFileTypes.diff,
                            filecontents.FromString('first'), cfgFile = True)
        cs2.addFileContents('0' * 16, '0' * 20, changeset.ChangedFileTypes.diff,
                            filecontents.FromString('second'), cfgFile = True)
        mergeSet.merge(cs1)
        # second merge now handled without ChangeSetKeyConflictError: CNY-3635
        mergeSet.merge(cs1)

        cs1 = changeset.ChangeSet()
        cs1.addFileContents('0' * 16, '0' * 20, changeset.ChangedFileTypes.diff,
                            filecontents.FromString('first'), cfgFile = True)
        try:
            cs1.addFileContents('0' * 16, '0' * 20,
                                changeset.ChangedFileTypes.diff,
                                filecontents.FromString('second'),
                                cfgFile = True)
        except changeset.ChangeSetKeyConflictError, e:
            assert str(e) == 'ChangeSetKeyConflictError: 30303030303030303030303030303030,3030303030303030303030303030303030303030'
Пример #2
0
    def testConfigFileMerges(self):
        """Config file contents of every type must survive a merge into a
        ReadOnlyChangeSet; at one point only diff-type contents were merged.
        """
        target = changeset.ReadOnlyChangeSet()
        source = changeset.ChangeSet()

        # two distinct (pathId, fileId) config entries, same contents
        for digit in ('0', '1'):
            source.addFileContents(digit * 16, digit * 20,
                                   changeset.ChangedFileTypes.file,
                                   filecontents.FromString('first'),
                                   cfgFile = True)

        target.merge(source)
        # both entries must land in the merged config cache
        assert(len(target.configCache) == 2)
Пример #3
0
    def __init__(self, **kwargs):
        """Initialize via _File, then normalize self.contents: a plain
        string is wrapped with FromString, anything else is assumed to be
        a file-like object and wrapped with FromFile."""
        _File.__init__(self, **kwargs)
        if isinstance(self.contents, str):
            wrapped = filecontents.FromString(self.contents)
        else:
            wrapped = filecontents.FromFile(self.contents)
        self.contents = wrapped

        self._enforceMutuallyExclusiveFlags()
Пример #4
0
 def testOne(s):
     """CNY-1896: large contents handed to DictAsCsf must come back
     gzip-compressed and decompress to the original data."""
     csf = changeset.DictAsCsf(
         {'id': (changeset.ChangedFileTypes.file,
                 filecontents.FromString(s), False)})
     compressedFile = csf.getNextFile()[2]
     # round-trip: decompressing the stored member yields the input
     assert (gzip.GzipFile('', 'r', fileobj=compressedFile).read() == s)
     return compressedFile
Пример #5
0
    def testJournal(self):
        """Restoring device nodes and owned files through a journal must
        record mknod/lchown operations on the journal instead of touching
        the filesystem directly."""
        class Journal:
            # minimal in-memory stand-in for a restore journal: it only
            # records the calls made against it
            def __init__(self):
                self.perms = []
                self.devnodes = []

            def lchown(self, root, target, uid, gid):
                self.perms.append((root, target, uid, gid))

            def mknod(self, root, target, devtype, major, minor, mode,
                      uid, gid):
                self.devnodes.append((root, target, devtype, major, minor,
                                      mode, uid, gid))
        path = tempfile.mkdtemp()
        try:
            journal = Journal()

            filelist = []
            for (name, cls) in (('/dev/block', files.BlockDevice),
                                ('/dev/char', files.CharacterDevice)):
                d = cls(None)
                d.inode.perms.set(0604)
                d.inode.mtime.set(0100)
                d.inode.owner.set("daemon")
                d.inode.group.set("uucp")
                # to make sure that referenced names "exist"
                files.userCache.nameCache['daemon'] = 2
                files.groupCache.nameCache['uucp'] = 14
                d.flags.set(0)
                d.devt.major.set(3)
                d.devt.minor.set(1)
                filelist.append((name, d))

            for name, d in filelist:
                p = path + name
                d.restore(None, path, p, journal=journal)
            # device nodes must be created via journal.mknod, with owner
            # and group passed by name (not uid/gid)
            assert(journal.devnodes ==
                   [(path, path + '/dev/block', 'b', 3, 1, 0604, 'daemon', 'uucp'),
                    (path, path + '/dev/char', 'c', 3, 1, 0604, 'daemon', 'uucp')])

            # a regular file's ownership change must go through journal.lchown
            d = files.RegularFile(None)
            # NOTE(review): 1755 has no leading 0, so it is decimal rather
            # than the octal mode 01755 — confirm this is intentional
            d.inode.perms.set(1755)
            d.inode.owner.set('root')
            d.inode.group.set('root')
            d.inode.mtime.set(0100)
            contents = filecontents.FromString('Hello, world')
            d.restore(contents, path, path + '/sbin/ping', journal=journal)
            assert(journal.perms == [(path, path + '/sbin/ping', 'root', 'root')])
        finally:
            shutil.rmtree(path)
Пример #6
0
    def save(self, path):
        """Write the trove cache out as a changeset file at *path*.

        Best-effort: any IOError/OSError (typically a permission problem
        when not running as root) aborts the save silently.
        """
        # return early if we aren't going to have permission to save
        try:
            fd, cacheName = tempfile.mkstemp(
                    prefix=os.path.basename(path) + '.',
                    dir=os.path.dirname(path))
            os.close(fd)
        except (IOError, OSError):
            # may not have permissions; say, not running as root
            return

        cs = changeset.ChangeSet()
        for withFiles, trv in self.cache.values():
            # we just assume everything in the cache is w/o files. it's
            # fine for system model, safe, and we don't need the cache
            # anywhere else
            cs.newTrove(trv.diff(None, absolute = True)[0])

        # NB: "fileid" and pathid got reversed here by mistake, try not to
        # think too hard about it.
        # The "%d %d" format implies self.VERSION is a two-int tuple.
        cs.addFileContents(
                           self._fileId,
                           self._troveCacheVersionPathId,
                           changeset.ChangedFileTypes.file,
                           filecontents.FromString("%d %d" % self.VERSION),
                           False)
        # the _save* helpers below append their sections to self._cs
        self._cs = cs
        self._saveTimestamps()
        self._saveDeps()
        self._saveDepSolutions()
        self._saveFileCache()
        self._cs = None

        try:
            try:
                # write to the temp file, copy the existing target's mode
                # (or default to 0644), then atomically rename into place
                cs.writeToFile(cacheName)
                if util.exists(path):
                    os.chmod(cacheName, os.stat(path).st_mode)
                else:
                    os.chmod(cacheName, 0644)
                os.rename(cacheName, path)
            except (IOError, OSError):
                # may not have permissions; say, not running as root
                pass
        finally:
            try:
                # remove the temp file if the rename never happened
                if os.path.exists(cacheName):
                    os.remove(cacheName)
            except OSError:
                pass
Пример #7
0
    def testFileChanges(self):
        """Corrupt selected file contents inside a changeset file and
        verify that both a local update and a repository commit detect the
        integrity failure."""
        (built, d) = self.buildRecipe(recipes.testRecipe1, "TestRecipe1")
        (name, version, flavor) = built[0]
        version = versions.VersionFromString(version)

        (fd, path) = tempfile.mkstemp()
        os.close(fd)

        repos = self.openRepository()
        repos.createChangeSetFile(
            [(name, (None, None), (version, flavor), 1)], path)

        infc = filecontainer.FileContainer(open(path))
        os.unlink(path)

        (fd, path) = tempfile.mkstemp()
        os.close(fd)
        outfc = filecontainer.FileContainer(open(path, "w"))

        n = 0
        while True:
            # renamed from 'next' to avoid shadowing the builtin
            entry = infc.getNextFile()
            if not entry:
                break
            (name, tag, f) = entry
            n += 1
            if n == 2 or n == 5:
                # flip the first byte of the decompressed contents to
                # corrupt the file
                contents = gzip.GzipFile(None, "r", fileobj = f).read()
                contents = chr(ord(contents[0]) ^ 0xff) + contents[1:]
                outfc.addFile(name, filecontents.FromString(contents), tag)
            else:
                # pass everything else through untouched, still compressed
                # (fixed: a duplicate, unused FromFile(f) was built here)
                outfc.addFile(name, filecontents.FromFile(f), tag,
                        precompressed=True)

        infc.close()
        outfc.close()

        self.resetRepository()
        repos = self.openRepository()

        try:
            self.assertRaises(errors.IntegrityError,
                              self.updatePkg, self.rootDir, path)
            self.assertRaises(errors.IntegrityError,
                              repos.commitChangeSetFile, path)
        finally:
            os.unlink(path)
Пример #8
0
 def testFileId(self):
     # this test verifies that the value produced as the fileId
     # of a known stream matches its pre-calculated value.
     f = files.RegularFile(None)
     f.inode.perms.set(0604)
     f.inode.mtime.set(0100)
     f.inode.owner.set("daemon")
     f.inode.group.set("uucp")
     # to make sure that referenced names "exist"
     files.userCache.nameCache['daemon'] = 2
     files.groupCache.nameCache['uucp'] = 14
     s = "hello world"
     contents = filecontents.FromString(s)
     f.contents = files.RegularFileStream()
     f.contents.size.set(len(s))
     f.contents.sha1.set(sha1helper.sha1String(s))
     f.flags.set(0)
     # golden values: the frozen stream and its SHA-1 fileId for exactly
     # the file object built above; any stream-format change breaks these
     expectedId = '567355867fbbcb2be55d35c3d229a7df8152fdbc'
     self.assertEqual(f.freeze(), '-\x01\x00"\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x02\x00\x14*\xael5\xc9O\xcf\xb4\x15\xdb\xe9_@\x8b\x9c\xe9\x1e\xe8F\xed\x03\x00\x04\x00\x00\x00\x00\x05\x00\x1c\x01\x00\x02\x01\x84\x02\x00\x04\x00\x00\x00@\x03\x00\x06daemon\x04\x00\x04uucp')
     self.assertEqual(sha1helper.sha1ToString(f.fileId()), expectedId)
Пример #9
0
    def testChangeSetMerge(self):
        """Merging and resetting read-only changesets must be lossless:
        every write of the same data produces the same byte count (129)."""
        os.chdir(self.workDir)

        base = changeset.ChangeSet()
        pathId = '0' * 16
        fileId = '0' * 20
        base.addFileContents(pathId, fileId, changeset.ChangedFileTypes.file,
                             filecontents.FromString('zero'), False)
        assert (base.writeToFile('foo.ccs') == 129)

        merged = changeset.ReadOnlyChangeSet()
        merged.merge(base)
        # writing twice, with a reset in between, must be stable
        assert (merged.writeToFile('foo.ccs') == 129)
        merged.reset()
        assert (merged.writeToFile('foo.ccs') == 129)
        merged.reset()

        # a second-level merge must also round-trip identically
        remerged = changeset.ReadOnlyChangeSet()
        remerged.merge(merged)
        assert (remerged.writeToFile('foo.ccs') == 129)
        remerged.reset()
        assert (remerged.writeToFile('foo.ccs') == 129)
Пример #10
0
    def _restoreConfig(self, cs, configRestoreList):
        """Restore config file contents listed in *configRestoreList* from
        changeset *cs*: diff-type entries are patched against the old
        contents fetched from the repository, everything else is taken
        from the changeset directly."""
        # config files are cached, so we don't have to worry about not
        # restoring the same fileId/pathId twice
        # NOTE(review): 'newFileId' is bound twice in this unpacking
        # (fields 2 and 5); the first binding is clobbered by the fifth
        # field — confirm the tuple layout against the code that builds
        # configRestoreList.
        for (pathId, newFileId, sha1, oldfile, newFileId, oldVersion,
             oldFileId, restoreContents) in configRestoreList:
            if cs.configFileIsDiff(pathId, newFileId):
                (contType,
                 fileContents) = cs.getFileContents(pathId, newFileId)

                # the content for this file is in the form of a
                # diff, which we need to apply against the file in
                # the repository
                assert (oldVersion)

                try:
                    f = self.repos.getFileContents([(oldFileId, oldVersion,
                                                     oldfile)])[0].get()
                except KeyError:
                    raise errors.IntegrityError(
                        "Missing file contents for pathId %s, fileId %s" %
                        (sha1helper.md5ToString(pathId),
                         sha1helper.sha1ToString(oldFileId)))

                oldLines = f.readlines()
                f.close()
                del f
                diff = fileContents.get().readlines()
                (newLines, failedHunks) = patch.patch(oldLines, diff)
                fileContents = filecontents.FromString("".join(newLines))

                # every hunk must apply cleanly — the diff was generated
                # against exactly this base revision
                assert (not failedHunks)
            else:
                # config files are not always available compressed (due
                # to the config file cache)
                fileContents = filecontents.FromChangeSet(
                    cs, pathId, newFileId)

            self.addFileContents(sha1, fileContents, restoreContents, 1)
Пример #11
0
 def test07_BadRpmCapsule(self):
     """Corrupting the RPM payload inside a capsule changeset must raise
     Sha1Exception when the changeset is applied."""
     component = self.addRPMComponent("ghost:rpm=1.0", 'epoch-1.0-1.i386.rpm')
     repos = self.openRepository()
     orig = self.workDir + '/ghost.ccs'
     modified = self.workDir + '/ghost-new.ccs'
     repos.createChangeSetFile([ (component.getName(), (None, None),
                                  component.getNameVersionFlavor()[1:],
                                  True) ], orig)
     inFc = filecontainer.FileContainer(
                             util.ExtendedFile(orig, buffering = False))
     outFc = filecontainer.FileContainer(
                     util.ExtendedFile(modified, "w", buffering = False))
     # first member is the CONARYCHANGESET; copy it through unchanged
     (name, tag, contents) = inFc.getNextFile()
     outFc.addFile(name, filecontents.FromFile(contents), tag,
                   precompressed = True)
     # second member is the RPM itself; replace it with garbage
     (name, tag, contents) = inFc.getNextFile()
     outFc.addFile(name, filecontents.FromString("busted!"), tag)
     cs = changeset.ChangeSetFromFile(modified)
     # this throws away the output about the install failing
     self.assertRaises(files.Sha1Exception, self.captureOutput,
                       self.updatePkg, self.rootDir, cs)
Пример #12
0
    def _addPhantomContents(self, changeSet, trv, header):
        """Fabricate files for the given RPM header.

        For every file entry in the header, synthesize a conary file
        stream (using a fake stat built from the header fields), add it to
        trove *trv*, and put non-encapsulated contents into *changeSet*.
        """
        # walk all per-file header tags in lockstep
        for (path, owner, group, mode, size, rdev, flags, vflags, linkto,
             mtime) in itertools.izip(
                 header[rpmhelper.OLDFILENAMES],
                 header[rpmhelper.FILEUSERNAME],
                 header[rpmhelper.FILEGROUPNAME],
                 header[rpmhelper.FILEMODES],
                 header[rpmhelper.FILESIZES],
                 header[rpmhelper.FILERDEVS],
                 header[rpmhelper.FILEFLAGS],
                 header[rpmhelper.FILEVERIFYFLAGS],
                 header[rpmhelper.FILELINKTOS],
                 header[rpmhelper.FILEMTIMES],
             ):
            fullPath = util.joinPaths(self.root, path)
            # synthetic stat result so the file stream is built from header
            # metadata rather than whatever is (or isn't) on disk
            fakestat = FakeStat(mode,
                                0,
                                None,
                                1,
                                owner,
                                group,
                                size,
                                mtime,
                                mtime,
                                mtime,
                                st_rdev=rdev,
                                linkto=linkto)
            pathId = os.urandom(16)

            # Adapted from conary.build.source.addCapsule.doRPM
            kind = 'regular'
            if flags & rpmhelper.RPMFILE_GHOST:
                kind = 'initial'
            elif flags & (rpmhelper.RPMFILE_CONFIG
                          | rpmhelper.RPMFILE_MISSINGOK
                          | rpmhelper.RPMFILE_NOREPLACE):
                if size:
                    kind = 'config'
                else:
                    # zero-length config entries are treated as initial
                    # contents instead
                    kind = 'initial'
            elif vflags:
                # files whose digest (regular) or link target (symlink) is
                # not verified by RPM also become initial contents
                if (stat.S_ISREG(mode)
                        and not (vflags & rpmhelper.RPMVERIFY_FILEDIGEST)
                        or (stat.S_ISLNK(mode)
                            and not (vflags & rpmhelper.RPMVERIFY_LINKTO))):
                    kind = 'initial'
            # Ignore failures trying to sha1 missing/inaccessible files as long
            # as those files are flagged initial contents (ghost)
            fileStream = files.FileFromFilesystem(fullPath,
                                                  pathId,
                                                  statBuf=fakestat,
                                                  sha1FailOk=True)
            if kind == 'config':
                fileStream.flags.isConfig(set=True)
            elif kind == 'initial':
                fileStream.flags.isInitialContents(set=True)
            else:
                assert kind == 'regular'

            # From conary.build.capsulepolicy.Payload
            if (isinstance(fileStream, files.RegularFile)
                    and not fileStream.flags.isConfig()
                    and not (fileStream.flags.isInitialContents()
                             and not fileStream.contents.size())):
                fileStream.flags.isEncapsulatedContent(set=True)

            fileId = fileStream.fileId()
            trv.addFile(pathId, path, trv.getVersion(), fileId)
            changeSet.addFile(None, fileId, fileStream.freeze())
            # Config file contents have to go into the database, so snag the
            # contents from the filesystem and put them in the changeset.
            if (fileStream.hasContents
                    and not fileStream.flags.isEncapsulatedContent()):
                if fileStream.contents.sha1() == sha1helper.sha1Empty:
                    # Missing/ghost config file. Hopefully it is supposed to be
                    # empty, but even if not then the fake SHA-1 will be the
                    # SHA-1 of the empty string since there's no hint of what
                    # it was supposed to be.
                    contents = filecontents.FromString('')
                else:
                    contents = filecontents.FromFilesystem(fullPath)
                changeSet.addFileContents(
                    pathId,
                    fileId,
                    contType=changeset.ChangedFileTypes.file,
                    contents=contents,
                    cfgFile=fileStream.flags.isConfig(),
                )
Пример #13
0
 def _savePickle(self, pathId, data):
     """Serialize *data* with cPickle (protocol 2) and store it in the
     pending changeset under (pathId, self._fileId)."""
     blob = cPickle.dumps(data, 2)
     self._cs.addFileContents(pathId, self._fileId,
                              changeset.ChangedFileTypes.file,
                              filecontents.FromString(blob), False)
Пример #14
0
        cs1.addFileContents('0' * 16, '0' * 20, changeset.ChangedFileTypes.diff,
                            filecontents.FromString('first'), cfgFile = True)
        try:
            cs1.addFileContents('0' * 16, '0' * 20,
                                changeset.ChangedFileTypes.diff,
                                filecontents.FromString('second'),
                                cfgFile = True)
        except changeset.ChangeSetKeyConflictError, e:
            assert str(e) == 'ChangeSetKeyConflictError: 30303030303030303030303030303030,3030303030303030303030303030303030303030'
        else:
            assert(0)

        cs1 = changeset.ChangeSet()
        # this is blatantly illegal; diff non-config files!
        cs1.addFileContents('0' * 16, '0' * 20, changeset.ChangedFileTypes.diff,
                            filecontents.FromString('first'), cfgFile = False)
        try:
            cs1.addFileContents('0' * 16, '0' * 20,
                                changeset.ChangedFileTypes.diff,
                                filecontents.FromString('second'),
                                cfgFile = False)
        except changeset.ChangeSetKeyConflictError, e:
            assert str(e) == 'ChangeSetKeyConflictError: 30303030303030303030303030303030,3030303030303030303030303030303030303030'
        else:
            assert(0)

        # build a changeset, both with two config files with the same
        # pathid and fileid. One is a diff and the other is not.  This should
        # be ok the diff will be used.
        cs1 = changeset.ChangeSet()
        cs1.addFileContents('0' * 16, '0' * 20,
Пример #15
0
    def testOwnership(self):
        """Restoring files while mimicking root must resolve owner/group
        names through the user/group caches, fall back to root for unknown
        names (with a warning), and chmod special permission bits."""
        f = files.RegularFile(None)
        f.inode.perms.set(0604)
        f.inode.mtime.set(0100)
        f.inode.owner.set("daemon")
        f.inode.group.set("uucp")
        # to make sure that referenced names "exist"
        files.userCache.nameCache['daemon'] = 2
        files.userCache.nameCache['uucp'] = 10
        files.groupCache.nameCache['uucp'] = 14

        s = "hello world"
        contents = filecontents.FromString(s)
        f.contents = files.RegularFileStream()
        f.contents.size.set(len(s))
        f.contents.sha1.set(sha1helper.sha1String(s))

        f.flags.set(0)

        # and setuid root
        fr = files.RegularFile(None)
        fr.inode.perms.set(06755)
        fr.inode.mtime.set(0100)
        fr.inode.owner.set("root")
        fr.inode.group.set("root")
        fr.contents = files.RegularFileStream()
        fr.contents.size.set(len(s))
        fr.contents.sha1.set(sha1helper.sha1String(s))
        fr.flags.set(0)

        # and unwriteable
        fo = files.RegularFile(None)
        fo.inode.perms.set(0444)
        fo.inode.mtime.set(0100)
        fo.inode.owner.set("root")
        fo.inode.group.set("root")
        fo.contents = files.RegularFileStream()
        fo.contents.size.set(len(s))
        fo.contents.sha1.set(sha1helper.sha1String(s))
        fo.flags.set(0)

        # and secret
        fs = files.RegularFile(None)
        fs.inode.perms.set(0400)
        fs.inode.mtime.set(0100)
        fs.inode.owner.set("root")
        fs.inode.group.set("root")
        fs.contents = files.RegularFileStream()
        fs.contents.size.set(len(s))
        fs.contents.sha1.set(sha1helper.sha1String(s))
        fs.flags.set(0)

        # a copy must compare equal to the original
        f2 = f.copy()
        assert(f == f2)
        d = tempfile.mkdtemp()

        # before we mimic root, test a non-root of setu/gid file
        # (the setuid bit must NOT be applied when restoring as non-root)
        pr = d+"/setuid"
        fr.restore(contents, d, pr)
        assert not os.stat(pr).st_mode & 04000

        try:
            self.mimicRoot()
            # known owner/group names resolve via the caches: daemon->2,
            # group uucp->14
            p = d + "/file"
            f.restore(contents, d, p)
            assert self.compareChownLog([ (p, 2, 14) ])
            self.chownLog = []

            # unknown user falls back to uid 0 with a warning
            f.inode.owner.set("rootroot")
            self.logCheck(f.restore, (contents, d, p),
                          "warning: user rootroot does not exist - using root")
            assert self.compareChownLog([ (p, 0, 14) ])
            self.chownLog = []

            # unknown group falls back to gid 0 with a warning;
            # user uucp resolves to uid 10
            f.inode.owner.set("uucp")
            f.inode.group.set("grpgrp")
            self.logCheck(f.restore, (contents, d, p),
                          "warning: group grpgrp does not exist - using root")
            assert self.compareChownLog([ (p, 10, 0) ])

            # as (mimicked) root the setuid bits ARE applied
            self.chmodLog = []
            pr = d+"/setuid"
            fr.restore(contents, d, pr)
            assert self.compareChmodLog([ (pr, 06755) ])
            assert os.stat(pr).st_mode & 07777 == 06755

            self.chmodLog = []
            po = d+"/unwriteable"
            fo.restore(contents, d, po)
            assert self.compareChmodLog([ (po, 0444) ])
            assert os.stat(po).st_mode & 07777 == 0444

            self.chmodLog = []
            ps = d+"/secret"
            fs.restore(contents, d, ps)
            assert self.compareChmodLog([ (ps, 0400) ])
            assert os.stat(ps).st_mode & 07777 == 0400
            self.chmodLog = []
        finally:
            self.realRoot()
            shutil.rmtree(d)
Пример #16
0
    def createChangeSet(self, origTroveList, recurse = True,
                        withFiles = True, withFileContents = True,
                        excludeAutoSource = False,
                        mirrorMode = False, roleIds = None):
        """
        @param origTroveList: a list of
        C{(troveName, flavor, oldVersion, newVersion, absolute)} tuples.

        If C{oldVersion == None} and C{absolute == 0}, then the trove is
        assumed to be new for the purposes of the change set.

        If C{newVersion == None} then the trove is being removed.

        If recurse is set, this yields one result for the entire troveList.

        If recurse is not set, it yields one result per troveList entry.
        """
        cs = changeset.ChangeSet()
        externalTroveList = []
        externalFileList = []
        removedTroveList = []

        dupFilter = set()

        # make a copy to remove things from
        troveList = origTroveList[:]

        # def createChangeSet begins here

        troveWrapper = _TroveListWrapper(troveList, self.troveStore, withFiles,
                                         roleIds = roleIds)

        for (job, old, new, streams) in troveWrapper:
            (troveName, (oldVersion, oldFlavor),
                         (newVersion, newFlavor), absolute) = job

            # make sure we haven't already generated this changeset; since
            # troves can be included from other troves we could try
            # to generate quite a few duplicates
            if job in dupFilter:
                continue
            else:
                dupFilter.add(job)

            done = False
            if not newVersion:
                if oldVersion.getHost() not in self.serverNameList:
                    externalTroveList.append((troveName,
                                         (oldVersion, oldFlavor),
                                         (None, None), absolute))
                else:
                    # remove this trove and any trove contained in it
                    cs.oldTrove(troveName, oldVersion, oldFlavor)
                    for (name, version, flavor) in \
                                            old.iterTroveList(strongRefs=True):
                        troveWrapper.append((name, (version, flavor),
                                                   (None, None), absolute),
                                            False)
                done = True
            elif (newVersion.getHost() not in self.serverNameList
                or (oldVersion and
                    oldVersion.getHost() not in self.serverNameList)):
                # don't try to make changesets between repositories; the
                # client can do that itself

                # we don't generate chagnesets between removed and
                # present troves; that's up to the client
                externalTroveList.append((troveName, (oldVersion, oldFlavor),
                                     (newVersion, newFlavor), absolute))
                done = True
            elif (oldVersion and old.type() == trove.TROVE_TYPE_REMOVED):
                removedTroveList.append((troveName, (oldVersion, oldFlavor),
                                        (newVersion, newFlavor), absolute))
                done = True

            if done:
                if not recurse:
                    yield (cs, externalTroveList, externalFileList,
                           removedTroveList)

                    cs = changeset.ChangeSet()
                    externalTroveList = []
                    externalFileList = []
                    removedTroveList = []

                continue

            (troveChgSet, filesNeeded, pkgsNeeded) = \
                                new.diff(old, absolute = absolute)

            if recurse:
                for refJob in pkgsNeeded:
                    refOldVersion = refJob[1][0]
                    refNewVersion = refJob[2][0]
                    if (refNewVersion and
                           (refNewVersion.getHost() not in self.serverNameList)
                        or (refOldVersion and
                            refOldVersion.getHost() not in self.serverNameList)
                       ):
                        # don't try to make changesets between repositories; the
                        # client can do that itself
                        externalTroveList.append(refJob)
                    else:
                        troveWrapper.append(refJob, True)

            cs.newTrove(troveChgSet)

            if job in origTroveList and job[2][0] is not None:
                # add the primary w/ timestamps on the version
                try:
                    primary = troveChgSet.getNewNameVersionFlavor()
                    cs.addPrimaryTrove(*primary)
                except KeyError:
                    # primary troves could be in the externalTroveList, in
                    # which case they aren't primries
                    pass

            # sort the set of files we need into bins based on the server
            # name
            getList = []
            localFilesNeeded = []

            for (pathId, oldFileId, oldFileVersion, newFileId, newFileVersion) in filesNeeded:
                # if either the old or new file version is on a different
                # repository, creating this diff is someone else's problem
                if (newFileVersion.getHost() not in self.serverNameList
                    or (oldFileVersion and
                        oldFileVersion.getHost() not in self.serverNameList)):
                    externalFileList.append((pathId, troveName,
                         (oldVersion, oldFlavor, oldFileId, oldFileVersion),
                         (newVersion, newFlavor, newFileId, newFileVersion)))
                else:
                    localFilesNeeded.append((pathId, oldFileId, oldFileVersion,
                                             newFileId, newFileVersion))
                    if oldFileVersion:
                        getList.append((pathId, oldFileId, oldFileVersion))
                    getList.append((pathId, newFileId, newFileVersion))

            # Walk this in reverse order. This may seem odd, but the
            # order in the final changeset is set by sorting that happens
            # in the change set object itself. The only reason we sort
            # here at all is to make sure PTR file types come before the
            # file they refer to. Reverse shorting makes this a bit easier.
            localFilesNeeded.sort()
            localFilesNeeded.reverse()

            ptrTable = {}
            for (pathId, oldFileId, oldFileVersion, newFileId, \
                 newFileVersion) in localFilesNeeded:
                oldFile = None
                if oldFileVersion:
                    oldFile = files.ThawFile(streams[oldFileId], pathId)

                oldCont = None
                newCont = None

                newFile = files.ThawFile(streams[newFileId], pathId)

                # Skip identical fileids when mirroring, but always use
                # absolute file changes if there is any difference. See note
                # below.
                # In mirror mode a changed fileId forces an absolute file
                # changeset (diff against None) even when the SHA-1 did not
                # change -- see the CNY references further down.
                forceAbsolute = (mirrorMode and oldFileId
                        and oldFileId != newFileId)
                if forceAbsolute:
                    (filecs, contentsHash) = changeset.fileChangeSet(pathId,
                                                                     None,
                                                                     newFile)
                else:
                    (filecs, contentsHash) = changeset.fileChangeSet(pathId,
                                                                     oldFile,
                                                                     newFile)

                cs.addFile(oldFileId, newFileId, filecs)

                # Skip contents entirely when the caller did not request
                # them, for excluded autosource files, and for encapsulated
                # contents without a capsule override.
                if (not withFileContents
                    or (excludeAutoSource and newFile.flags.isAutoSource())
                    or (newFile.flags.isEncapsulatedContent()
                        and not newFile.flags.isCapsuleOverride())):
                    continue

                # this test catches files which have changed from not
                # config files to config files; these need to be included
                # unconditionally so we always have the pristine contents
                # to include in the local database
                # Also include contents of config files when mirroring if the
                # fileid changed, even if the SHA-1 did not.
                # cf CNY-1570, CNY-1699, CNY-2210
                if (contentsHash
                        or (oldFile and newFile.flags.isConfig()
                            and not oldFile.flags.isConfig())
                        or (forceAbsolute and newFile.hasContents)
                        ):
                    # NOTE(review): oldCont is only (re)assigned when
                    # oldFileVersion and oldFile.hasContents hold, yet it is
                    # always passed to fileContentsDiff() below -- presumably
                    # fileContentsDiff ignores it when oldFile lacks
                    # contents, otherwise a stale value from a previous
                    # iteration (or an unbound name) could leak through.
                    # TODO confirm against fileContentsDiff's contract.
                    if oldFileVersion and oldFile.hasContents:
                        oldCont = self.getFileContents(
                            [ (oldFileId, oldFileVersion, oldFile) ])[0]

                    newCont = self.getFileContents(
                            [ (newFileId, newFileVersion, newFile) ])[0]

                    (contType, cont) = changeset.fileContentsDiff(oldFile,
                                                oldCont, newFile, newCont,
                                                mirrorMode = mirrorMode)

                    # we don't let config files be ptr types; if they were
                    # they could be ptrs to things which aren't config files,
                    # which would completely hose the sort order we use. this
                    # could be relaxed someday to let them be ptr's to other
                    # config files
                    if not newFile.flags.isConfig() and \
                                contType == changeset.ChangedFileTypes.file:
                        contentsHash = newFile.contents.sha1()
                        ptr = ptrTable.get(contentsHash, None)
                        if ptr is not None:
                            # duplicate contents: emit a ptr to the first
                            # (pathId + fileId) that carried this sha1
                            contType = changeset.ChangedFileTypes.ptr
                            cont = filecontents.FromString(ptr)
                        else:
                            ptrTable[contentsHash] = pathId + newFileId

                    if not newFile.flags.isConfig() and \
                                contType == changeset.ChangedFileTypes.file:
                        # serve plain non-config contents pre-compressed,
                        # straight from the data store
                        cont = filecontents.CompressedFromDataStore(
                                              self.contentsStore,
                                              newFile.contents.sha1())
                        compressed = True
                    else:
                        compressed = False

                    # ptr entries are not compressed, whether or not they
                    # are config files. override the compressed rule from
                    # above
                    if contType == changeset.ChangedFileTypes.ptr:
                        compressed = False

                    cs.addFileContents(pathId, newFileId, contType, cont,
                                       newFile.flags.isConfig(),
                                       compressed = compressed)

            # When not recursing, yield one changeset per outer-loop job and
            # start fresh accumulators for the next one.
            if not recurse:
                yield cs, externalTroveList, externalFileList, removedTroveList

                cs = changeset.ChangeSet()
                externalTroveList = []
                externalFileList = []
                removedTroveList = []

        # When recursing, everything was folded into a single changeset;
        # yield it once at the end.
        if recurse:
            yield cs, externalTroveList, externalFileList, removedTroveList
# ---- Example #17 (scraped aggregation separator; non-code artifact) ----
    def __init__(self,
                 repos,
                 cs,
                 fileHostFilter=[],
                 callback=None,
                 resetTimestamps=False,
                 allowIncomplete=False,
                 hidden=False,
                 mirror=False,
                 excludeCapsuleContents=False):
        """
        Apply the troves and file contents of changeset C{cs} to C{repos}.

        The work proceeds in phases: create the trove objects (via
        _createInstallTroveObjects), restore config-file contents
        (applying stored diffs against the repository copy when the
        changeset carries a diff), restore normal file contents
        (resolving ptr-type entries into extra datastore references),
        then record removed troves and old troves.

        @param repos: repository receiving the changeset
        @param cs: changeset to apply
        @param fileHostFilter: passed through to
            _createInstallTroveObjects; treated as read-only here, so the
            shared mutable default list is harmless
        @param callback: optional progress object; only
            creatingDatabaseTransaction() is invoked here
        @param resetTimestamps: if True, reset the timestamp on each new
            trove version (relies on the versions cache sharing objects)
        @raises errors.IntegrityError: if contents referenced by the
            changeset are missing
        """
        self.repos = repos
        self.cs = cs
        self.invalidateRollbacksFlag = False

        newList = list(cs.iterNewTroveList())

        if resetTimestamps:
            # This depends intimately on the versions cache. We don't
            # change the timestamps on each version, because the cache
            # ensures they are all a single underlying object. Slick,
            # but brittle?
            updated = {}

            for csTrove in newList:
                ver = csTrove.getNewVersion()
                if ver not in updated:
                    oldVer = ver.copy()
                    ver.trailingRevision().resetTimeStamp()
                    updated[oldVer] = ver

            del updated

        troveNo, configRestoreList, normalRestoreList = \
            self._createInstallTroveObjects(fileHostFilter = fileHostFilter,
                                            callback = callback,
                                            mirror = mirror, hidden = hidden,
                                            allowIncomplete = allowIncomplete,
                                            excludeCapsuleContents =
                                                excludeCapsuleContents)
        configRestoreList, normalRestoreList = \
            self._filterRestoreList(configRestoreList, normalRestoreList)

        # use a key to select data up to, but not including, the first
        # version.  We can't sort on version because we don't have timestamps
        configRestoreList.sort(key=lambda x: x[0:5])
        normalRestoreList.sort(key=lambda x: x[0:3])

        # config files are cached, so we don't have to worry about not
        # restoring the same fileId/pathId twice
        # NOTE(review): the 5th unpack target rebinds newFileId (already
        # bound by the 2nd); the slot is presumably the new file version.
        # Left as-is to preserve behavior -- confirm against the tuples
        # built by _createInstallTroveObjects.
        for (pathId, newFileId, sha1, oldfile, newFileId, oldVersion,
             oldFileId, restoreContents) in configRestoreList:
            if cs.configFileIsDiff(pathId, newFileId):
                (contType,
                 fileContents) = cs.getFileContents(pathId, newFileId)

                # the content for this file is in the form of a
                # diff, which we need to apply against the file in
                # the repository
                assert (oldVersion)

                try:
                    f = self.repos.getFileContents([(oldFileId, oldVersion,
                                                     oldfile)])[0].get()
                except KeyError:
                    # Bug fix: this previously referenced the undefined name
                    # ``fileId`` and raised NameError instead of the intended
                    # IntegrityError. The lookup that failed used oldFileId,
                    # so report that id.
                    raise errors.IntegrityError(
                        "Missing file contents for pathId %s, fileId %s" %
                        (sha1helper.md5ToString(pathId),
                         sha1helper.sha1ToString(oldFileId)))

                oldLines = f.readlines()
                f.close()
                del f
                diff = fileContents.get().readlines()
                (newLines, failedHunks) = patch.patch(oldLines, diff)
                fileContents = filecontents.FromString("".join(newLines))

                assert (not failedHunks)
            else:
                # config files are not always available compressed (due
                # to the config file cache)
                fileContents = filecontents.FromChangeSet(
                    cs, pathId, newFileId)

            self.addFileContents(sha1, fileContents, restoreContents, 1)

        ptrRestores = []
        ptrRefsAdded = {}
        lastRestore = None  # restore each pathId,fileId combo once
        while normalRestoreList:
            # pop(0) keeps the list sorted so tupleListBsearchInsert below
            # can insert ptr targets at their ordered position
            (pathId, fileId, sha1, restoreContents) = normalRestoreList.pop(0)
            if (pathId, fileId) == lastRestore:
                continue

            lastRestore = (pathId, fileId)

            try:
                (contType, fileContents) = cs.getFileContents(pathId,
                                                              fileId,
                                                              compressed=True)
            except KeyError:
                raise errors.IntegrityError(
                    "Missing file contents for pathId %s, fileId %s" %
                    (sha1helper.md5ToString(pathId),
                     sha1helper.sha1ToString(fileId)))
            if contType == changeset.ChangedFileTypes.ptr:
                # a ptr entry names another pathId+fileId whose contents this
                # file shares; queue the target for restoration
                ptrRestores.append(sha1)
                target = util.decompressString(fileContents.get().read())

                if util.tupleListBsearchInsert(
                        normalRestoreList,
                    (target[:16], target[16:], sha1, True), self.ptrCmp):
                    # Item was inserted. This creates a reference in the
                    # datastore; keep track of it to prevent a duplicate
                    # reference count.
                    ptrRefsAdded[sha1] = True

                continue

            assert (contType == changeset.ChangedFileTypes.file)
            self.addFileContents(sha1,
                                 fileContents,
                                 restoreContents,
                                 0,
                                 precompressed=True)

        for sha1 in ptrRestores:
            # Increment the reference count for items which were ptr's
            # to a different file.
            if sha1 in ptrRefsAdded:
                del ptrRefsAdded[sha1]
            else:
                self.addFileContents(sha1, None, False, 0)

        for csTrove in newList:
            if csTrove.troveType() != trove.TROVE_TYPE_REMOVED:
                continue

            troveNo += 1

            if callback:
                callback.creatingDatabaseTransaction(troveNo, len(newList))

            self.markTroveRemoved(csTrove.getName(), csTrove.getNewVersion(),
                                  csTrove.getNewFlavor())

        for (troveName, version, flavor) in cs.getOldTroveList():
            trv = self.repos.getTrove(troveName, version, flavor)
            self.oldTrove(trv, None, troveName, version, flavor)