Example #1
File: repos.py Project: pombredanne/mint
 def get(self, request, hostname, troveString, pathHash):
     repos, path, ver, fileObj = self._getFileInfo(hostname, troveString, 
                                                   pathHash)
     flags = fileObj.flags
     return models.TroveFile(hostname=hostname,
                     trove=troveString,
                     pathId=sha1helper.md5ToString(fileObj.pathId()), 
                     pathHash=pathHash, 
                     path=path, 
                     fileVersion=ver, 
                     fileId=sha1helper.sha1ToString(fileObj.fileId()), 
                     tags=','.join(fileObj.tags()),
                     isConfig=flags.isConfig(),
                     isInitialContents=flags.isInitialContents(),
                     isSource=flags.isSource(),
                     isAutoSource=flags.isAutoSource(),
                     isTransient=flags.isTransient(),
                     size=fileObj.contents.size(),
                     sha1=sha1helper.sha1ToString(fileObj.contents.sha1()),
                     permissions=fileObj.inode.permsString(),
                     mtime=fileObj.inode.mtime(),
                     owner=fileObj.inode.owner(),
                     group=fileObj.inode.group(),
                     provides=fileObj.provides(),
                     requires=fileObj.requires())
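Every example on this page uses sha1helper to turn binary digests into hex strings. A minimal sketch of the conversions being assumed (the sha1helper names come from the examples; the hashlib/binascii stand-ins below are my approximation, not conary's implementation):

import hashlib
from binascii import hexlify

def sha1String(data):
    # 20-byte binary SHA-1 digest of data
    return hashlib.sha1(data).digest()

def sha1ToString(digest):
    # 40-character hex rendering of a 20-byte binary SHA-1 digest
    return hexlify(digest)

def md5ToString(digest):
    # 32-character hex rendering of a 16-byte binary MD5 digest
    return hexlify(digest)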
Example #2
    def _storeFileFromContents(self, contents, sha1, restoreContents,
                               precompressed = False):
        if restoreContents:
            self.contentsStore.addFile(contents.get(),
                                       sha1helper.sha1ToString(sha1),
                                       precompressed = precompressed)
        else:
            # the file doesn't have any contents, so it must exist
            # in the data store already; we still need to increment
            # the reference count for it
            self.contentsStore.addFileReference(sha1helper.sha1ToString(sha1))

        return 1
Example #3
File: localrep.py Project: pombr/conary
    def incrementCount(self, hash, fileObj = None, precompressed = True):
        """
        Increments the count by one.  If it becomes one (the file is
        new), the contents of fileObj are stored into that path.
        """
        if len(hash) != 40:
            hash = sha1helper.sha1ToString(hash)
        cu = self.db.cursor()
        cu.execute("SELECT COUNT(*) FROM DataStore WHERE hash=?", hash)
        exists = cu.next()[0]

        if exists:
            cu.execute("UPDATE DataStore SET count=count+1 WHERE hash=?",
                       hash)
        else:
            if precompressed:
                # it's precompressed as a gzip stream, and we need a
                # zlib stream. just decompress it.
                gzObj = gzip.GzipFile(mode = "r", fileobj = fileObj)
                rawData = gzObj.read()
                del gzObj
            else:
                rawData = fileObj.read()

            data = zlib.compress(rawData)
            digest = digestlib.sha1()
            digest.update(rawData)
            if digest.hexdigest() != hash:
                raise errors.IntegrityError

            cu.execute("INSERT INTO DataStore VALUES(?, 1, ?)",
                       hash, data)
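The else branch above swaps gzip framing for a bare zlib stream while checking the contents against the expected digest. A standalone sketch of that step, assuming digestlib.sha1 behaves like hashlib.sha1:

import gzip
import zlib
import hashlib

def recompressAndVerify(fileObj, expectedHexSha1):
    # decompress the gzip framing, verify the SHA-1, re-wrap as zlib
    rawData = gzip.GzipFile(mode="r", fileobj=fileObj).read()
    if hashlib.sha1(rawData).hexdigest() != expectedHexSha1:
        raise ValueError("sha1 mismatch")  # the example raises errors.IntegrityError
    return zlib.compress(rawData)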
Example #4
 def hashTrove(self, name, version, flavor, withFiles, withFileContents):
     # we add extra delimiters here because we can be sure that they
     # will result in a unique string for each n,v,f
     return sha1helper.sha1ToString(
         sha1helper.sha1String(
             '%s=%s[%s]%s%s' %
             (name, version, flavor, withFiles, withFileContents)))
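The '=' and '[...]' delimiters keep the formatted string unambiguous, so each distinct (name, version, flavor) tuple produces a distinct input to the hash. A hypothetical call, with sha1helper imported as in the examples (all values made up):

key = '%s=%s[%s]%s%s' % ('glibc', '/conary.example.com@rpl:1/2.5-1-1',
                         'is: x86', True, False)
cacheKey = sha1helper.sha1ToString(sha1helper.sha1String(key))  # 40 hex chars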
Example #5
    def testRPMSHA1SigTag(self):
        """make sure that SHA1HEADER/SIG_SHA1 is represented in troveinfo"""

        recipestr = """
class TestRPMSHA1(CapsuleRecipe):
    name = 'simple'
    version = '1.0'

    clearBuildReqs()

    def setup(r):
        r.addCapsule('simple-1.0-1.i386.rpm')
"""
        pkgName = "simple"
        rpmName = "simple-1.0-1.i386.rpm"

        r = self._cookPkgs(recipestr, rpmName, pkgName, "simple")
        trvCs = [x for x in r[2].iterNewTroveList() if x.getName() == "simple:rpm"][0]

        archivePath = resources.get_archive()

        trv = trove.Trove(trvCs)
        f = open(archivePath + "/" + rpmName, "r")
        h = rpmhelper.readHeader(f)

        sha1header = trv.troveInfo.capsule.rpm.sha1header()
        self.assertEqual(h.get(rpmhelper.SIG_SHA1), sha1helper.sha1ToString(sha1header))
Example #6
def getFileSha1(cu, roleIds, fileId):
    fStream = _getFileStream(cu, roleIds, fileId)
    if not fStream or not hasattr(fStream, 'contents'):
        # Missing or no contents (not a regular file).
        return None, None

    return sha1ToString(fStream.contents.sha1()), fStream.flags.isConfig()
Example #7
 def _getJobCachePath(self, applyList):
     applyStr = '\0'.join([
         '%s=%s[%s]--%s[%s]%s' %
         (x[0], x[1][0], x[1][1], x[2][0], x[2][1], x[3]) for x in applyList
     ])
     return self.jobPath + '/' + sha1helper.sha1ToString(
         sha1helper.sha1String(applyStr))
Example #8
    def testRPMSHA1SigTag(self):
        '''make sure that SHA1HEADER/SIG_SHA1 is represented in troveinfo'''

        recipestr = """
class TestRPMSHA1(CapsuleRecipe):
    name = 'simple'
    version = '1.0'

    clearBuildReqs()

    def setup(r):
        r.addCapsule('simple-1.0-1.i386.rpm')
"""
        pkgName = 'simple'
        rpmName = 'simple-1.0-1.i386.rpm'

        r = self._cookPkgs(recipestr, rpmName, pkgName, 'simple')
        trvCs = [
            x for x in r[2].iterNewTroveList() if x.getName() == 'simple:rpm'
        ][0]

        archivePath = resources.get_archive()

        trv = trove.Trove(trvCs)
        f = open(archivePath + '/' + rpmName, "r")
        h = rpmhelper.readHeader(f)

        sha1header = trv.troveInfo.capsule.rpm.sha1header()
        self.assertEqual(h.get(rpmhelper.SIG_SHA1),
                         sha1helper.sha1ToString(sha1header))
Example #9
File: localrep.py Project: tensor5/conary
    def incrementCount(self, hash, fileObj=None, precompressed=True):
        """
        Increments the count by one.  If it becomes one (the file is
        new), the contents of fileObj are stored into that path.
        """
        if len(hash) != 40:
            hash = sha1helper.sha1ToString(hash)
        cu = self.db.cursor()
        cu.execute("SELECT COUNT(*) FROM DataStore WHERE hash=?", hash)
        exists = cu.next()[0]

        if exists:
            cu.execute("UPDATE DataStore SET count=count+1 WHERE hash=?", hash)
        else:
            if precompressed:
                # it's precompressed as a gzip stream, and we need a
                # zlib stream. just decompress it.
                gzObj = gzip.GzipFile(mode="r", fileobj=fileObj)
                rawData = gzObj.read()
                del gzObj
            else:
                rawData = fileObj.read()

            data = zlib.compress(rawData)
            digest = digestlib.sha1()
            digest.update(rawData)
            if digest.hexdigest() != hash:
                raise errors.IntegrityError

            cu.execute("INSERT INTO DataStore VALUES(?, 1, ?)", hash, data)
Example #10
File: repquery.py Project: pombreda/crest
def getFileInfo(cu, roleIds, fileId, mkUrl = None, path = None,
                noContent = False):
    f = _getFileStream(cu, roleIds, fileId)
    if f is None:
        return None

    args = { 'owner' : f.inode.owner(), 'group' : f.inode.group(),
             'mtime' : f.inode.mtime(), 'perms' : f.inode.perms(),
             'fileId' : fileId, 'mkUrl' : mkUrl }

    if f.lsTag == '-':
        fx = datamodel.RegularFile(size = int(f.contents.size()),
                                   sha1 = sha1ToString(f.contents.sha1()),
                                   path = path, withContentLink = not noContent,
                                   **args)
    elif f.lsTag == 'l':
        fx = datamodel.SymlinkFile(target = f.target(), **args)
    elif f.lsTag == 'd':
        fx = datamodel.Directory(**args)
    elif f.lsTag == 'b':
        fx = datamodel.BlockDeviceFile(major = f.devt.major(),
                                       minor = f.devt.minor(), **args)
    elif f.lsTag == 'c':
        fx = datamodel.CharacterDeviceFile(major = f.devt.major(),
                                           minor = f.devt.minor(), **args)
    elif f.lsTag == 's':
        fx = datamodel.Socket(**args)
    elif f.lsTag == 'p':
        fx = datamodel.NamedPipe(**args)
    else:
        # This really shouldn't happen
        raise NotImplementedError

    return fx
Example #11
    def __init__(self, f, sha1=None, isSource=False, sigBlock=False):
        intro = f.read(16)
        (mag1, mag2, mag3, ver, reserved, entries, size) = \
            struct.unpack("!BBBBiii", intro)

        if mag1 != 0x8e or mag2 != 0xad or mag3 != 0xe8 or ver != 01:
            raise IOError, "bad magic for header"

        entryTable = f.read(entries * 16)

        self.isSource = isSource
        self.entries = {}
        self.data = f.read(size)
        assert len(self.data) == size

        if sha1 is not None:
            computedSha1 = sha1helper.sha1ToString(
                sha1helper.sha1String(intro + entryTable + self.data))
            if computedSha1 != sha1:
                raise IOError, "bad header sha1"

        for i in range(entries):
            (tag, dataType, offset,
             count) = struct.unpack("!iiii", entryTable[i * 16:i * 16 + 16])

            self.entries[tag] = (dataType, offset, count)

        if sigBlock:
            # We need to align to an 8-byte boundary.
            # So far we read the intro (which is 16 bytes) and the entry table
            # (which is a multiple of 16 bytes). So we only have to worry
            # about the actual header data not being aligned.
            alignment = size % 8
            if alignment:
                f.read(8 - alignment)
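For reference, the 16-byte intro unpacked above ('!BBBBiii' is big-endian) holds three magic bytes, a format version, a reserved word, the entry count, and the data size; the header SHA-1 covers intro + entry table + data, which is exactly the span sha1String receives. A sketch that reads just that region under the same layout assumptions:

import struct

def readHeaderRegion(f):
    # the byte span covered by SHA1HEADER/SIG_SHA1
    intro = f.read(16)
    mag1, mag2, mag3, ver, reserved, entries, size = \
        struct.unpack("!BBBBiii", intro)
    entryTable = f.read(entries * 16)  # 16 bytes per (tag, type, offset, count) entry
    data = f.read(size)
    return intro + entryTable + data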
Example #12
 def invalidateCachedChroot(self):
     """Destroy a cached chroot archive associated with this chroot."""
     if self.chrootFingerprint:
         self.logger.warning(
             "Removing cached chroot with fingerprint %s",
             sha1helper.sha1ToString(self.chrootFingerprint))
         self.chrootCache.remove(self.chrootFingerprint)
Example #13
def getFileInfo(cu, roleIds, fileId, mkUrl = None, path = None,
                noContent = False):
    f = _getFileStream(cu, roleIds, fileId)
    if f is None:
        return None

    args = { 'owner' : f.inode.owner(), 'group' : f.inode.group(),
             'mtime' : f.inode.mtime(), 'perms' : f.inode.perms(),
             'fileId' : fileId, 'mkUrl' : mkUrl }

    if f.lsTag == '-':
        fx = datamodel.RegularFile(size = int(f.contents.size()),
                                   sha1 = sha1ToString(f.contents.sha1()),
                                   path = path, withContentLink = not noContent,
                                   **args)
    elif f.lsTag == 'l':
        fx = datamodel.SymlinkFile(target = f.target(), **args)
    elif f.lsTag == 'd':
        fx = datamodel.Directory(**args)
    elif f.lsTag == 'b':
        fx = datamodel.BlockDeviceFile(major = f.devt.major(),
                                       minor = f.devt.minor(), **args)
    elif f.lsTag == 'c':
        fx = datamodel.CharacterDeviceFile(major = f.devt.major(),
                                           minor = f.devt.minor(), **args)
    elif f.lsTag == 's':
        fx = datamodel.Socket(**args)
    elif f.lsTag == 'p':
        fx = datamodel.NamedPipe(**args)
    else:
        # This really shouldn't happen
        raise NotImplementedError

    return fx
Example #14
    def __init__(self, f, sha1 = None, isSource = False, sigBlock = False):
        intro = f.read(16)
        (mag1, mag2, mag3, ver, reserved, entries, size) = \
            struct.unpack("!BBBBiii", intro)

        if mag1 != 0x8e or mag2 != 0xad or mag3 != 0xe8 or ver != 01:
            raise IOError, "bad magic for header"

        entryTable = f.read(entries * 16)

        self.isSource = isSource
        self.entries = {}
        self.data = f.read(size)
        assert len(self.data) == size

        if sha1 is not None:
            computedSha1 = sha1helper.sha1ToString(
                sha1helper.sha1String(intro + entryTable + self.data))
            if computedSha1 != sha1:
                raise IOError, "bad header sha1"

        for i in range(entries):
            (tag, dataType, offset, count) = struct.unpack("!iiii",
                                            entryTable[i * 16: i * 16 + 16])

            self.entries[tag] = (dataType, offset, count)

        if sigBlock:
            # We need to align to an 8-byte boundary.
            # So far we read the intro (which is 16 bytes) and the entry table
            # (which is a multiple of 16 bytes). So we only have to worry
            # about the actual header data not being aligned.
            alignment = size % 8
            if alignment:
                f.read(8 - alignment)
Example #15
File: repquery.py Project: pombreda/crest
def getFileSha1(cu, roleIds, fileId):
    fStream = _getFileStream(cu, roleIds, fileId)
    if not fStream or not hasattr(fStream, 'contents'):
        # Missing or no contents (not a regular file).
        return None, None

    return sha1ToString(fStream.contents.sha1()), fStream.flags.isConfig()
Example #16
    def _storeFileFromContents(self,
                               contents,
                               sha1,
                               restoreContents,
                               precompressed=False):
        if restoreContents:
            self.contentsStore.addFile(contents.get(),
                                       sha1helper.sha1ToString(sha1),
                                       precompressed=precompressed)
        else:
            # the file doesn't have any contents, so it must exist
            # in the data store already; we still need to increment
            # the reference count for it
            self.contentsStore.addFileReference(sha1helper.sha1ToString(sha1))

        return 1
Example #17
 def markTroveRemoved(self, name, version, flavor):
     sha1s = self.troveStore.markTroveRemoved(name, version, flavor)
     for sha1 in sha1s:
         try:
             self.contentsStore.removeFile(sha1helper.sha1ToString(sha1))
         except OSError, e:
             if e.errno != errno.ENOENT:
                 raise
Example #18
File: errors.py Project: pombr/conary
 def __init__(self, fileId):
     self.fileId = fileId
     RepositoryError.__init__(self, '''File Stream Missing
 The following file stream was not found on the server:
 fileId: %s
 This could be due to an incomplete mirror, insufficient permissions,
 or the troves using this filestream having been removed from the server.'''
 % sha1helper.sha1ToString(fileId))
Example #19
File: localrep.py Project: tensor5/conary
 def openFile(self, hash, mode="r"):
     if len(hash) != 40:
         hash = sha1helper.sha1ToString(hash)
     cu = self.db.cursor()
     cu.execute("SELECT data FROM DataStore WHERE hash=?", hash)
     data = cu.next()[0]
     data = zlib.decompress(data)
     return StringIO(data)
Example #20
File: localrep.py Project: pombr/conary
 def openFile(self, hash, mode = "r"):
     if len(hash) != 40:
         hash = sha1helper.sha1ToString(hash)
     cu = self.db.cursor()
     cu.execute("SELECT data FROM DataStore WHERE hash=?", hash)
     data = cu.next()[0]
     data = zlib.decompress(data)
     return StringIO(data)
Example #21
    def hashToPath(self, hash):
        # proxy code passes in hex digests with version suffixes, so just pass
        # that through.
        if len(hash) < 40:
            hash = sha1helper.sha1ToString(hash)
        if (len(hash) < 5):
            raise KeyError, ("invalid hash %s" % hash)

        return os.sep.join((self.top, hash[0:2], hash[2:]))
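hashToPath shards the content store by the first two hex digits of the digest. A worked example under that layout (the store root is hypothetical):

import os

top = '/srv/datastore'  # hypothetical store root
hexDigest = '2fd4e1c67a2d28fced849ee1bb76e7391b93eb12'
path = os.sep.join((top, hexDigest[0:2], hexDigest[2:]))
# '/srv/datastore/2f/d4e1c67a2d28fced849ee1bb76e7391b93eb12'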
Example #22
File: errors.py Project: tensor5/conary
 def __init__(self, fileId):
     self.fileId = fileId
     RepositoryError.__init__(
         self, '''File Stream Missing
 The following file stream was not found on the server:
 fileId: %s
 This could be due to an incomplete mirror, insufficient permissions,
 or the troves using this filestream having been removed from the server.'''
         % sha1helper.sha1ToString(fileId))
Example #23
 def hashGroupDeps(self, groupTroves, depClass, dependency):
     depSet = deps.DependencySet()
     depSet.addDep(depClass, dependency)
     frz = depSet.freeze()
     troveList = sorted(self.hashTrove(withFiles=False,
                                       withFileContents=False,
                                       *x.getNameVersionFlavor())
                        for x in groupTroves)
     str = '[1]%s%s%s' % (len(frz), frz, ''.join(troveList))
     return sha1helper.sha1ToString(sha1helper.sha1String(str))
Example #24
 def hashGroupDeps(self, groupTroves, depClass, dependency):
     depSet = deps.DependencySet()
     depSet.addDep(depClass, dependency)
     frz = depSet.freeze()
     troveList = sorted(
         self.hashTrove(withFiles=False,
                        withFileContents=False,
                        *x.getNameVersionFlavor()) for x in groupTroves)
     str = '[1]%s%s%s' % (len(frz), frz, ''.join(troveList))
     return sha1helper.sha1ToString(sha1helper.sha1String(str))
Example #25
    def hashToPath(self, hash):
        # New consumers should pass a binary hash, but for backwards
        # compatibility (with rmake) continue to accept hashes that are already
        # encoded. Proxy code also passes in hashes with suffixes on them,
        # which should probably be normalized further.
        if len(hash) < 40:
            hash = sha1helper.sha1ToString(hash)
        if (len(hash) < 5):
            raise KeyError, ("invalid hash %s" % hash)

        return os.sep.join((self.top, hash[0:2], hash[2:4], hash[4:]))
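This variant splits off a second directory level, so the digest from the earlier sketch would land at '/srv/datastore/2f/d4/e1c67a2d28fced849ee1bb76e7391b93eb12' instead; the extra level keeps per-directory fan-out small in large stores.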
Example #26
File: lookaside.py Project: tensor5/conary
    def cacheFilePath(self, cachePrefix, url):
        cachePath = self.getCachePath(cachePrefix, url)
        util.mkdirChain(os.path.dirname(cachePath))

        if url.filePath() in self.cacheMap:
            # don't check sha1 twice
            return self.cacheMap[url.filePath()]
        (troveName, troveVersion, pathId, troveFile, fileId, troveFileVersion,
         sha1, mode) = self.nameMap[url.filePath()]
        sha1Cached = None
        cachedMode = None
        if os.path.exists(cachePath):
            sha1Cached = sha1helper.sha1FileBin(cachePath)
        if sha1Cached != sha1:
            if sha1Cached:
                log.info('%s sha1 %s != %s; fetching new...', url.filePath(),
                         sha1helper.sha1ToString(sha1),
                         sha1helper.sha1ToString(sha1Cached))
            else:
                log.info('%s not yet cached, fetching...', url.filePath())

            if self.quiet:
                csCallback = None
            else:
                csCallback = ChangesetCallback()

            f = self.repos.getFileContents([(fileId, troveFileVersion)],
                                           callback=csCallback)[0].get()
            outF = util.AtomicFile(cachePath, chmod=0644)
            util.copyfileobj(f, outF)
            outF.commit()
            fileObj = self.repos.getFileVersion(pathId, fileId,
                                                troveFileVersion)
            fileObj.chmod(cachePath)

        cachedMode = os.stat(cachePath).st_mode & 0777
        if mode != cachedMode:
            os.chmod(cachePath, mode)
        self.cacheMap[url.filePath()] = cachePath
        return cachePath
Example #27
File: lookaside.py Project: pombr/conary
    def cacheFilePath(self, cachePrefix, url):
        cachePath = self.getCachePath(cachePrefix, url)
        util.mkdirChain(os.path.dirname(cachePath))

        if url.filePath() in self.cacheMap:
            # don't check sha1 twice
            return self.cacheMap[url.filePath()]
        (troveName, troveVersion, pathId, troveFile, fileId,
         troveFileVersion, sha1, mode) = self.nameMap[url.filePath()]
        sha1Cached = None
        cachedMode = None
        if os.path.exists(cachePath):
            sha1Cached = sha1helper.sha1FileBin(cachePath)
        if sha1Cached != sha1:
            if sha1Cached:
                log.info('%s sha1 %s != %s; fetching new...', url.filePath(),
                          sha1helper.sha1ToString(sha1),
                          sha1helper.sha1ToString(sha1Cached))
            else:
                log.info('%s not yet cached, fetching...', url.filePath())

            if self.quiet:
                csCallback = None
            else:
                csCallback = ChangesetCallback()

            f = self.repos.getFileContents(
                [(fileId, troveFileVersion)], callback=csCallback)[0].get()
            outF = util.AtomicFile(cachePath, chmod=0644)
            util.copyfileobj(f, outF)
            outF.commit()
            fileObj = self.repos.getFileVersion(
                pathId, fileId, troveFileVersion)
            fileObj.chmod(cachePath)

        cachedMode = os.stat(cachePath).st_mode & 0777
        if mode != cachedMode:
            os.chmod(cachePath, mode)
        self.cacheMap[url.filePath()] = cachePath
        return cachePath
Example #28
 def prune(self):
     if not self.sizeLimit:
         return
     cached = self.listCached()
     cached.sort(key=lambda x: x.atime)
     total = sum(x.size for x in cached)
     for item in cached:
         if total < self.sizeLimit:
             break
         self.logger.info("Deleting cached chroot %s to meet size limit",
                 sha1ToString(item.fingerprint))
         self.remove(item.fingerprint)
         total -= item.size
Example #29
 def prune(self):
     if not self.sizeLimit:
         return
     cached = self.listCached()
     cached.sort(key=lambda x: x.atime)
     total = sum(x.size for x in cached)
     for item in cached:
         if total < self.sizeLimit:
             break
         self.logger.info("Deleting cached chroot %s to meet size limit",
                          sha1ToString(item.fingerprint))
         self.remove(item.fingerprint)
         total -= item.size
Example #30
    def _restoreNormal(self, cs, normalRestoreList, preRestored):
        ptrRestores = []
        ptrRefsAdded = {}
        lastRestore = None  # restore each pathId,fileId combo once
        while normalRestoreList:
            (pathId, fileId, sha1, restoreContents) = normalRestoreList.pop(0)
            if preRestored is not None and sha1 in preRestored:
                continue
            if (pathId, fileId) == lastRestore:
                continue

            lastRestore = (pathId, fileId)

            try:
                (contType, fileContents) = cs.getFileContents(pathId,
                                                              fileId,
                                                              compressed=True)
            except KeyError:
                raise errors.IntegrityError(
                    "Missing file contents for pathId %s, fileId %s" %
                    (sha1helper.md5ToString(pathId),
                     sha1helper.sha1ToString(fileId)))
            if contType == changeset.ChangedFileTypes.ptr:
                ptrRestores.append(sha1)
                target = util.decompressString(fileContents.get().read())

            if util.tupleListBsearchInsert(
                    normalRestoreList,
                    (target[:16], target[16:], sha1, True), self.ptrCmp):
                    # Item was inserted. This creates a reference in the
                    # datastore; keep track of it to prevent a duplicate
                    # reference count.
                    ptrRefsAdded[sha1] = True

                continue

            assert (contType == changeset.ChangedFileTypes.file)
            self.addFileContents(sha1,
                                 fileContents,
                                 restoreContents,
                                 0,
                                 precompressed=True)

        for sha1 in ptrRestores:
            # Increment the reference count for items which were ptr's
            # to a different file.
            if sha1 in ptrRefsAdded:
                del ptrRefsAdded[sha1]
            else:
                self.addFileContents(sha1, None, False, 0)
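A ptr entry's decompressed contents are a 16-byte pathId followed by a fileId, which is why the code above re-queues (target[:16], target[16:], sha1, True) into normalRestoreList rather than storing the ptr itself.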
Example #31
File: localrep.py Project: tensor5/conary
 def decrementCount(self, hash):
     """
     Decrements the count by one; if it becomes 1, the count file
     is removed. If it becomes zero, the contents are removed.
     """
     if len(hash) != 40:
         hash = sha1helper.sha1ToString(hash)
     cu = self.db.cursor()
     cu.execute("SELECT count FROM DataStore WHERE hash=?", hash)
     count = cu.next()[0]
     if count == 1:
         cu.execute("DELETE FROM DataStore WHERE hash=?", hash)
     else:
         count -= 1
         cu.execute("UPDATE DataStore SET count=? WHERE hash=?", count,
                    hash)
Example #32
File: localrep.py Project: pombr/conary
 def decrementCount(self, hash):
     """
     Decrements the count by one; if it becomes 1, the count file
     is removed. If it becomes zero, the contents are removed.
     """
     if len(hash) != 40:
         hash = sha1helper.sha1ToString(hash)
     cu = self.db.cursor()
     cu.execute("SELECT count FROM DataStore WHERE hash=?", hash)
     count = cu.next()[0]
     if count == 1:
         cu.execute("DELETE FROM DataStore WHERE hash=?", hash)
     else:
         count -= 1
         cu.execute("UPDATE DataStore SET count=? WHERE hash=?",
                    count, hash)
Example #33
File: filestest.py Project: pombr/conary
 def testFileId(self):
     # this test verifies that the value produced as the fileId
     # of a known stream matches its pre-calculated value.
     f = files.RegularFile(None)
     f.inode.perms.set(0604)
     f.inode.mtime.set(0100)
     f.inode.owner.set("daemon")
     f.inode.group.set("uucp")
     s = "hello world"
     contents = filecontents.FromString(s)
     f.contents = files.RegularFileStream()
     f.contents.size.set(len(s))
     f.contents.sha1.set(sha1helper.sha1String(s))
     f.flags.set(0)
     expectedId = '567355867fbbcb2be55d35c3d229a7df8152fdbc'
     self.assertEqual(f.freeze(), '-\x01\x00"\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x02\x00\x14*\xael5\xc9O\xcf\xb4\x15\xdb\xe9_@\x8b\x9c\xe9\x1e\xe8F\xed\x03\x00\x04\x00\x00\x00\x00\x05\x00\x1c\x01\x00\x02\x01\x84\x02\x00\x04\x00\x00\x00@\x03\x00\x06daemon\x04\x00\x04uucp')
     self.assertEqual(sha1helper.sha1ToString(f.fileId()), expectedId)
Example #34
    def createImageBuild(self, image, buildData=None, for_user=None):
        outputToken = sha1helper.sha1ToString(file('/dev/urandom').read(20))
        if buildData is None:
            buildData = []
        buildData.append(('outputToken', outputToken, datatypes.RDT_STRING))

        image.time_created = image.time_updated = time.time()
        if self.user is not None:
            image.created_by_id = self.user.user_id
        image.image_count = 0
        if image.project_branch_stage_id:
            image.stage_name = image.project_branch_stage.name

        if (image.trove_version and image.project_branch_stage_id is None
                and image.project_branch_id is None):
            # Try to determine the PBS from the trove version
            troveLabel = versions.ThawVersion(image.trove_version).trailingLabel()
            pbId, stage = self.restDb.productMgr.getProductVersionForLabel(
                image.project.repository_hostname, troveLabel)
            pbs = self.mgr.getStageByProjectBranchAndStageName(pbId, stage)
            if pbs:
                image.project_branch_stage_id = pbs.stage_id

        if image.trove_version is None:
            image.trove_version = '/%s/0.1:1-1-1' % versions.CookLabel()

        if not image.trove_flavor and image.architecture:
            flavor = deps.parseFlavor(str('is: ' + image.architecture))
            image.trove_flavor = flavor.freeze()

        # Fill in the redundant information starting with the most
        # specific part
        if image.project_branch_stage_id:
            image.project_branch_id = image.project_branch_stage.project_branch_id
            image.project_id = image.project_branch_stage.project_id
        elif image.project_branch_id:
            image.project_id = image.project_branch.project_id

        image.save()

        for bdName, bdValue, bdType in buildData:
            self._setImageDataValue(image.image_id, bdName, bdValue, dataType=bdType)

        self.mgr.addToMyQuerySet(image, for_user)
        self.mgr.retagQuerySetsByType('image', for_user)
        return image
Example #35
    def splitFile(self, dir):
        while self.tarfh.tell() < self.tarEnd:
            size, chunk = self._getChunk()
            chunkfh = open(os.path.join(dir, self._formatFileName()), 'w')
            chunkfh.write(chunk)
            chunkfh.close()

            fileName = self._formatFileName()
            sha1sum = sha1ToString(sha1String(chunk))

            self.files.append(fileName)

            # Add both lines to the tblist for backwards compatibility with
            # older versions of Anaconda.
            self.tblist.append('%s %s %s' % (fileName, size, 1))
            self.tblist.append('%s %s %s %s' % (fileName, size, 1, sha1sum))

            self.count += 1
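Both tblist rows describe the same chunk: older Anaconda versions parse the three-field 'name size flags' form, while the four-field form appends the chunk's SHA-1 for consumers that can verify it (testTbList below checks that each pair agrees).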
Example #36
    def testTbList(self):
        ts, baseDir = self.mktree()

        for i, fn in enumerate(ts.files):
            tbIndex = i*2
            oldEntry = ts.tblist[tbIndex].split()
            newEntry = ts.tblist[tbIndex+1].split()

            self.failUnlessEqual(len(oldEntry), 3)
            self.failUnlessEqual(len(newEntry), 4)
            self.failUnlessEqual(oldEntry[0], fn)
            self.failUnlessEqual(oldEntry[0], newEntry[0])
            self.failUnlessEqual(oldEntry[1], newEntry[1])
            self.failUnlessEqual(oldEntry[2], newEntry[2])

            binSha1 = sha1helper.sha1FileBin(os.path.join(baseDir, fn))
            sha1 = sha1helper.sha1ToString(binSha1)

            self.failUnlessEqual(newEntry[3], sha1)
Example #37
File: state.py Project: tensor5/conary
    def _write(self, f):
        """
        Returns a string representing file information for this
        trove, which can later be read by the read() method. This is
        only used to create the Conary control file when dealing with
        :source component checkins, so things like trove dependency
        information are not needed.  The format of the string is:

        name <name>
        version <version>
        branch <branch>
        (lastmerged <version>)?
        (factory <name>)?
        <file count>
        PATHID1 PATH1 FILEID1 ISCONFIG1 REFRESH1 VERSION1
        PATHID2 PATH2 FILEID2 ISCONFIG2 REFRESH2 VERSION2
        .
        .
        .
        PATHIDn PATHn FILEIDn ISCONFIGn REFRESHn VERSIONn
        """
        assert (len(self.strongTroves) == 0)
        assert (len(self.weakTroves) == 0)

        f.write("name %s\n" % self.getName())
        f.write("version %s\n" % self.getVersion().freeze())
        f.write("branch %s\n" % self.getBranch().freeze())
        if self.getLastMerged() is not None:
            f.write("lastmerged %s\n" % self.getLastMerged().freeze())
        if self.getFactory():
            f.write("factory %s\n" % self.getFactory())

        rc = []
        rc.append("%d\n" % (len(list(self.iterFileList()))))

        rc += [
            "%s %s %s %s %s\n" %
            (sha1helper.md5ToString(x[0]), x[1], sha1helper.sha1ToString(
                x[2]), self.fileInfo[x[0]], x[3].asString())
            for x in sorted(self.iterFileList())
        ]

        f.write("".join(rc))
Example #38
File: repos.py Project: pombredanne/mint
 def get(self, request, hostname, troveString):
     repos = self.getRepos()
     name, version, flavor = self._getTuple(troveString)
     trv = repos.getTrove(name, version, flavor, withFiles=True)
     fileList = []
     for pathId, path, fileId, fileVersion in trv.iterFileList():
         pathHash = sha1helper.md5String(path)
         fileList.append(models.TroveFile(
                     hostname=hostname,
                     pathId=sha1helper.md5ToString(pathId), 
                     pathHash=sha1helper.md5ToString(pathHash), 
                     path=path, 
                     fileId=sha1helper.sha1ToString(fileId), 
                     trove=troveString,
                     fileVersion=fileVersion))
     troveModel = models.Trove(hostname=hostname, 
                               name=name, version=version, flavor=flavor,
                               files=fileList)
     return troveModel
Example #39
 def store(self, chrootFingerprint, root):
     path = self._fingerPrintToPath(chrootFingerprint)
     prefix = sha1ToString(chrootFingerprint) + '.'
     util.mkdirChain(self.cacheDir)
     lock = locking.LockFile(path + '.lock')
     if not lock.acquire(wait=False):
         # Busy, just do nothing
         return
     fd, fn = tempfile.mkstemp(self.suffix, prefix, self.cacheDir)
     os.close(fd)
     try:
         subprocess.call('tar -cC %s . | %s > %s' % (root, self.compress,
             fn), shell=True)
         os.rename(fn, path)
     finally:
         util.removeIfExists(fn)
         lock.release()
     ChrootManifest.store(root, path)
     self.prune()
Example #40
    def _write(self, f):
        """
        Returns a string representing file information for this
        trove, which can later be read by the read() method. This is
        only used to create the Conary control file when dealing with
        :source component checkins, so things like trove dependency
        information are not needed.  The format of the string is:

        name <name>
        version <version>
        branch <branch>
        (lastmerged <version>)?
        (factory <name>)?
        <file count>
        PATHID1 PATH1 FILEID1 ISCONFIG1 REFRESH1 VERSION1
        PATHID2 PATH2 FILEID2 ISCONFIG2 REFRESH2 VERSION2
        .
        .
        .
        PATHIDn PATHn FILEIDn ISCONFIGn REFRESHn VERSIONn
        """
        assert(len(self.strongTroves) == 0)
        assert(len(self.weakTroves) == 0)

        f.write("name %s\n" % self.getName())
        f.write("version %s\n" % self.getVersion().freeze())
        f.write("branch %s\n" % self.getBranch().freeze())
        if self.getLastMerged() is not None:
            f.write("lastmerged %s\n" % self.getLastMerged().freeze())
        if self.getFactory():
            f.write("factory %s\n" % self.getFactory())

        rc = []
        rc.append("%d\n" % (len(list(self.iterFileList()))))

        rc += [ "%s %s %s %s %s\n" % (sha1helper.md5ToString(x[0]),
                                x[1],
                                sha1helper.sha1ToString(x[2]),
                                self.fileInfo[x[0]],
                                x[3].asString())
                for x in sorted(self.iterFileList()) ]

        f.write("".join(rc))
Example #41
File: filestest.py Project: sweptr/conary
 def testFileId(self):
     # this test verifies that the value produced as the fileId
     # of a known stream matches its pre-calculated value.
     f = files.RegularFile(None)
     f.inode.perms.set(0604)
     f.inode.mtime.set(0100)
     f.inode.owner.set("daemon")
     f.inode.group.set("uucp")
     # to make sure that referenced names "exist"
     files.userCache.nameCache['daemon'] = 2
     files.groupCache.nameCache['uucp'] = 14
     s = "hello world"
     contents = filecontents.FromString(s)
     f.contents = files.RegularFileStream()
     f.contents.size.set(len(s))
     f.contents.sha1.set(sha1helper.sha1String(s))
     f.flags.set(0)
     expectedId = '567355867fbbcb2be55d35c3d229a7df8152fdbc'
     self.assertEqual(f.freeze(), '-\x01\x00"\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x0b\x02\x00\x14*\xael5\xc9O\xcf\xb4\x15\xdb\xe9_@\x8b\x9c\xe9\x1e\xe8F\xed\x03\x00\x04\x00\x00\x00\x00\x05\x00\x1c\x01\x00\x02\x01\x84\x02\x00\x04\x00\x00\x00@\x03\x00\x06daemon\x04\x00\x04uucp')
     self.assertEqual(sha1helper.sha1ToString(f.fileId()), expectedId)
Example #42
 def store(self, chrootFingerprint, root):
     path = self._fingerPrintToPath(chrootFingerprint)
     prefix = sha1ToString(chrootFingerprint) + '.'
     util.mkdirChain(self.cacheDir)
     lock = locking.LockFile(path + '.lock')
     if not lock.acquire(wait=False):
         # Busy, just do nothing
         return
     fd, fn = tempfile.mkstemp(self.suffix, prefix, self.cacheDir)
     os.close(fd)
     try:
         subprocess.call('tar -cC %s . | %s > %s' %
                         (root, self.compress, fn),
                         shell=True)
         os.rename(fn, path)
     finally:
         util.removeIfExists(fn)
         lock.release()
     ChrootManifest.store(root, path)
     self.prune()
Example #43
    def _restoreConfig(self, cs, configRestoreList):
        # config files are cached, so we don't have to worry about not
        # restoring the same fileId/pathId twice
        for (pathId, newFileId, sha1, oldfile, newFileId, oldVersion,
             oldFileId, restoreContents) in configRestoreList:
            if cs.configFileIsDiff(pathId, newFileId):
                (contType,
                 fileContents) = cs.getFileContents(pathId, newFileId)

                # the content for this file is in the form of a
                # diff, which we need to apply against the file in
                # the repository
                assert (oldVersion)

                try:
                    f = self.repos.getFileContents([(oldFileId, oldVersion,
                                                     oldfile)])[0].get()
                except KeyError:
                    raise errors.IntegrityError(
                        "Missing file contents for pathId %s, fileId %s" %
                        (sha1helper.md5ToString(pathId),
                         sha1helper.sha1ToString(oldFileId)))

                oldLines = f.readlines()
                f.close()
                del f
                diff = fileContents.get().readlines()
                (newLines, failedHunks) = patch.patch(oldLines, diff)
                fileContents = filecontents.FromString("".join(newLines))

                assert (not failedHunks)
            else:
                # config files are not always available compressed (due
                # to the config file cache)
                fileContents = filecontents.FromChangeSet(
                    cs, pathId, newFileId)

            self.addFileContents(sha1, fileContents, restoreContents, 1)
Example #44
 def get(self):
     return self.store.openRawFile(sha1helper.sha1ToString(self.sha1))
Example #45
 def hashTrove(self, trove):
     return sha1helper.sha1ToString(
         sha1helper.sha1String('%s %s=%s[%s]' %
                               (trove.jobId, trove.getName(),
                                trove.getVersion(), trove.getFlavor())))
Example #46
 def _fingerPrintToPath(self, chrootFingerprint):
     basename = sha1ToString(chrootFingerprint) + self.suffix
     return os.path.join(self.cacheDir, basename)
Example #47
 def hashTroveInfo(self, jobId, name, version, flavor):
     return sha1helper.sha1ToString(
         sha1helper.sha1String('%s %s=%s[%s]' %
                               (jobId, name, version, flavor)))
Example #48
 def hashFile(self, fileId, fileVersion):
     # we add extra delimiters here because we can be sure that they
     # will result in a unique string for each fileId, fileVersion pair
     return sha1helper.sha1ToString(
         sha1helper.sha1String('[0]%s=%s' % (fileId, fileVersion)))
Example #49
 def checkSha1(self, fileName, sum):
     assert(sha1helper.sha1ToString(sha1helper.sha1FileBin(fileName)) == sum)
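sha1FileBin evidently returns the binary digest of a file's contents, which checkSha1 converts to hex before comparing. A minimal stand-in using hashlib; the chunked read is my choice, not necessarily conary's:

import hashlib

def sha1FileBin(path, bufSize=64 * 1024):
    # binary SHA-1 of a file's contents, read in chunks to bound memory
    digest = hashlib.sha1()
    f = open(path, 'rb')
    try:
        chunk = f.read(bufSize)
        while chunk:
            digest.update(chunk)
            chunk = f.read(bufSize)
    finally:
        f.close()
    return digest.digest()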
Example #50
 def invalidateCachedChroot(self):
     """Destroy a cached chroot archive associated with this chroot."""
     if self.chrootFingerprint:
         self.logger.warning("Removing cached chroot with fingerprint %s",
                 sha1helper.sha1ToString(self.chrootFingerprint))
         self.chrootCache.remove(self.chrootFingerprint)
Example #51
    def install(self):
        self.cfg.root = self.root
        self._lock(self.root, fcntl.LOCK_SH)
        if self.oldRoot:
            if self.serverCfg.reuseChroots:
                self._moveOldRoot(self.oldRoot, self.root)
        if not self.jobList and not self.crossJobList:
            # should only be true in debugging situations
            return

        manifest, done = self._restoreFromCache()
        self._breakLinks()
        if done:
            return

        def _install(jobList):
            self.cfg.flavor = []
            openpgpkey.getKeyCache().setPublicPath(
                                     self.cfg.root + '/root/.gnupg/pubring.gpg')
            openpgpkey.getKeyCache().setPrivatePath(
                                self.cfg.root + '/root/.gnupg/secring.gpg')
            self.cfg.pubRing = [self.cfg.root + '/root/.gnupg/pubring.gpg']
            client = conaryclient.ConaryClient(self.cfg)
            client.setUpdateCallback(self.callback)
            if self.csCache:
                changeSetList = self.csCache.getChangeSets(client.getRepos(),
                                                           jobList,
                                                           callback=self.callback)
            else:
                changeSetList = []

            updJob = client.newUpdateJob()
            try:
                client.prepareUpdateJob(updJob,
                    jobList, keepExisting=False, resolveDeps=False,
                    recurse=False, checkPathConflicts=False,
                    fromChangesets=changeSetList,
                    migrate=True)
            except conaryclient.update.NoNewTrovesError:
                # since we're migrating, this simply means there were no
                # operations to be performed
                pass
            else:
                util.mkdirChain(self.cfg.root + '/root')
                client.applyUpdate(updJob, replaceFiles=True,
                                   tagScript=self.cfg.root + '/root/tagscripts')

        self._installRPM()
        self._touchShadow()
        util.settempdir(self.cfg.root + self.cfg.tmpDir)

        if self.bootstrapJobList:
            self.logger.info("Installing initial chroot bootstrap requirements")
            oldRoot = self.cfg.dbPath
            try:
                # Bootstrap troves are installed outside the system DB,
                # although it doesn't matter as much in trove builds as it does
                # in image builds.
                self.cfg.dbPath += '.bootstrap'
                _install(self.bootstrapJobList)
            finally:
                self.cfg.dbPath = oldRoot

        if self.jobList:
            self.logger.info("Installing chroot requirements")
            _install(self.jobList)

        if self.crossJobList:
            self.logger.info("Installing chroot cross-compile requirements")
            oldRoot = self.cfg.root
            try:
                self.cfg.root += self.sysroot
                _install(self.crossJobList)
            finally:
                self.cfg.root = oldRoot

        util.settempdir(self.cfg.tmpDir)
        self._uninstallRPM()

        # directories must be traversable and files readable (RMK-1006)
        for root, dirs, files in os.walk(self.cfg.root, topdown=True):
            for directory in dirs:
                _addModeBits(os.sep.join((root, directory)), 05)
            for filename in files:
                _addModeBits(os.sep.join((root, filename)), 04)

        if manifest:
            manifest.write(self.cfg.root)
        if self.chrootFingerprint:
            strFingerprint = sha1helper.sha1ToString(self.chrootFingerprint)
            self.logger.info('caching chroot with fingerprint %s',
                    strFingerprint)
            self.chrootCache.store(self.chrootFingerprint, self.cfg.root)
            self.logger.info('caching chroot %s done',
                    strFingerprint)
            self._breakLinks()
Example #52
    def install(self):
        self.cfg.root = self.root
        self._lock(self.root, fcntl.LOCK_SH)
        if self.oldRoot:
            if self.serverCfg.reuseChroots:
                self._moveOldRoot(self.oldRoot, self.root)
        if not self.jobList and not self.crossJobList:
            # should only be true in debugging situations
            return

        manifest, done = self._restoreFromCache()
        self._breakLinks()
        if done:
            return

        def _install(jobList):
            self.cfg.flavor = []
            openpgpkey.getKeyCache().setPublicPath(self.cfg.root +
                                                   '/root/.gnupg/pubring.gpg')
            openpgpkey.getKeyCache().setPrivatePath(self.cfg.root +
                                                    '/root/.gnupg/secring.gpg')
            self.cfg.pubRing = [self.cfg.root + '/root/.gnupg/pubring.gpg']
            client = conaryclient.ConaryClient(self.cfg)
            client.setUpdateCallback(self.callback)
            if self.csCache:
                changeSetList = self.csCache.getChangeSets(
                    client.getRepos(), jobList, callback=self.callback)
            else:
                changeSetList = []

            updJob = client.newUpdateJob()
            try:
                client.prepareUpdateJob(updJob,
                                        jobList,
                                        keepExisting=False,
                                        resolveDeps=False,
                                        recurse=False,
                                        checkPathConflicts=False,
                                        fromChangesets=changeSetList,
                                        migrate=True)
            except conaryclient.update.NoNewTrovesError:
                # since we're migrating, this simply means there were no
                # operations to be performed
                pass
            else:
                util.mkdirChain(self.cfg.root + '/root')
                client.applyUpdate(updJob,
                                   replaceFiles=True,
                                   tagScript=self.cfg.root +
                                   '/root/tagscripts')

        self._installRPM()
        self._touchShadow()
        util.settempdir(self.cfg.root + self.cfg.tmpDir)

        if self.bootstrapJobList:
            self.logger.info(
                "Installing initial chroot bootstrap requirements")
            oldRoot = self.cfg.dbPath
            try:
                # Bootstrap troves are installed outside the system DB,
                # although it doesn't matter as much in trove builds as it does
                # in image builds.
                self.cfg.dbPath += '.bootstrap'
                _install(self.bootstrapJobList)
            finally:
                self.cfg.dbPath = oldRoot

        if self.jobList:
            self.logger.info("Installing chroot requirements")
            _install(self.jobList)

        if self.crossJobList:
            self.logger.info("Installing chroot cross-compile requirements")
            oldRoot = self.cfg.root
            try:
                self.cfg.root += self.sysroot
                _install(self.crossJobList)
            finally:
                self.cfg.root = oldRoot

        util.settempdir(self.cfg.tmpDir)
        self._uninstallRPM()

        # directories must be traversable and files readable (RMK-1006)
        for root, dirs, files in os.walk(self.cfg.root, topdown=True):
            for directory in dirs:
                _addModeBits(os.sep.join((root, directory)), 05)
            for filename in files:
                _addModeBits(os.sep.join((root, filename)), 04)

        if manifest:
            manifest.write(self.cfg.root)
        if self.chrootFingerprint:
            strFingerprint = sha1helper.sha1ToString(self.chrootFingerprint)
            self.logger.info('caching chroot with fingerprint %s',
                             strFingerprint)
            self.chrootCache.store(self.chrootFingerprint, self.cfg.root)
            self.logger.info('caching chroot %s done', strFingerprint)
            self._breakLinks()
Example #53
File: display.py Project: pombr/conary
    def formatFile(self, pathId, path, fileId, version, fileObj=None,
                   prefix='', indent=0):
        taglist = ''
        sha1 = ''
        id = ''
        flavor = ''

        dcfg = self.dcfg
        verbose = dcfg.isVerbose()

        if verbose and isinstance(fileObj, files.SymbolicLink):
            name = "%s -> %s" % (path, fileObj.target())
        else:
            name = path
        if dcfg.fileFlavors:
            if not fileObj.flavor().isEmpty():
                flavor = '[%s]' % fileObj.flavor()

        if dcfg.tags:
            tags = []
            if fileObj.tags:
                tags.extend(fileObj.tags)
            if fileObj.flags.isInitialContents():
                tags.append('initialContents')
            if fileObj.flags.isAutoSource():
                tags.append('autosource')
            if fileObj.flags.isConfig():
                tags.append('config')
            if fileObj.flags.isTransient():
                tags.append('transient')
            if tags:
                taglist = ' {' + ' '.join(tags) + '}'
        if dcfg.sha1s:
            if hasattr(fileObj, 'contents') and fileObj.contents:
                sha1 = sha1ToString(fileObj.contents.sha1()) + ' '
            else:
                sha1 = ' '*41

        if dcfg.ids and pathId:
            id = md5ToString(pathId) + ' ' + sha1ToString(fileId) + ', '
        if dcfg.fileVersions:
            if dcfg.useFullVersions():
                verStr = '    %s' % version
            elif dcfg.showLabels:
                verStr = '    %s/%s' % (version.branch().label(), version.trailingRevision())
            else:
                verStr = '    %s' % version.trailingRevision()
        else:
            verStr = ''

        spacer = '  ' * indent

        if fileObj:
            owner = fileObj.inode.owner()
            if owner[0] == '+':
                owner = owner[1:]
            group = fileObj.inode.group()
            if group[0] == '+':
                group = group[1:]

        if verbose:
            ln = "%s%s%s%s%s    1 %-8s %-8s %s %s %s%s%s%s" % \
              (spacer,
               prefix, id, sha1, fileObj.modeString(), owner,
               group, fileObj.sizeString(),
               fileObj.timeString(), name, flavor, taglist, verStr)
        else:
            ln = "%s%s%s%s%s%s%s" % (spacer, id, sha1, path, flavor,
                                     taglist, verStr)

        yield ln

        if dcfg.fileDeps:
            for ln in self.formatDeps(fileObj.provides(), fileObj.requires(),
                                      indent + 1, showEmpty = False):
                yield ln
Example #54
File: users.py Project: pombredanne/mint
def confirmString():
    """
    Generate a confirmation string
    """
    hash = sha1helper.sha1String(str(random.random()) + str(time.time()))
    return sha1helper.sha1ToString(hash)
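The token only needs to be unguessable, and random.random() seeded alongside time.time() is predictable. A sketch that draws from the OS entropy pool instead, keeping the same 40-hex-character shape (my substitution, not mint's code):

import os
from conary.lib import sha1helper  # assumed import path

def confirmString():
    # 40 hex chars derived from 20 bytes of OS randomness
    return sha1helper.sha1ToString(sha1helper.sha1String(os.urandom(20)))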
Example #55
 def _hasFileContents(self, sha1):
     return self.contentsStore.hasFile(sha1helper.sha1ToString(sha1))
Example #56
 def path(self):
     return self.store.hashToPath(sha1helper.sha1ToString(self.sha1))
Example #57
File: repquery.py Project: pombreda/crest
def getTrove(cu, roleIds, name, version, flavor, mkUrl = None,
             thisHost = None, displayFlavor = None, excludeCapsules = False):

    def buildTupleList(tuples, name, mkUrl = mkUrl):
        l = getattr(datamodel.SingleTrove, name)()
        for troveInfo in sorted(tuples.iter()):
            l.append(name = troveInfo.name(), version = troveInfo.version(),
                     flavor = troveInfo.flavor(), mkUrl = mkUrl)

        return l

    def fileQuery(gfcu, filesInstanceId, dirName = None):
        # XXX restricting by dirName seems an obvious thing to do here,
        # but it actually slows things down??
        #
        # the distinct here is unfortunate, but conary repositories had
        # a bug for about a year which caused them to store duplicate paths
        # if a path was committed for the first time multiple times in
        # a single commit job
        gfcu.execute("""
            SELECT DISTINCT dirName, basename, version, pathId, fileId
                FROM TroveFiles
                JOIN Versions USING (versionId)
                JOIN FileStreams ON (TroveFiles.streamId = FileStreams.streamId)
                JOIN FilePaths ON (TroveFiles.filePathId = FilePaths.filePathId)
                JOIN DirNames ON
                    FilePaths.dirNameId = DirNames.dirNameId
                JOIN Basenames ON (FilePaths.baseNameId = Basenames.baseNameId)
                WHERE TroveFiles.instanceId = ? ORDER BY dirName, basename
        """, filesInstanceId)

    cu.execute("""
        SELECT Instances.instanceId, Nodes.timeStamps FROM Instances
            JOIN Nodes USING (itemId, versionId)
            JOIN Items USING (itemId)
            JOIN Versions ON (Instances.versionId = Versions.versionId)
            JOIN Flavors ON (Instances.flavorId = Flavors.flavorId)
            JOIN UserGroupInstancesCache AS ugi
                ON (instances.instanceId = ugi.instanceId AND
                    ugi.userGroupId in (%s))
        WHERE
            item = ? AND version = ? AND flavor = ?
    """ % ",".join( str(x) for x in roleIds), name, version,
        deps.parseFlavor(flavor).freeze())

    l = [ (x[0], x[1]) for x in cu ]
    if not l:
        return None

    instanceId, timeStamps = l[0]
    frzVer = versions.strToFrozen(version, timeStamps.split(":"))
    verobj = versions.ThawVersion(frzVer)

    tupleLists = [ ( trove._TROVEINFO_TAG_BUILDDEPS, 'builddeps' ),
                   ( trove._TROVEINFO_TAG_POLICY_PROV, 'policyprovider' ),
                   ( trove._TROVEINFO_TAG_LOADEDTROVES, 'loadedtroves' ),
                   ( trove._TROVEINFO_TAG_COPIED_FROM, 'copiedfrom' ),
                   ( trove._TROVEINFO_TAG_DERIVEDFROM, 'derivedfrom' ) ]

    cu.execute("""
    SELECT infoType, data FROM TroveInfo WHERE instanceId = ? AND
        infoType IN (%s)
                """ % ",".join(str(x) for x in
                        [ trove._TROVEINFO_TAG_SOURCENAME,
                          trove._TROVEINFO_TAG_CLONEDFROM,
                          trove._TROVEINFO_TAG_CLONEDFROMLIST,
                          trove._TROVEINFO_TAG_BUILDTIME,
                          trove._TROVEINFO_TAG_SIZE,
                          trove._TROVEINFO_TAG_METADATA,
                          trove._TROVEINFO_TAG_CAPSULE,
                        ] + [ x[0] for x in tupleLists ]
                ), instanceId)

    troveInfo = {}
    for infoType, data in cu:
        data = cu.frombinary(data)
        infoClass = trove.TroveInfo.streamDict[infoType][1]
        troveInfo[infoType] = infoClass(data)

    kwargs = { 'name' : name,
               'version' : verobj,
               'flavor' : flavor }

    if displayFlavor is not None:
        kwargs['displayflavor'] = displayFlavor

    if trove._TROVEINFO_TAG_BUILDTIME in troveInfo:
        kwargs['buildtime'] = int(troveInfo[trove._TROVEINFO_TAG_BUILDTIME]())

    if trove._TROVEINFO_TAG_SOURCENAME in troveInfo:
        kwargs['source'] = (troveInfo[trove._TROVEINFO_TAG_SOURCENAME](),
            verobj.getSourceVersion(), '')

    if trove._TROVEINFO_TAG_SIZE in troveInfo:
        kwargs['size'] = troveInfo[trove._TROVEINFO_TAG_SIZE]()

    if trove._TROVEINFO_TAG_METADATA in troveInfo:
        md = troveInfo[trove._TROVEINFO_TAG_METADATA].get()
        kwargs['shortdesc'] = md['shortDesc']
        kwargs['longdesc'] = md['longDesc']

        if md['licenses']:
            kwargs['license'] = [ x for x in md['licenses'] ]
        if md['crypto']:
            kwargs['crypto'] = [ x for x in md['crypto'] ]

    for (tag, tagName) in tupleLists:
        if tag in troveInfo:
            kwargs[tagName] = buildTupleList(troveInfo[tag], tagName,
                                             mkUrl = mkUrl)

    t = datamodel.SingleTrove(mkUrl = mkUrl, thisHost = thisHost, **kwargs)

    if trove._TROVEINFO_TAG_CLONEDFROMLIST in troveInfo:
        clonedFromList = troveInfo[trove._TROVEINFO_TAG_CLONEDFROMLIST]
    elif (trove._TROVEINFO_TAG_CLONEDFROM in troveInfo):
        clonedFromList = [ troveInfo[trove._TROVEINFO_TAG_CLONEDFROM]() ]
    else:
        clonedFromList = []

    for ver in clonedFromList:
        t.addClonedFrom(name, ver, flavor, mkUrl = mkUrl)

    hasCapsule = False
    if trove._TROVEINFO_TAG_CAPSULE in troveInfo:
        if troveInfo[trove._TROVEINFO_TAG_CAPSULE].type():
            hasCapsule = True

    fileQuery(cu, instanceId)

    for (dirName, baseName, fileVersion, pathId, fileId) in cu:
        dirName = cu.frombinary(dirName)
        baseName = cu.frombinary(baseName)
        if pathId == trove.CAPSULE_PATHID:
            isCapsule = 1
            contentAvailable = not excludeCapsules
        else:
            isCapsule = None
            contentAvailable = not hasCapsule

        fileObj = datamodel.FileReference(
                        path = os.path.join(dirName, baseName),
                        version = fileVersion,
                        pathId = md5ToString(cu.frombinary(pathId)),
                        fileId = sha1ToString(cu.frombinary(fileId)),
                        isCapsule = isCapsule,
                        contentAvailable = contentAvailable,
                        mkUrl = mkUrl, thisHost = thisHost)
        t.addFile(fileObj)

    cu.execute("""
        SELECT item, version, flavor, TroveTroves.includedId, Nodes.timeStamps
          FROM TroveTroves
            JOIN Instances ON (Instances.instanceId = TroveTroves.includedId)
            JOIN Nodes USING (itemId, versionId)
            JOIN Items USING (itemId)
            JOIN Versions ON (Versions.versionId = Instances.versionId)
            JOIN Flavors ON (Flavors.flavorId = Instances.flavorId)
            WHERE
                TroveTroves.instanceId = ? AND
                (TroveTroves.flags & %d) = 0
            ORDER BY item, version, flavor
    """ % schema.TROVE_TROVES_WEAKREF, instanceId)

    for (subName, subVersion, subFlavor, refInstanceId, subTS) in cu:
        subFlavor = str(deps.ThawFlavor(subFlavor))
        frzVer = versions.strToFrozen(subVersion,
                [ x for x in subTS.split(":") ])
        subV = versions.ThawVersion(frzVer)
        t.addReferencedTrove(subName, subV, subFlavor, mkUrl = mkUrl)

        # It would be far better to use file tags to identify these build
        # logs, but it's significantly slower as well because they're in
        # the file objects rather than the trove (and those file objects
        # could be stored on a different repository)
        if not subName.endswith(':debuginfo'):
            continue

        fileQuery(cu, refInstanceId, dirName = '/usr/src/debug/buildlogs')
        logHost = subV.getHost()
        for (dirName, baseName, fileVersion, pathId, fileId) in cu:
            if (dirName) != '/usr/src/debug/buildlogs':
                continue

            if baseName.endswith('-log.bz2'):
                t.setBuildLog(logHost, sha1ToString(fileId))
            elif baseName.endswith('-xml.bz2'):
                t.setXMLBuildLog(logHost, sha1ToString(fileId))

    return t