Example #1
        def _testCs(repos, troves, idxLength, fileCount):
            job = [(x.getName(), (None, None), (x.getVersion(), x.getFlavor()),
                    True) for x in troves]
            repos.createChangeSetFile(job, self.workDir + '/foo.ccs')
            fc = filecontainer.FileContainer(
                util.ExtendedFile(self.workDir + '/foo.ccs',
                                  "r",
                                  buffering=False))

            info = fc.getNextFile()
            assert (info[0] == 'CONARYCHANGESET')

            info = fc.getNextFile()
            while info is not None:
                assert (len(info[0]) == idxLength)
                fileCount -= 1

                if 'ptr' in info[1]:
                    s = info[2].read()
                    s = gzip.GzipFile(None, "r", fileobj=StringIO(s)).read()
                    assert (len(s) == idxLength)

                info = fc.getNextFile()

            assert (fileCount == 0)
Example #2
    def testSeekableNestedFile(self):
        (fd, name) = tempfile.mkstemp()
        f = util.ExtendedFile(name, "w++", buffering = False)
        os.close(fd)
        os.unlink(name)

        s = [ "hello world", "foo bar bang" ]
        fs = []

        f.write(s[0])
        fs.append(util.SeekableNestedFile(f, len(s[0]), 0))
        # with no start offset the window begins at f's current position,
        # i.e. just before s[1] is written below
        fs.append(util.SeekableNestedFile(f, len(s[1])))
        f.write(s[1])

        assert(fs[0].read() == s[0])
        assert(fs[1].read() == s[1])
        assert(fs[0].read() == "")
        assert(fs[1].read() == "")

        assert(fs[0].pread(offset = 0) == s[0])
        assert(fs[0].read() == "")

        fs[0].seek(0)
        assert(fs[1].read() == "")
        assert(fs[0].read() == s[0])
        assert(fs[0].read() == "")

        fs[0].seek(5)
        assert(fs[0].read() == s[0][5:])
        fs[0].seek(5 - len(s[0]), 2)
        assert(fs[0].read() == s[0][5:])
        fs[0].seek(5)
        fs[0].seek(2, 1)
        assert(fs[0].read() == s[0][7:])
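A minimal sketch of the contract the test above exercises (the file setup mirrors the test; the variable names are assumptions): util.SeekableNestedFile(f, size, start) exposes a size-byte window of f beginning at start, with its own read offset, while pread(count, offset) reads at a window-relative offset without moving it.

    f = util.ExtendedFile(name, "w+", buffering=False)
    f.write("hello world")
    window = util.SeekableNestedFile(f, 5, 6)   # 5-byte window at offset 6
    assert(window.read() == "world")            # reads stop at the window's end
    assert(window.read() == "")
    assert(window.pread(5, 0) == "world")       # pread leaves the offset alone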
Example #3
    def testSeekableNestedFileNested(self):
        # Nested nested files
        s = "0123456789"

        (fd, name) = tempfile.mkstemp()
        f = util.ExtendedFile(name, "w+", buffering = False)
        os.close(fd)
        os.unlink(name)

        f.write(s)
        # Start from the second byte, make sure pread works
        f1 = util.SeekableNestedFile(f, 9, 1)
        first = f1.pread(1, 0)
        self.assertEqual(first, '1')

        # Create nested files within the first nested file
        f21 = util.SeekableNestedFile(f1, 5, 1)

        # Make sure pread, read, tell all work as expected
        first = f21.pread(1, 0)
        self.assertEqual(first, '2')
        self.assertEqual(f21.read(), '23456')
        self.assertEqual(f21.tell(), 5)

        # f21 covers '23456'; a size-3 window starting at offset 4 within
        # it has only one byte left to read
        f31 = util.SeekableNestedFile(f21, 3, 4)
        self.assertEqual(f31.read(), '6')
Example #4
    def testVerifySig(self):
        rpmName = 'tmpwatch-2.9.7-1.1.el5.2.x86_64.rpm'
        rpmPath = os.path.join(self.archiveDir, rpmName)
        fileObj = file(rpmPath)
        header = rpmhelper.readHeader(fileObj)
        from conary.lib import openpgpfile
        sig = openpgpfile.readSignature(header[rpmhelper.SIG_GPG])

        fileObj.seek(0)
        rpmhelper.readSignatureHeader(fileObj)
        k = openpgpfile.getKeyFromString(
            'E8562897', openpgpfile.parseAsciiArmorKey(pgpKeyCentos))

        rpmhelper.verifySignatures(fileObj, [k])

        # Similar deal, fileObj is an ExtendedFile
        fileObj = util.ExtendedFile(rpmPath, buffering=False)
        rpmhelper.verifySignatures(fileObj, [k])

        # Finally, StringIO
        fileObj.seek(0)
        fileObj = StringIO.StringIO(fileObj.read())
        rpmhelper.verifySignatures(fileObj, [k])

        # Replace last byte
        fileObj = StringIO.StringIO(fileObj.getvalue()[:-1])
        fileObj.seek(0, 2)
        fileObj.write("\xff")
        fileObj.seek(0)
        e = self.assertRaises(rpmhelper.MD5SignatureError,
                              rpmhelper.verifySignatures, fileObj, [k])
        self.assertEqual(
            str(e), 'The MD5 digest fails to verify: '
            'expected 6cc7c546c3a5de90bb272b11be2f3d67, got 744d88f4164ec2974b49839a69ea589d'
        )
Example #5
        def csContents(repos, job):
            p = os.path.join(self.workDir, 'foo.ccs')
            repos.createChangeSetFile(job, p)
            fc = filecontainer.FileContainer(
                                    util.ExtendedFile(p, buffering = False))
            l = []
            info = fc.getNextFile()
            while info:
                l.append(info)
                info = fc.getNextFile()

            return l
Example #6
    def testExtendedFile(self):
        fd, fn = tempfile.mkstemp()
        try:
            os.write(fd, "hello world")
            os.close(fd)
            f = util.ExtendedFile(fn, buffering=False)

            assert(f.read(5) == 'hello')
            assert(f.pread(5, 6) == 'world')
            assert(f.tell() == 5)
        finally:
            os.unlink(fn)
Example #7
    def test07_BadRpmCapsule(self):
        cmp = self.addRPMComponent("ghost:rpm=1.0", 'epoch-1.0-1.i386.rpm')
        repos = self.openRepository()
        orig = self.workDir + '/ghost.ccs'
        modified = self.workDir + '/ghost-new.ccs'
        repos.createChangeSetFile([ (cmp.getName(), (None, None),
                                     cmp.getNameVersionFlavor()[1:],
                                     True) ], orig)
        fc = filecontainer.FileContainer(
                                util.ExtendedFile(orig, buffering = False))
        newFc = filecontainer.FileContainer(
                        util.ExtendedFile(modified, "w", buffering = False))
        # CONARYCHANGESET
        (name, tag, contents) = fc.getNextFile()
        newFc.addFile(name, filecontents.FromFile(contents), tag,
                      precompressed = True)
        # the RPM
        (name, tag, contents) = fc.getNextFile()
        contents = filecontents.FromString("busted!")
        newFc.addFile(name, contents, tag)
        cs = changeset.ChangeSetFromFile(modified)
        # this throws away the output about the install failing
        self.assertRaises(files.Sha1Exception, self.captureOutput,
                          self.updatePkg, self.rootDir, cs)
Example #8
    def _produceChangeset(self, items):
        readNestedFile = proxy.ChangesetFileReader.readNestedFile
        for path, isChangeset, preserveFile in items:
            if isChangeset:
                csFile = util.ExtendedFile(path, 'rb', buffering=False)
                changeSet = filecontainer.FileContainer(csFile)
                for data in changeSet.dumpIter(readNestedFile,
                                               args=(self.contentsStore, )):
                    yield data
                del changeSet
            else:
                fobj = open(path, 'rb')
                for data in util.iterFileChunks(fobj):
                    yield data
                fobj.close()

            if not preserveFile:
                os.unlink(path)
Example #9
    def testMergedConfigOrder(self):
        # Make sure that config files from absolute change sets are merged
        # correctly relative to config files from relative ones.
        t1 = self.addComponent('test:runtime', '1.0-1-1',
                               fileContents = [ ("/etc/cfg", "contents1\n") ] )
        # This filename is magically designed to have its pathId before
        # the pathId for /etc/cfg (the pathId in addComponent is a
        # simple md5 of the path)
        o  = self.addComponent('other:runtime', '1.0-1-1',
                               fileContents = [ ("/etc/one", "something") ] )

        repos = self.openRepository()
        (fd, path) = tempfile.mkstemp()
        os.close(fd)
        repos.createChangeSetFile(
                    [ (o.getName(),  (None, None),
                                     (o.getVersion(), o.getFlavor()),
                       False),
                      (t1.getName(), (None, None),
                                     (t1.getVersion(), t1.getFlavor()),
                       True) ], path)

        f = util.ExtendedFile(path, "r", buffering = False)
        os.unlink(path)
        fc = filecontainer.FileContainer(f)

        # first comes the set of troveCs objects
        (name, tag, size) = fc.getNextFile()
        assert(name == 'CONARYCHANGESET')

        # next is the diff
        (name, tag, size) = fc.getNextFile()
        assert(name[0:16] == sha1helper.md5String("/etc/one"))

        # and then the config file
        (name, tag, size) = fc.getNextFile()
        assert(name[0:16] == sha1helper.md5String("/etc/cfg"))

        # and that's it
        rc = fc.getNextFile()
        assert(rc is None)
Example #10
    def testChangeSetDumpOffset(self):
        """Stress test offset arg to dumpIter"""
        # Make a changeset with one regular file
        cs = changeset.ChangeSet()
        pathId = '0' * 16
        fileId = '0' * 20
        contents = 'contents'
        store = datastore.FlatDataStore(self.workDir)
        sha1 = sha1helper.sha1String(contents)
        store.addFile(StringIO(contents), sha1)
        rawFile = store.openRawFile(sha1)
        rawSize = os.fstat(rawFile.fileno()).st_size
        contObj = filecontents.CompressedFromDataStore(store, sha1)
        cs.addFileContents(pathId,
                           fileId,
                           changeset.ChangedFileTypes.file,
                           contObj,
                           cfgFile=False,
                           compressed=True)

        # Test dumping a fully populated changeset with every possible resume
        # point
        path = os.path.join(self.workDir, 'full.ccs')
        size = cs.writeToFile(path)
        expected = open(path).read()
        self.assertEqual(len(expected), size)
        fc = filecontainer.FileContainer(
            util.ExtendedFile(path, 'r', buffering=False))

        def noop(name, tag, size, subfile):
            assert tag[2:] != changeset.ChangedFileTypes.refr[4:]
            return tag, size, subfile

        for offset in range(size + 1):
            fc.reset()
            actual = ''.join(fc.dumpIter(noop, (), offset))
            self.assertEqual(actual, expected[offset:])

        # Test dumping a changeset with contents stripped out
        path = os.path.join(self.workDir, 'stubby.ccs')
        size2 = cs.writeToFile(path, withReferences=True)
        self.assertEqual(size2, size)
        fc = filecontainer.FileContainer(
            util.ExtendedFile(path, 'r', buffering=False))
        expect_reference = '%s %d' % (sha1.encode('hex'), rawSize)

        def addfile(name, tag, size, subfile, dummy):
            self.assertEqual(dummy, 'dummy')
            if name == 'CONARYCHANGESET':
                return tag, size, subfile
            elif name == pathId + fileId:
                self.assertEqual(tag[2:], changeset.ChangedFileTypes.refr[4:])
                self.assertEqual(subfile.read(), expect_reference)
                tag = tag[0:2] + changeset.ChangedFileTypes.file[4:]
                rawFile.seek(0)
                return tag, rawSize, rawFile
            else:
                assert False

        for offset in range(size + 1):
            fc.reset()
            actual = ''.join(fc.dumpIter(addfile, ('dummy', ), offset))
            self.assertEqual(actual, expected[offset:])
Example #11
    def test(self):
        count = fileCount()

        # let's make sure we can't open an arbitrary file as a container
        f = util.ExtendedFile("/bin/ls", "r", buffering=False)

        self.assertRaises(filecontainer.BadContainer, FileContainer, f)

        f.close()
        if (count != fileCount()):
            raise AssertionError("too many files are open %s" % count)

        # create a new container
        f = util.ExtendedFile(self.fn, "w+", buffering=False)
        c = FileContainer(f)
        c.close()

        data = []
        tags = []
        names = []
        c = FileContainer(f)

        self.assertRaises(AssertionError, c.addFile, "name",
                          FromString("data"), "tag")

        c.close()
        os.unlink(self.fn)
        f = util.ExtendedFile(self.fn, "w+", buffering=False)
        c = FileContainer(f)

        data.append("contents of file1")
        tags.append("extra data")
        names.append("file1")
        c.addFile(names[0], FromString(data[0]), tags[0])

        data.append("file2 gets some contents")
        tags.append("tag")
        names.append("file2")
        c.addFile(names[1], FromString(data[1]), tags[1])

        data.append("")
        tags.append("empty")
        names.append("")
        c.addFile(names[2], FromString(data[2]), tags[2])

        c.close()

        c = FileContainer(f)
        checkFiles(c, names, data, tags)

        f = util.ExtendedFile(self.fn, "r+", buffering=False)
        c = FileContainer(f)
        checkFiles(c, names, data, tags)
        c.reset()
        checkFiles(c, names, data, tags)
        c.close()

        f = util.ExtendedFile(self.fn, "r+", buffering=False)
        c = FileContainer(f)
        name, tag, f = c.getNextFile()
        assert (name == names[0])
Example #12
    def testLargeFiles(self):
        # test adding files > 4gig to a filecontainer. we replace the write
        # call with one which handles sparsity
        class SparseFile(util.ExtendedFile):
            def __init__(self, *args, **kwargs):
                self.needsWrite = False
                util.ExtendedFile.__init__(self, *args, **kwargs)

            def write(self, s):
                if len(s) > 100 and s[0] == '\0' and s[-1] == '\0':
                    self.seek(len(s) - 1, 2)
                    self.needsWrite = True
                    return len(s)

                return util.ExtendedFile.write(self, s)

            def close(self):
                if self.needsWrite:
                    self.write('\0')
                    self.needsWrite = False
                # flush any pending sparse byte, then really close the file
                util.ExtendedFile.close(self)

            def seek(self, *args):
                if self.needsWrite:
                    self.write('\0')
                    self.needsWrite = False

                return util.ExtendedFile.seek(self, *args)

        class FalseFile:
            def __init__(self, size):
                self.size = size
                self.offset = 0

            def seek(self, offset, whence=0):
                assert (whence == 0)
                self.offset = offset

            def read(self, bytes):
                self.offset += bytes
                if self.offset > self.size:
                    self.offset -= bytes
                    bytes = self.size - self.offset
                    self.offset = self.size

                return "\0" * bytes

        f = SparseFile(self.fn, "w+", buffering=False)
        c = FileContainer(f)
        totalSize = 0x100001000
        c.addFile('test',
                  FromFile(FalseFile(totalSize)),
                  'testdata',
                  precompressed=True)
        c.addFile('end',
                  FromString('endcontents'),
                  'enddata',
                  precompressed=True)
        c.close()

        c = FileContainer(util.ExtendedFile(self.fn, 'r', buffering=False))
        name, tag, f = c.getNextFile()
        storedSize = f.seek(0, 2)
        assert (storedSize == totalSize)
        assert (tag == 'testdata')

        name, tag, f = c.getNextFile()
        assert (name == 'end')
        assert (tag == 'enddata')
        s = f.read()
        assert (s == 'endcontents')
Example #13
def verifySignatures(f, validKeys=None):
    """
    Given an extended file, compute signatures
    """
    f.seek(0)
    h = readHeader(f)

    # Cheap test first: verify MD5 sig
    sigmd5 = h.get(SIG_MD5, None)
    if sigmd5 is not None:
        f.seek(0)
        readSignatureHeader(f)

        # verify md5 digest
        md5 = digestlib.md5()
        util.copyfileobj(f, NullWriter(), digest=md5)
        if md5.digest() != sigmd5:
            raise MD5SignatureError(
                "The MD5 digest fails to verify: expected %s, got %s" %
                (sha1helper.md5ToString(sigmd5), md5.hexdigest()))

    # Don't bother if no gpg signature was present, or no valid keys were
    # presented
    if validKeys is None:
        return
    sigString = h.get(SIG_GPG, None)
    if sigString is None:
        return
    # Skip to immutable header region
    f.seek(0)
    readSignatureHeader(f)
    sig = openpgpfile.readSignature(sigString)

    keyId = sig.getSignerKeyId()
    matchingKeys = [x for x in validKeys if x.hasKeyId(keyId)]
    if not matchingKeys:
        raise PGPSignatureError("Signature generated with key %s does "
                                "not match valid keys %s" %
                                (keyId, ', '.join(x.getKeyId()
                                                  for x in validKeys)))

    key = matchingKeys[0]

    # signature verification assumes a seekable stream and will seek to the
    # beginning; use a SeekableNestedFile
    size = h.getHeaderPlusPayloadSize()
    if size is None:
        pos = f.tell()
        f.seek(0, 2)
        size = f.tell()
        f.seek(pos, 0)
    snf = None
    if hasattr(f, 'pread'):
        extFile = f
    elif hasattr(f, 'name'):
        extFile = util.ExtendedFile(f.name, buffering=False)
    else:
        # worst case scenario, we slurp everything in memory
        extFile = util.ExtendedStringIO(f.read())
        snf = extFile
    if snf is None:
        snf = util.SeekableNestedFile(extFile, start=f.tell(), size=size)
    try:
        sig.verifyDocument(key.getCryptoKey(), snf)
    except openpgpfile.SignatureError:
        raise PGPSignatureError
Example #14
def get(port,
        isSecure,
        repos,
        req,
        restHandler=None,
        authToken=None,
        repServer=None):
    uri = req.uri
    if uri.endswith('/'):
        uri = uri[:-1]
    cmd = os.path.basename(uri)

    if authToken is None:
        authToken = getAuth(req)

    if authToken is None:
        return apache.HTTP_BAD_REQUEST

    if authToken[0] != "anonymous" and not isSecure and repos.cfg.forceSSL:
        return apache.HTTP_FORBIDDEN

    if restHandler and uri.startswith(restHandler.prefix):
        return restHandler.handle(req, req.unparsed_uri)
    elif cmd == "changeset":
        if not req.args:
            # the client asked for a changeset, but there is no
            # ?tmpXXXXXX.cf after /conary/changeset (CNY-1142)
            return apache.HTTP_BAD_REQUEST
        if '/' in req.args:
            return apache.HTTP_FORBIDDEN

        localName = repos.tmpPath + "/" + req.args + "-out"

        if localName.endswith(".cf-out"):
            try:
                f = open(localName, "r")
            except IOError:
                return apache.HTTP_NOT_FOUND

            os.unlink(localName)

            items = []
            totalSize = 0
            for l in f.readlines():
                (path, size, isChangeset, preserveFile) = l.split()
                size = int(size)
                isChangeset = int(isChangeset)
                preserveFile = int(preserveFile)
                totalSize += size
                items.append((path, size, isChangeset, preserveFile))
            f.close()
            del f
        else:
            try:
                size = os.stat(localName).st_size
            except OSError:
                return apache.HTTP_NOT_FOUND
            items = [(localName, size, 0, 0)]
            totalSize = size

        # TODO: refactor to use proxy.ChangesetFileReader
        readNestedFile = proxy.ChangesetFileReader.readNestedFile
        req.content_type = "application/x-conary-change-set"
        req.set_content_length(totalSize)
        for (path, size, isChangeset, preserveFile) in items:
            if isChangeset:
                cs = FileContainer(util.ExtendedFile(path, buffering=False))
                try:
                    for data in cs.dumpIter(readNestedFile,
                                            args=(repos.getContentsStore(), )):
                        req.write(data)
                except IOError, err:
                    log.error("IOError dumping changeset: %s" % err)
                    return apache.HTTP_BAD_REQUEST
                del cs
            else:
                sendfile(req, size, path)

            if not preserveFile:
                os.unlink(path)

        return apache.OK
Example #15
        csPath = self.workDir + '/test.ccs'
        #repos.createChangeSetFile( [
            #( 'test:runtime', ( old.getVersion(), old.getFlavor() ),
                              #( new.getVersion(), new.getFlavor() ), False ),
            #], csPath)
        #fc = filecontainer.FileContainer(
                                #util.ExtendedFile(csPath, buffering = False))
        #fc.getNextFile()
        #assert(not fc.getNextFile())

        repos.createChangeSetFile( [
            ( 'test:runtime', ( old.getVersion(), old.getFlavor() ),
                              ( new.getVersion(), new.getFlavor() ), False ),
            ], csPath, mirrorMode = True)
        fc = filecontainer.FileContainer(
                                util.ExtendedFile(csPath, buffering = False))
        fc.getNextFile()
        if not singleRepos:
            assert(fc.getNextFile())

        repos.createChangeSetFile( [
            ( 'test:runtime', ( orig.getVersion(), orig.getFlavor() ),
                              ( new.getVersion(),  new.getFlavor() ), False ),
            ], csPath, mirrorMode = True)
        cs = changeset.ChangeSetFromFile(csPath)
        assert( [ x[0] != '\x01' for x in cs.files.values() ] == 
                    [ True, True ] )

    def testMirrorModeChangesets1(self):
        self._testMirrorModeChangesets(singleRepos = True)
Example #16
def open(fn, mode='r', buffering=False):
    return util.ExtendedFile(fn, mode, buffering)
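Example #16 is only a thin convenience wrapper around util.ExtendedFile. A minimal usage sketch, combining it with the pread behavior shown in Example #6 (the path is a hypothetical placeholder):

    f = open('/tmp/example.dat', 'w+')   # the wrapper above, not the builtin
    f.write('hello world')
    assert(f.pread(5, 6) == 'world')     # positional read at offset 6
    assert(f.tell() == 11)               # the file offset did not move
    f.close()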