Example #1
    def addFile(self, fileName, contents, tableData, precompressed = False):
        assert(isinstance(contents, filecontents.FileContents))
        assert(self.mutable)

        fileObj = contents.get()
        headerOffset = self.file.tell()
        self.file.write(struct.pack("!HH", SUBFILE_MAGIC, len(fileName)))
        self.file.write(struct.pack("!IH", 0, len(tableData)))
        self.file.write(fileName)
        self.file.write(tableData)

        if precompressed:
            size = util.copyfileobj(fileObj, self.file)
        else:
            start = self.file.tell()
            gzFile = util.DeterministicGzipFile('', "wb", 6, self.file)
            util.copyfileobj(fileObj, gzFile)
            gzFile.close()
            size = self.file.tell() - start

        if size < 0x100000000:
            self.file.seek(headerOffset + 4, SEEK_SET)
            self.file.write(struct.pack("!I", size))
            self.file.seek(0, SEEK_END)
        else:
            self.file.seek(headerOffset, SEEK_SET)
            totalSize = size + len(fileName) + len(tableData)
            self.file.write(struct.pack("!HII", LARGE_SUBFILE_MAGIC,
                                        totalSize >> 32,
                                        totalSize & 0xFFFFFFFF))
            self.file.seek(0, SEEK_END)
            self.file.write(struct.pack("!HH", len(fileName), len(tableData)))
Example #2
 def _readlineify(self):
     if hasattr(self.fp, 'readline'):
         return
     fp = util.BoundedStringIO()
     util.copyfileobj(self.fp, fp)
     fp.seek(0)
     self.fp = fp
Example #3
 def migrateOneDatabase(self, database):
     sourceLog = self._openLog('dump')
     targetLog = self._openLog('restore')
     source = subprocess.Popen([
         self.cluster2.bin('pg_dump'),
         '--username=postgres',
         '--port=' + str(self.cluster1.port),
         '--format=custom',
         '--compress=0',
         '--verbose',
         database,
         ], shell=False, stdout=subprocess.PIPE, stderr=sourceLog)
     target = subprocess.Popen([
         self.cluster2.bin('pg_restore'),
         '--username=postgres',
         '--dbname=' + database,
         '--port=65001',
         '--single-transaction',
         '--verbose',
         ], shell=False, stdin=subprocess.PIPE,
         stdout=targetLog, stderr=targetLog)
     cny_util.copyfileobj(source.stdout, target.stdin)
     sourceLog.close()
     targetLog.close()
     if source.wait():
         raise RuntimeError("pg_dumpall failed with exit code %s" %
                 source.returncode)
     target.stdin.close()
     if target.wait():
         raise RuntimeError("psql failed with exit code %s" %
                 target.returncode)
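
The same producer/consumer shape recurs in Example #7: one subprocess writes to
a pipe, copyfileobj pumps that pipe into a second subprocess, and the
consumer's stdin is closed so it sees EOF before it is waited on. A
stripped-down sketch of the pattern with hypothetical commands and output path
(not the pg_dump/pg_restore invocation above):

    import subprocess
    from conary.lib import util

    producer = subprocess.Popen(['cat', '/etc/hosts'], stdout=subprocess.PIPE)
    consumer = subprocess.Popen(['gzip', '-c'], stdin=subprocess.PIPE,
                                stdout=open('/tmp/hosts.gz', 'wb'))
    util.copyfileobj(producer.stdout, consumer.stdin)
    consumer.stdin.close()      # signal EOF to the consumer
    if producer.wait():
        raise RuntimeError("producer failed with exit code %s" %
                producer.returncode)
    if consumer.wait():
        raise RuntimeError("consumer failed with exit code %s" %
                consumer.returncode)
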
Example #4
def _getFile(outPath, inUrl, method='GET', session=None, callback=None):
    headers = {}
    if session:
        headers['Cookie'] = 'vmware_soap_session=%s; $Path=/' % session

    response = _makeConnection(inUrl, method, headers,
        callback=callback)

    if response and response.status not in (200, 201):
        raise RuntimeError('%s failed: %d - %s' % (
            method, response.status, response.reason))
    elif not response:
        raise RuntimeError('%s failed' % method)

    progress = lambda x, y: x
    if callback:
        # Default to the dummy progress callback
        progress = getattr(callback, 'progress', progress)

    fileObj = file(outPath, "w")
    contentLength = response.msg.get('Content-Length')
    if contentLength is not None:
        contentLength = int(contentLength)
    # Chunked transfers will not set Content-Length
    # we let copyfileobj read to the end
    util.copyfileobj(response, fileObj, bufSize=BUFSIZE,
        sizeLimit=contentLength, callback=progress)

    response.close()
Example #5
    def _sendData(self, conn):
        if self.data is None:
            return
        if not hasattr(self.data, 'read'):
            conn.send(self.data)
            return

        if self.chunked:
            # Use chunked coding
            output = wrapper = ChunkedSender(conn)
        elif self.size is not None:
            # Use identity coding
            output = conn
            wrapper = None
        else:
            raise RuntimeError("Request must use chunked transfer coding "
                               "if size is not known.")
        util.copyfileobj(self.data,
                         output,
                         callback=self.callback,
                         rateLimit=self.rateLimit,
                         abortCheck=self.abortCheck,
                         sizeLimit=self.size)
        if wrapper:
            wrapper.close()
Example #6
def _getFile(outPath, inUrl, method='GET', session=None, callback=None):
    headers = {}
    if session:
        headers['Cookie'] = 'vmware_soap_session=%s; $Path=/' % session

    response = _makeConnection(inUrl, method, headers, callback=callback)

    if response and response.status not in (200, 201):
        raise RuntimeError('%s failed: %d - %s' %
                           (method, response.status, response.reason))
    elif not response:
        raise RuntimeError('%s failed' % method)

    progress = lambda x, y: x
    if callback:
        # Default to the dummy progress callback
        progress = getattr(callback, 'progress', progress)

    fileObj = file(outPath, "w")
    contentLength = response.msg.get('Content-Length')
    if contentLength is not None:
        contentLength = int(contentLength)
    # Chunked transfers will not set Content-Length
    # we let copyfileobj read to the end
    util.copyfileobj(response,
                     fileObj,
                     bufSize=BUFSIZE,
                     sizeLimit=contentLength,
                     callback=progress)

    response.close()
Example #7
 def migrateGlobals(self):
     log.info("Migrating global data")
     sourceLog = self._openLog('dump')
     targetLog = self._openLog('restore')
     source = subprocess.Popen([
         self.cluster2.bin('pg_dumpall'),
         '--username=postgres',
         '--port=' + str(self.cluster1.port),
         '--globals-only',
         '--no-tablespaces',
         ], shell=False, stdout=subprocess.PIPE, stderr=sourceLog)
     target = subprocess.Popen([
         self.cluster2.bin('psql'),
         '--username=postgres',
         '--dbname=postgres',
         '--port=65001',
         '--no-psqlrc',
         ], shell=False, stdin=subprocess.PIPE,
         stdout=targetLog, stderr=targetLog)
     cny_util.copyfileobj(source.stdout, target.stdin)
     sourceLog.close()
     targetLog.close()
     if source.wait():
         raise RuntimeError("pg_dumpall failed with exit code %s" %
                 source.returncode)
     target.stdin.close()
     if target.wait():
         raise RuntimeError("psql failed with exit code %s" %
                 target.returncode)
Example #8
    def _writeFile(cls, fileObj, outFds, precompressed, computeSha1):
        if precompressed and hasattr(fileObj, '_fdInfo'):
            (fd, start, size) = fileObj._fdInfo()
            pid = os.getpid()
            realHash = digest_uncompress.sha1Copy((fd, start, size), outFds)
            for x in outFds:
                cls._fchmod(x)
                os.close(x)

            return realHash
        else:
            for fd in outFds:
                outFileObj = os.fdopen(fd, "w")
                contentSha1 = digestlib.sha1()
                if precompressed and computeSha1:
                    tee = Tee(fileObj, outFileObj)
                    uncompObj = gzip.GzipFile(mode = "r", fileobj = tee)
                    s = uncompObj.read(128 * 1024)
                    while s:
                        contentSha1.update(s)
                        s = uncompObj.read(128 * 1024)
                    uncompObj.close()
                elif precompressed:
                    util.copyfileobj(fileObj, outFileObj)
                else:
                    dest = gzip.GzipFile(mode = "w", fileobj = outFileObj)
                    util.copyfileobj(fileObj, dest, digest = contentSha1)
                    dest.close()

                # this closes tmpFd for us
                cls._fchmod(fd)
                outFileObj.close()
                fileObj.seek(0)

            return contentSha1.digest()
Example #10
File: files.py Project: sweptr/conary
    def restore(self,
                fileContents,
                root,
                target,
                journal=None,
                sha1=None,
                nameLookup=True,
                **kwargs):

        keepTempfile = kwargs.get('keepTempfile', False)
        destTarget = target

        if fileContents is not None:
            # this is first to let us copy the contents of a file
            # onto itself; the unlink helps that to work
            src = fileContents.get()
            inFd = None

            if fileContents.isCompressed() and hasattr(src, '_fdInfo'):
                # inFd is None if we can't figure this information out
                # (for _LazyFile for instance)
                (inFd, inStart, inSize) = src._fdInfo()

            path, name = os.path.split(target)
            if not os.path.isdir(path):
                util.mkdirChain(path)

            # Uncompress to a temporary file, using the accelerated
            # implementation if possible.
            if inFd is not None and util.sha1Uncompress is not None:
                actualSha1, tmpname = util.sha1Uncompress(
                    inFd, inStart, inSize, path, name)
            else:
                if fileContents.isCompressed():
                    src = gzip.GzipFile(mode='r', fileobj=src)
                tmpfd, tmpname = tempfile.mkstemp(name, '.ct', path)
                try:
                    d = digestlib.sha1()
                    f = os.fdopen(tmpfd, 'w')
                    util.copyfileobj(src, f, digest=d)
                    f.close()
                    actualSha1 = d.digest()
                except:
                    os.unlink(tmpname)
                    raise

            if keepTempfile:
                # Make a hardlink "copy" for the caller to use
                destTarget = tmpname + '.ptr'
                os.link(tmpname, destTarget)
            try:
                os.rename(tmpname, target)
            except OSError, err:
                if err.args[0] != errno.EISDIR:
                    raise
                os.rmdir(target)
                os.rename(tmpname, target)

            if (sha1 is not None and sha1 != actualSha1):
                raise Sha1Exception(target)
Example #11
    def _writeFile(cls, fileObj, outFds, precompressed, computeSha1):
        if precompressed and hasattr(fileObj, '_fdInfo'):
            (fd, start, size) = fileObj._fdInfo()
            pid = os.getpid()
            realHash = digest_uncompress.sha1Copy((fd, start, size), outFds)
            for x in outFds:
                cls._fchmod(x)
                os.close(x)

            return realHash
        else:
            for fd in outFds:
                outFileObj = os.fdopen(fd, "w")
                contentSha1 = digestlib.sha1()
                if precompressed and computeSha1:
                    tee = Tee(fileObj, outFileObj)
                    uncompObj = gzip.GzipFile(mode="r", fileobj=tee)
                    s = uncompObj.read(128 * 1024)
                    while s:
                        contentSha1.update(s)
                        s = uncompObj.read(128 * 1024)
                    uncompObj.close()
                elif precompressed:
                    util.copyfileobj(fileObj, outFileObj)
                else:
                    dest = gzip.GzipFile(mode="w", fileobj=outFileObj)
                    util.copyfileobj(fileObj, dest, digest=contentSha1)
                    dest.close()

                # this closes tmpFd for us
                cls._fchmod(fd)
                outFileObj.close()
                fileObj.seek(0)

            return contentSha1.digest()
Example #12
File: client.py Project: pombreda/restlib
 def request(self, method, body=None, headers=None, contentLength=None,
         callback=None):
     hdrs = self.headers.copy()
     hdrs.update(headers or {})
     if self.user is not None and self.passwd is not None:
         user_pass = base64.b64encode('%s:%s' % (self.user, self.passwd))
         hdrs['Authorization'] = 'Basic %s' % user_pass
     if hasattr(body, "read"):
         # We need to stream
         if contentLength is None:
             # Determine body size
             body.seek(0, 2)
             contentLength = body.tell()
             body.seek(0, 0)
         hdrs['Content-Length'] = str(contentLength)
         self._connection.putrequest(method, self.path)
         for hdr, value in hdrs.iteritems():
             self._connection.putheader(hdr, value)
         self._connection.endheaders()
         util.copyfileobj(body, self._connection, sizeLimit=contentLength,
             callback=callback)
     else:
         self._connection.request(method, self.path, body = body,
                                  headers = hdrs)
     resp = self._connection.getresponse()
     if resp.status != 200:
         raise ResponseError(resp.status, resp.reason, resp.msg, resp)
     return resp
Example #13
    def apply(self,
              justDatabase=False,
              noScripts=False,
              capsuleChangeSet=None):
        if capsuleChangeSet:
            # Previous jobs will have moved the pointer in the auxiliary
            # changeset, so reset it at the start of each job.
            capsuleChangeSet.reset()
        tmpDir = os.path.join(self.root, 'var/tmp')
        if not os.path.isdir(tmpDir):
            # For empty roots or roots that are not systems (e.g. source
            # checkouts), just put capsules in the root directory.
            tmpDir = self.root
        fileDict = {}
        for kind, obj in sorted(self.capsuleClasses.items()):
            fileDict.update(
                dict(((x[0], x[2], x[3]), x[1]) for x in obj._filesNeeded()))

        try:
            for ((pathId, fileId, sha1), path) in sorted(fileDict.items()):
                tmpfd, tmpname = tempfile.mkstemp(dir=tmpDir,
                                                  prefix=path,
                                                  suffix='.conary')
                fType, fContents = self.changeSet.getFileContents(
                    pathId, fileId)
                if (fType == changeset.ChangedFileTypes.hldr):
                    if (capsuleChangeSet):
                        try:
                            result = capsuleChangeSet.getFileContents(
                                pathId, fileId)
                            fObj = result[1].get()
                        except KeyError:
                            raise errors.MissingRollbackCapsule(
                                'Cannot find '
                                'RPM %s to perform local rollback' % path)

                else:
                    fObj = fContents.get()

                d = digestlib.sha1()
                util.copyfileobj(fObj, os.fdopen(tmpfd, "w"), digest=d)
                actualSha1 = d.digest()
                if actualSha1 != sha1:
                    raise files.Sha1Exception(path)

                # tmpfd is closed when the file object created by os.fdopen
                # disappears
                fileDict[(pathId, fileId)] = tmpname

            for kind, obj in sorted(self.capsuleClasses.items()):
                obj.apply(fileDict,
                          justDatabase=justDatabase,
                          noScripts=noScripts)
        finally:
            for tmpPath in fileDict.values():
                try:
                    os.unlink(tmpPath)
                except:
                    pass
Example #14
 def store(cls, root, archive):
     """
     Copy a manifest from inside a chroot to a path next to a file archive
     """
     f_in = open(os.path.join(root, cls.FILENAME))
     with util.AtomicFile(archive + cls.AR_SUFFIX) as f_out:
         util.copyfileobj(f_in, f_out)
     f_in.close()
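
util.AtomicFile, used here and in Examples #23, #40, #41 and #43, appears to
publish the data under its final name only when commit() is called (or when
the with block exits cleanly), so a failed copy never leaves a partial file
behind. A minimal sketch of the pattern with hypothetical paths:

    from conary.lib import util

    src = open('/etc/hosts', 'rb')
    dst = util.AtomicFile('/tmp/hosts.copy')
    util.copyfileobj(src, dst)
    dst.commit()                # the copy appears at /tmp/hosts.copy here
    src.close()
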
Example #16
File: wms.py Project: pombreda/bob
 def fetchArchive(self, conarySource, snapPath):
     if os.path.exists(snapPath):
         return
     archive = urllib.quote(os.path.basename(snapPath))
     log.info("Downloading %s", archive)
     f_in = self.wms.archive(self.path, self.revision, archive)
     with open(snapPath, 'w') as f_out:
         copyfileobj(f_in, f_out)
     f_in.close()
Example #18
File: wms.py Project: pombreda/spanner
 def fetchArchive(self, conarySource, snapPath):
     if os.path.exists(snapPath):
         return
     archive = urllib.quote(os.path.basename(snapPath))
     url = (self.repos + '/archive/'
             + urllib.quote(self.revision) + '/' + archive)
     f_in = self.opener.open(url)
     with open(snapPath, 'w') as f_out:
         copyfileobj(f_in, f_out)
     f_in.close()
Example #19
File: files.py Project: pombr/conary
    def restore(self, fileContents, root, target, journal=None, sha1 = None,
                nameLookup=True, **kwargs):

        keepTempfile = kwargs.get('keepTempfile', False)
        destTarget = target

        if fileContents is not None:
            # this is first to let us copy the contents of a file
            # onto itself; the unlink helps that to work
            src = fileContents.get()
            inFd = None

            if fileContents.isCompressed() and hasattr(src, '_fdInfo'):
                # inFd is None if we can't figure this information out
                # (for _LazyFile for instance)
                (inFd, inStart, inSize) = src._fdInfo()

            path, name = os.path.split(target)
            if not os.path.isdir(path):
                util.mkdirChain(path)

            # Uncompress to a temporary file, using the accelerated
            # implementation if possible.
            if inFd is not None and util.sha1Uncompress is not None:
                actualSha1, tmpname = util.sha1Uncompress(
                        inFd, inStart, inSize, path, name)
            else:
                if fileContents.isCompressed():
                    src = gzip.GzipFile(mode='r', fileobj=src)
                tmpfd, tmpname = tempfile.mkstemp(name, '.ct', path)
                try:
                    d = digestlib.sha1()
                    f = os.fdopen(tmpfd, 'w')
                    util.copyfileobj(src, f, digest = d)
                    f.close()
                    actualSha1 = d.digest()
                except:
                    os.unlink(tmpname)
                    raise

            if keepTempfile:
                # Make a hardlink "copy" for the caller to use
                destTarget = tmpname + '.ptr'
                os.link(tmpname, destTarget)
            try:
                os.rename(tmpname, target)
            except OSError, err:
                if err.args[0] != errno.EISDIR:
                    raise
                os.rmdir(target)
                os.rename(tmpname, target)

            if (sha1 is not None and sha1 != actualSha1):
                raise Sha1Exception(target)
Example #20
File: logstore.py Project: pombreda/rmake
 def __init__(self, path):
     self.path = path
     self.f_plain = open(path, 'a', buffering=0)
     if os.path.exists(path + '.gz') and not os.fstat(
             self.f_plain.fileno()).st_size:
         # The compressed log exists already, but the plain log was removed.
         # Need to copy the compressed contents to the plain log.
         f_gz = gzip.GzipFile(path + '.gz', 'r')
         util.copyfileobj(f_gz, self.f_plain)
         f_gz.close()
     self.f_gz = gzip.GzipFile(path + '.gz', 'a')
Example #21
def _makeConnection(url,
                    method,
                    headers=None,
                    bodyStream=None,
                    bodyLength=None,
                    callback=None):
    protocol, uri = urllib.splittype(url)
    assert (protocol in ('http', 'https'))
    host, selector = urllib.splithost(uri)
    host, port = urllib.splitport(host)

    if protocol == 'http':
        r = HTTPConnection(host, port)
    else:
        r = HTTPSConnection(host, port)

    progress = None
    if callback:
        progress = getattr(callback, 'progress', None)
    if progress is None:
        progress = lambda x, y: x

    hdrs = {'Content-Type': 'application/octet-stream'}
    hdrs.update(headers or {})
    if bodyStream and bodyLength is None:
        bodyStream.seek(0, 2)
        bodyLength = bodyStream.tell()
        bodyStream.seek(0)
    if bodyLength:
        hdrs['Content-Length'] = bodyLength

    #r.set_debuglevel(1)
    r.connect()
    r.putrequest(method, selector)
    for k, v in hdrs.items():
        r.putheader(k, str(v))
    try:
        r.endheaders()
        if bodyStream:
            # This could fail, if the device backed by this file is connected
            util.copyfileobj(bodyStream,
                             r,
                             bufSize=BUFSIZE,
                             sizeLimit=hdrs['Content-Length'],
                             callback=progress)
        return r.getresponse()
    except socket.error, e:
        raise
        response = None
        try:
            response = r.getresponse()
            r.close()
        except Exception, e:
            pass
Example #22
File: logstore.py Project: pombreda/rmake
 def __init__(self, path):
     self.path = path
     self.f_plain = open(path, 'a', buffering=0)
     if (os.path.exists(path + '.gz')
             and not os.fstat(self.f_plain.fileno()).st_size):
         # The compressed log exists already, but the plain log was removed.
         # Need to copy the compressed contents to the plain log.
         f_gz = gzip.GzipFile(path + '.gz', 'r')
         util.copyfileobj(f_gz, self.f_plain)
         f_gz.close()
     self.f_gz = gzip.GzipFile(path + '.gz', 'a')
Example #23
 def compressDiskImage(self, vmdkPath):
     if not self.WithCompressedDisks:
         # Need to add the file to the final directory
         destf = util.AtomicFile(os.path.join(self.outputDir, self.basefilename + '.vmdk'))
         util.copyfileobj(file(vmdkPath), destf)
         destf.commit()
         return destf.finalPath
     vmdkGzOutputFile = os.path.join(self.outputDir, self.basefilename +
             '.vmdk.gz')
     self.gzip(vmdkPath, vmdkGzOutputFile)
     util.remove(vmdkPath)
     return vmdkGzOutputFile
Example #24
    def apply(self, justDatabase = False, noScripts = False,
              capsuleChangeSet = None):
        if capsuleChangeSet:
            # Previous jobs will have moved the pointer in the auxiliary
            # changeset, so reset it at the start of each job.
            capsuleChangeSet.reset()
        tmpDir = os.path.join(self.root, 'var/tmp')
        if not os.path.isdir(tmpDir):
            # For empty roots or roots that are not systems (e.g. source
            # checkouts), just put capsules in the root directory.
            tmpDir = self.root
        fileDict = {}
        for kind, obj in sorted(self.capsuleClasses.items()):
            fileDict.update(
                dict(((x[0], x[2], x[3]), x[1]) for x in obj._filesNeeded()))

        try:
            for ((pathId, fileId, sha1), path) in sorted(fileDict.items()):
                tmpfd, tmpname = tempfile.mkstemp(dir=tmpDir, prefix=path,
                        suffix='.conary')
                fType, fContents = self.changeSet.getFileContents(pathId,
                                                                  fileId)
                if (fType == changeset.ChangedFileTypes.hldr):
                    if (capsuleChangeSet):
                        try:
                            result = capsuleChangeSet.getFileContents(pathId,
                                                                      fileId)
                            fObj = result[1].get()
                        except KeyError:
                            raise errors.MissingRollbackCapsule('Cannot find '
                                'RPM %s to perform local rollback' % path)

                else:
                    fObj = fContents.get()

                d = digestlib.sha1()
                util.copyfileobj(fObj, os.fdopen(tmpfd, "w"), digest = d)
                actualSha1 = d.digest()
                if actualSha1 != sha1:
                    raise files.Sha1Exception(path)

                # tmpfd is closed when the file object created by os.fdopen
                # disappears
                fileDict[(pathId, fileId)] = tmpname

            for kind, obj in sorted(self.capsuleClasses.items()):
                obj.apply(fileDict, justDatabase = justDatabase, noScripts = noScripts)
        finally:
            for tmpPath in fileDict.values():
                try:
                    os.unlink(tmpPath)
                except:
                    pass
Example #25
        def __init__(self, sourceTrove, repos, cfg, versionStr=None, 
                     labelPath=None,
                     ignoreInstalled=False, filterVersions=False,
                     parentDir=None, defaultToLatest = False,
                     buildFlavor = None, db = None, overrides = None,
                     getFileFunction = None, branch = None):
            self.recipes = {}

            if getFileFunction is None:
                getFileFunction = lambda repos, fileId, fileVersion, path: \
                        repos.getFileContents([ (fileId, fileVersion) ])[0].get()

            name = sourceTrove.getName().split(':')[0]

            recipePath = name + '.recipe'
            match = self.findFileByPath(sourceTrove, recipePath)

            if not match:
                # this is just missing the recipe; we need it
                raise builderrors.RecipeFileError("version %s of %s does not "
                                                  "contain %s" %
                          (sourceTrove.getVersion().asString(),
                           sourceTrove.getName(),
                           recipePath))

            (fd, recipeFile) = tempfile.mkstemp(".recipe", 'temp-%s-' %name, 
                                                dir=cfg.tmpDir)
            outF = os.fdopen(fd, "w")

            inF = getFileFunction(repos, match[0], match[1], recipePath)

            util.copyfileobj(inF, outF)

            del inF
            outF.close()
            del outF

            if branch is None:
                branch = sourceTrove.getVersion().branch()

            try:
                loadrecipe.RecipeLoader.__init__(self, recipeFile, cfg, repos,
                          sourceTrove.getName(),
                          branch = branch,
                          ignoreInstalled=ignoreInstalled,
                          directory=parentDir, buildFlavor=buildFlavor,
                          db=db, overrides=overrides)
            finally:
                os.unlink(recipeFile)

            self.recipe._trove = sourceTrove.copy()
Example #26
    def _downloadParentImage(self, imgSpec, unpackDir, layersDir):
        log.debug('Downloading parent image %s', imgSpec.dockerImageId)
        self.status('Downloading parent image')
        resp = self.response.getImage(imgSpec.url)
        tmpf = tempfile.TemporaryFile(dir=self.workDir)
        util.copyfileobj(resp, tmpf)
        tmpf.seek(0)

        self.status('Unpacking parent image')
        errcode, stdout, stderr = logCall(["tar", "-C", layersDir,
                "-zxf", "-"], stdin=tmpf)
        tmpf.close()
        parentImageDir = os.path.join(unpackDir,
                imgSpec.dockerImageId)

        log.debug('Unpacking parent image as %s', self._path(parentImageDir))
        layerFilesStack = []
        # Unpack the layers in some temporary directories
        layer = imgSpec
        while layer is not None:
            layerId = layer.dockerImageId
            layer._unpackDir = parentImageDir
            layerFilesStack.append(
                    (layerId, os.path.join(layersDir, layerId, "layer.tar")))
            layer.layerSize = os.stat(layerFilesStack[-1][1]).st_size
            layer._manifest = mf = Manifest(json.load(file(os.path.join(layersDir, layerId, 'json'))))
            mf = json.load(file(os.path.join(layersDir, layerId, 'json')))
            parent = mf.get('parent')
            if parent is not None and not layer.parent:
                layer.parent = ImageSpec(dockerImageId=parent)
                layer.parent.children.append(layer)
            layer = layer.parent
        # We now extract all layers, top-to-bottom, in the same directory.
        while layerFilesStack:
            layerId, layerFile = layerFilesStack.pop()
            log.debug("  Extracting parent layer %s on %s", layerId,
                    self._path(parentImageDir))
            self._extractLayer(parentImageDir, layerFile)
        idToNameTags = {}
        reposFile = os.path.join(layersDir, 'repositories')
        if os.path.isfile(reposFile):
            repos = json.load(file(reposFile))
            for name, tagToId in repos.iteritems():
                for tag, imgid in tagToId.iteritems():
                    idToNameTags.setdefault(imgid, set()).add((name, tag))
        # Walk list again, to compute tags
        layer = imgSpec
        while layer is not None:
            layerId = layer.dockerImageId
            layer.updateNamesAndTags(idToNameTags.get(layerId))
            layer = layer.parent
Example #27
    def _generate(self):
        self._log.info("Generating template %s from trove %s=%s[%s]",
                self._hash, *self._troveTup)

        self._installContents(self._contentsDir, [self._troveTup])

        # Copy "unified" directly into the output.
        os.mkdir(self._outputDir)
        util.copytree(self._contentsDir + '/unified', self._outputDir + '/')

        # Process the MANIFEST file.
        for line in open(self._contentsDir + '/MANIFEST'):
            line = line.rstrip()
            if not line or line[0] == '#':
                continue
            args = line.rstrip().split(',')
            command = args.pop(0)
            commandFunc = getattr(self, '_DO_' + command, None)
            if not commandFunc:
                raise RuntimeError("Unknown command %r in MANIFEST"
                        % (command,))
            commandFunc(args)

        # Archive the results.
        digest = digestlib.sha1()
        outFile = util.AtomicFile(self._outputPath)

        proc = call(['/bin/tar', '-cC', self._outputDir, '.'],
                stdout=subprocess.PIPE, captureOutput=False, wait=False)
        util.copyfileobj(proc.stdout, outFile, digest=digest)
        proc.wait()

        # Write metadata.
        metaFile = util.AtomicFile(self._outputPath + '.metadata')
        cPickle.dump({
            'sha1sum': digest.hexdigest(),
            'trovespec': '%s=%s[%s]' % self._troveTup,
            'kernel': (self._kernelTup and ('%s=%s[%s]' % self._kernelTup)
                or '<none>'),
            # Right now, we are going to hardcode this to an older version
            # of Netclient Protocol to hint to the Conary installed on the
            # jobslave to generate old filecontainers that are compatible
            # with all versions of Conary. (See RBL-1552.)
            'netclient_protocol_version': '38',
            }, metaFile)

        metaFile.commit()
        outFile.commit()

        self._log.info("Template %s created", self._hash)
Example #28
File: server.py Project: tensor5/conary
    def do_PUT(self):
        chunked = False
        if 'Transfer-encoding' in self.headers:
            contentLength = 0
            chunked = True
        elif 'Content-Length' in self.headers:
            chunked = False
            contentLength = int(self.headers['Content-Length'])
        else:
            # send 411: Length Required
            self.send_error(411)

        authToken = self.getAuth()

        if self.cfg.proxyContentsDir:
            status, reason = netclient.httpPutFile(self.path, self.rfile,
                                                   contentLength)
            self.send_response(status)
            return

        path = self.path.split("?")[-1]

        if '/' in path:
            self.send_error(403)

        path = self.tmpDir + '/' + path + "-in"

        size = os.stat(path).st_size
        if size != 0:
            self.send_error(410)
            return

        out = open(path, "w")
        try:
            if chunked:
                while 1:
                    chunk = self.rfile.readline()
                    chunkSize = int(chunk, 16)
                    # chunksize of 0 means we're done
                    if chunkSize == 0:
                        break
                    util.copyfileobj(self.rfile, out, sizeLimit=chunkSize)
                    # read the \r\n after the chunk we just copied
                    self.rfile.readline()
            else:
                util.copyfileobj(self.rfile, out, sizeLimit=contentLength)
        finally:
            out.close()
        self.send_response(200)
        self.end_headers()
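
The chunked branch above leans on the sizeLimit argument: copyfileobj stops
after copying that many bytes, which is what lets the loop peel exactly one
chunk at a time off the request stream before reading the trailing CRLF. A
small sketch of that behaviour with in-memory streams, assuming the semantics
shown in these examples:

    from StringIO import StringIO
    from conary.lib import util

    src = StringIO('x' * 1000)
    dst = StringIO()
    # Copy no more than 100 bytes, leaving src positioned right after them.
    util.copyfileobj(src, dst, sizeLimit=100)
    assert len(dst.getvalue()) == 100
    assert src.tell() == 100
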
Example #29
File: server.py Project: pombr/conary
    def do_PUT(self):
        chunked = False
        if 'Transfer-encoding' in self.headers:
            contentLength = 0
            chunked = True
        elif 'Content-Length' in self.headers:
            chunked = False
            contentLength = int(self.headers['Content-Length'])
        else:
            # send 411: Length Required
            self.send_error(411)

        authToken = self.getAuth()

        if self.cfg.proxyContentsDir:
            status, reason = netclient.httpPutFile(self.path, self.rfile, contentLength)
            self.send_response(status)
            return

        path = self.path.split("?")[-1]

        if '/' in path:
            self.send_error(403)

        path = self.tmpDir + '/' + path + "-in"

        size = os.stat(path).st_size
        if size != 0:
            self.send_error(410)
            return

        out = open(path, "w")
        try:
            if chunked:
                while 1:
                    chunk = self.rfile.readline()
                    chunkSize = int(chunk, 16)
                    # chunksize of 0 means we're done
                    if chunkSize == 0:
                        break
                    util.copyfileobj(self.rfile, out, sizeLimit=chunkSize)
                    # read the \r\n after the chunk we just copied
                    self.rfile.readline()
            else:
                util.copyfileobj(self.rfile, out, sizeLimit=contentLength)
        finally:
            out.close()
        self.send_response(200)
        self.end_headers()
Example #30
    def testCorruptedRpm(self):
        # test enforcement of size and sha1 fields from rpm signature block
        f = open(resources.get_archive() + "/basesystem-8.0-2.src.rpm")
        rpmhelper.readHeader(f)
        f.seek(0)

        # change the size
        tmp = tempfile.TemporaryFile()
        util.copyfileobj(f, tmp)
        f.seek(0)
        tmp.write(" ")
        tmp.seek(0)
        try:
            rpmhelper.readHeader(tmp)
        except IOError, e:
            assert str(e) == "file size does not match size specified by " "header"
Example #31
def _makeConnection(url, method, headers = None, bodyStream = None,
        bodyLength = None, callback = None):
    protocol, uri = urllib.splittype(url)
    assert(protocol in ('http', 'https'))
    host, selector = urllib.splithost(uri)
    host, port = urllib.splitport(host)

    if protocol == 'http':
        r = HTTPConnection(host, port)
    else:
        r = HTTPSConnection(host, port)

    progress = None
    if callback:
        progress = getattr(callback, 'progress', None)
    if progress is None:
        progress = lambda x, y: x

    hdrs = { 'Content-Type' : 'application/octet-stream'}
    hdrs.update(headers or {})
    if bodyStream and bodyLength is None:
        bodyStream.seek(0, 2)
        bodyLength = bodyStream.tell()
        bodyStream.seek(0)
    if bodyLength:
        hdrs['Content-Length'] = bodyLength

    #r.set_debuglevel(1)
    r.connect()
    r.putrequest(method, selector)
    for k, v in hdrs.items():
        r.putheader(k, str(v))
    try:
        r.endheaders()
        if bodyStream:
            # This could fail, if the device backed by this file is connected
            util.copyfileobj(bodyStream, r, bufSize=BUFSIZE,
                sizeLimit=hdrs['Content-Length'], callback=progress)
        return r.getresponse()
    except socket.error, e:
        raise
        response = None
        try:
            response = r.getresponse()
            r.close()
        except Exception, e:
            pass
Example #32
File: rpmtest.py Project: tensor5/conary
    def testCorruptedRpm(self):
        # test enforcement of size and sha1 fields from rpm signature block
        f = open(resources.get_archive() + "/basesystem-8.0-2.src.rpm")
        rpmhelper.readHeader(f)
        f.seek(0)

        # change the size
        tmp = tempfile.TemporaryFile()
        util.copyfileobj(f, tmp)
        f.seek(0)
        tmp.write(' ')
        tmp.seek(0)
        try:
            rpmhelper.readHeader(tmp)
        except IOError, e:
            assert (str(e) == 'file size does not match size specified by '
                    'header')
Example #33
    def _getFilesystemImage(self, job, image, stream):
        dlfile = tempfile.NamedTemporaryFile(prefix=image.getBaseFileName(),
                delete=False)
        util.copyfileobj(stream, dlfile)
        dlfile.close()
        dlpath = dlfile.name

        fileExtensions = [ '.ext3' ]
        self._msg(job, "Uncompressing image")
        workdir = self.extractImage(dlpath)
        # XXX make this more robust
        imageFileDir, imageFileName = self.findFile(workdir, fileExtensions)
        if imageFileDir is None:
            raise RuntimeError("No file(s) found: %s" %
                ', '.join("*%s" % x for x in fileExtensions))
        imageFilePath = os.path.join(imageFileDir, imageFileName)
        return imageFilePath
Example #34
    def write(self):
        # Output setup
        topDir = os.path.join(self.workDir, 'ova_base')
        util.mkdirChain(topDir)

        outputDir = os.path.join(constants.finishedDir, self.UUID)
        util.mkdirChain(outputDir)
        deliverable = os.path.join(outputDir, self.basefilename + self.suffix)

        # Build the filesystem images
        image_path = os.path.join(self.workDir, 'hdimage')
        disk = self.makeHDImage(image_path)

        # Open a manifest for tar so that it writes out files in the optimal
        # order.
        manifest_path = os.path.join(self.workDir, 'files')
        manifest = open(manifest_path, 'w')

        # Write the ova.xml file
        ovaName = 'ova.xml'
        ovaPath = os.path.join(topDir, ovaName)
        self.createXVA(ovaPath, disk.totalSize)
        print >>manifest, ovaName

        # Split the HD image into 1GB (not GiB) chunks
        label = 'xvda'
        chunk_dir = os.path.join(topDir, label)
        chunkPrefix = os.path.join(chunk_dir, 'chunk-')
        util.mkdirChain(os.path.split(chunkPrefix)[0])

        self.status('Splitting hard disk image')
        infile = open(image_path, 'rb')
        n = 0
        tocopy = os.stat(image_path).st_size
        while True:
            chunkname = '%s%04d.gz' % (chunkPrefix, n)
            outfile = gzip.GzipFile(chunkname, 'wb')
            tocopy -= util.copyfileobj(infile, outfile, sizeLimit=1000000000)
            outfile.close()
            print >>manifest, chunkname
            if not tocopy:
                break
            n += 1
        infile.close()

        # Delete the FS image to free up temp space
        os.unlink(image_path)

        # Create XVA file
        manifest.close()
        self.status('Creating XVA Image')
        logCall('tar -cv -f "%s" -C "%s" -T "%s"' % \
                         (deliverable, topDir, manifest_path))
        self.outputFileList.append((deliverable, 'Citrix XenServer (TM) Image'))

        self.postOutput(self.outputFileList)
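
Splitting the disk image combines two properties used throughout these
examples: sizeLimit caps each copy at 1 GB, and the return value reports how
many bytes actually moved, so the loop knows when the input is exhausted. A
generic sketch of the same splitting loop with hypothetical paths and a much
smaller chunk size:

    import os
    from conary.lib import util

    CHUNK = 1024 * 1024         # 1 MiB chunks for the sketch
    infile = open('/tmp/big.img', 'rb')
    tocopy = os.fstat(infile.fileno()).st_size
    n = 0
    while True:
        outfile = open('/tmp/big.img.%04d' % n, 'wb')
        tocopy -= util.copyfileobj(infile, outfile, sizeLimit=CHUNK)
        outfile.close()
        if not tocopy:
            break
        n += 1
    infile.close()
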
Example #35
def extractFilesFromCpio(fileIn, fileList, tmpDir = '/tmp'):
    """
    Returns a list of open files parallel to fileList
    Hardlinked files will share contents, so make sure you seek() back to the
    beginning before you read.
    """
    # Map device/inode to catch hardlinks
    inodeMap = {}
    # Map the path in fileList to header and device/inode
    fileNameMap = dict((_normpath(x), x) for x in fileList)
    fileNameInodeMap = {}

    # Empty files will be shared to avoid consuming file descriptors
    EmptyFile = tempfile.TemporaryFile(dir = tmpDir, prefix = 'tmppayload-')

    cpioObj = cpiostream.CpioStream(fileIn)
    for entry in cpioObj:
        if entry.header.mode & 0170000 != 0100000:
            # Not a regular file
            continue
        fileName = _normpath(entry.filename)
        devmajor = entry.header.devmajor
        devminor = entry.header.devminor
        inode = entry.header.inode

        key = (devmajor, devminor, inode)

        # This file may not be the one we're looking for, but it may be the
        # one that provides the contents for hardlinked files we care about
        if fileName not in fileNameMap and key not in inodeMap:
            continue

        if entry.header.filesize == 0:
            fobj = EmptyFile
        else:
            fobj = tempfile.TemporaryFile(dir = tmpDir, prefix = 'tmppayload-')
            util.copyfileobj(entry.payload, fobj)
            fobj.seek(0)
        inodeMap[key] = fobj
        # in case we'll ever want to use the information from the cpio header
        # entry to restore file permissions, we should also save the header
        # here
        fileNameInodeMap[fileName] = key
Example #36
def extractFilesFromCpio(fileIn, fileList, tmpDir='/tmp'):
    """
    Returns a list of open files parallel to fileList
    Hardlinked files will share contents, so make sure you seek() back to the
    beginning before you read.
    """
    # Map device/inode to catch hardlinks
    inodeMap = {}
    # Map the path in fileList to header and device/inode
    fileNameMap = dict((_normpath(x), x) for x in fileList)
    fileNameInodeMap = {}

    # Empty files will be shared to avoid consuming file descriptors
    EmptyFile = tempfile.TemporaryFile(dir=tmpDir, prefix='tmppayload-')

    cpioObj = cpiostream.CpioStream(fileIn)
    for entry in cpioObj:
        if entry.header.mode & 0170000 != 0100000:
            # Not a regular file
            continue
        fileName = _normpath(entry.filename)
        devmajor = entry.header.devmajor
        devminor = entry.header.devminor
        inode = entry.header.inode

        key = (devmajor, devminor, inode)

        # This file may not be the one we're looking for, but it may be the
        # one that provides the contents for hardlinked files we care about
        if fileName not in fileNameMap and key not in inodeMap:
            continue

        if entry.header.filesize == 0:
            fobj = EmptyFile
        else:
            fobj = tempfile.TemporaryFile(dir=tmpDir, prefix='tmppayload-')
            util.copyfileobj(entry.payload, fobj)
            fobj.seek(0)
        inodeMap[key] = fobj
        # in case we'll ever want to use the information from the cpio header
        # entry to restore file permissions, we should also save the header
        # here
        fileNameInodeMap[fileName] = key
Example #37
    def get(self, pathId):
        f = self.fileClass(pathId)
        f.inode = files.InodeStream(self.perms & 07777, self.mtime, self.owner, self.group)
        self._touchupFileStream(f)
        if self.needSha1:
            sha1 = digestlib.sha1()
            contents = self.contents.get()
            devnull = open(os.devnull, "w")
            util.copyfileobj(contents, devnull, digest=sha1)
            devnull.close()

            f.contents = files.RegularFileStream()
            f.contents.size.set(contents.tell())
            f.contents.sha1.set(sha1.digest())
        f.provides.set(self.provides)
        f.requires.set(self.requires)
        f.flavor.set(self.flavor)
        for tag in self.tags:
            f.tags.set(tag)
        return f
Example #38
    def get(self, pathId):
        f = self.fileClass(pathId)
        f.inode = files.InodeStream(self.perms & 07777, self.mtime, self.owner,
                                    self.group)
        self._touchupFileStream(f)
        if self.needSha1:
            sha1 = digestlib.sha1()
            contents = self.contents.get()
            devnull = open(os.devnull, 'w')
            util.copyfileobj(contents, devnull, digest=sha1)
            devnull.close()

            f.contents = files.RegularFileStream()
            f.contents.size.set(contents.tell())
            f.contents.sha1.set(sha1.digest())
        f.provides.set(self.provides)
        f.requires.set(self.requires)
        f.flavor.set(self.flavor)
        for tag in self.tags:
            f.tags.set(tag)
        return f
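
Copying into os.devnull with a digest, as both variants of get() do, is a
compact way to hash a stream whose length is not known up front; afterwards
contents.tell() doubles as the size that gets stored in the file stream. A
standalone sketch of the same trick on an ordinary file (hypothetical path):

    import os
    from conary.lib import util
    from conary.lib import digestlib

    contents = open('/etc/hosts', 'rb')
    sha1 = digestlib.sha1()
    devnull = open(os.devnull, 'w')
    util.copyfileobj(contents, devnull, digest=sha1)
    devnull.close()
    print 'size=%d sha1=%s' % (contents.tell(), sha1.hexdigest())
    contents.close()
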
Example #39
    def putChangeset(self):
        """PUT method -- handle changeset uploads."""
        if not self.repositoryServer:
            # FIXME: this mechanism is unauthenticated and can probably be used
            # to PUT content to random things on the internet
            if 'content-length' in self.request.headers:
                size = int(self.request.headers['content-length'])
            else:
                size = None
            headers = [
                x for x in self.request.headers.items() if x[0].lower() in (
                    'x-conary-servername',
                    'x-conary-entitlement',
                )
            ]
            result = netclient.httpPutFile(
                self.request.url,
                self.request.body_file,
                size,
                headers=headers,
                chunked=(size is None),
                withResponse=True,
            )
            return self.responseFactory(
                status='%s %s' % (result.status, result.reason),
                app_iter=self._produceProxy(result),
                #headerlist=result.getheaders(),
            )

        # Copy request body to the designated temporary file.
        stream = self.request.body_file
        out = self._openForPut()
        if out is None:
            # File already exists or is in an illegal location.
            return self._makeError('403 Forbidden', "Illegal changeset upload")

        util.copyfileobj(stream, out)
        out.close()

        return self.responseFactory(status='200 OK')
Example #40
File: lookaside.py Project: tensor5/conary
    def cacheFilePath(self, cachePrefix, url):
        cachePath = self.getCachePath(cachePrefix, url)
        util.mkdirChain(os.path.dirname(cachePath))

        if url.filePath() in self.cacheMap:
            # don't check sha1 twice
            return self.cacheMap[url.filePath()]
        (troveName, troveVersion, pathId, troveFile, fileId, troveFileVersion,
         sha1, mode) = self.nameMap[url.filePath()]
        sha1Cached = None
        cachedMode = None
        if os.path.exists(cachePath):
            sha1Cached = sha1helper.sha1FileBin(cachePath)
        if sha1Cached != sha1:
            if sha1Cached:
                log.info('%s sha1 %s != %s; fetching new...', url.filePath(),
                         sha1helper.sha1ToString(sha1),
                         sha1helper.sha1ToString(sha1Cached))
            else:
                log.info('%s not yet cached, fetching...', url.filePath())

            if self.quiet:
                csCallback = None
            else:
                csCallback = ChangesetCallback()

            f = self.repos.getFileContents([(fileId, troveFileVersion)],
                                           callback=csCallback)[0].get()
            outF = util.AtomicFile(cachePath, chmod=0644)
            util.copyfileobj(f, outF)
            outF.commit()
            fileObj = self.repos.getFileVersion(pathId, fileId,
                                                troveFileVersion)
            fileObj.chmod(cachePath)

        cachedMode = os.stat(cachePath).st_mode & 0777
        if mode != cachedMode:
            os.chmod(cachePath, mode)
        self.cacheMap[url.filePath()] = cachePath
        return cachePath
Example #41
File: lookaside.py Project: pombr/conary
    def cacheFilePath(self, cachePrefix, url):
        cachePath = self.getCachePath(cachePrefix, url)
        util.mkdirChain(os.path.dirname(cachePath))

        if url.filePath() in self.cacheMap:
            # don't check sha1 twice
            return self.cacheMap[url.filePath()]
        (troveName, troveVersion, pathId, troveFile, fileId,
         troveFileVersion, sha1, mode) = self.nameMap[url.filePath()]
        sha1Cached = None
        cachedMode = None
        if os.path.exists(cachePath):
            sha1Cached = sha1helper.sha1FileBin(cachePath)
        if sha1Cached != sha1:
            if sha1Cached:
                log.info('%s sha1 %s != %s; fetching new...', url.filePath(),
                          sha1helper.sha1ToString(sha1),
                          sha1helper.sha1ToString(sha1Cached))
            else:
                log.info('%s not yet cached, fetching...', url.filePath())

            if self.quiet:
                csCallback = None
            else:
                csCallback = ChangesetCallback()

            f = self.repos.getFileContents(
                [(fileId, troveFileVersion)], callback=csCallback)[0].get()
            outF = util.AtomicFile(cachePath, chmod=0644)
            util.copyfileobj(f, outF)
            outF.commit()
            fileObj = self.repos.getFileVersion(
                pathId, fileId, troveFileVersion)
            fileObj.chmod(cachePath)

        cachedMode = os.stat(cachePath).st_mode & 0777
        if mode != cachedMode:
            os.chmod(cachePath, mode)
        self.cacheMap[url.filePath()] = cachePath
        return cachePath
Example #42
File: request.py Project: pombr/conary
    def _sendData(self, conn):
        if self.data is None:
            return
        if not hasattr(self.data, 'read'):
            conn.send(self.data)
            return

        if self.chunked:
            # Use chunked coding
            output = wrapper = ChunkedSender(conn)
        elif self.size is not None:
            # Use identity coding
            output = conn
            wrapper = None
        else:
            raise RuntimeError("Request must use chunked transfer coding "
                    "if size is not known.")
        util.copyfileobj(self.data, output, callback=self.callback,
                rateLimit=self.rateLimit, abortCheck=self.abortCheck,
                sizeLimit=self.size)
        if wrapper:
            wrapper.close()
Example #43
File: lookaside.py Project: tensor5/conary
    def addFileToCache(self, cachePrefix, url, infile, contentLength):
        # cache needs to be hierarchical to avoid collisions, thus we
        # use cachePrefix so that files with the same name and different
        # contents in different packages do not collide
        cachedname = self.getCachePath(cachePrefix, url)
        util.mkdirChain(os.path.dirname(cachedname))
        f = util.AtomicFile(cachedname, chmod=0644)

        try:
            BLOCKSIZE = 1024 * 4

            if self.quiet:
                callback = callbacks.FetchCallback()
            else:
                callback = FetchCallback()

            wrapper = callbacks.CallbackRateWrapper(callback, callback.fetch,
                                                    contentLength)
            util.copyfileobj(infile,
                             f,
                             bufSize=BLOCKSIZE,
                             rateLimit=self.downloadRateLimit,
                             callback=wrapper.callback)

            f.commit()
            infile.close()
        except:
            f.close()
            raise

        # work around FTP bug (msw had a better way?)
        if url.scheme == 'ftp':
            if os.stat(cachedname).st_size == 0:
                os.unlink(cachedname)
                self.createNegativeCacheEntry(cachePrefix, url)
                return None

        return cachedname
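
Beyond bufSize, this example also exercises the rateLimit and callback knobs.
The callback gets positional arguments (Example #4 passes lambda x, y: x), but
their exact meaning is not documented in these snippets, so the sketch below
simply accepts and ignores whatever it receives; rateLimit=None is assumed to
mean no throttling:

    from StringIO import StringIO
    from conary.lib import util

    def progress(*args):
        # Invoked periodically during the copy; arguments ignored here.
        pass

    src = StringIO('x' * (64 * 1024))
    dst = StringIO()
    util.copyfileobj(src, dst, bufSize=4096, rateLimit=None,
                     callback=progress)
    assert len(dst.getvalue()) == 64 * 1024
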
Example #44
File: wsgi_hooks.py Project: pombr/conary
    def putChangeset(self):
        """PUT method -- handle changeset uploads."""
        if not self.repositoryServer:
            # FIXME: this mechanism is unauthenticated and can probably be used
            # to PUT content to random things on the internet
            if 'content-length' in self.request.headers:
                size = int(self.request.headers['content-length'])
            else:
                size = None
            headers = [x for x in self.request.headers.items()
                    if x[0].lower() in (
                        'x-conary-servername',
                        'x-conary-entitlement',
                        )]
            result = netclient.httpPutFile(self.request.url,
                    self.request.body_file,
                    size,
                    headers=headers,
                    chunked=(size is None),
                    withResponse=True,
                    )
            return self.responseFactory(
                    status='%s %s' % (result.status, result.reason),
                    app_iter=self._produceProxy(result),
                    #headerlist=result.getheaders(),
                    )

        # Copy request body to the designated temporary file.
        stream = self.request.body_file
        out = self._openForPut()
        if out is None:
            # File already exists or is in an illegal location.
            return self._makeError('403 Forbidden', "Illegal changeset upload")

        util.copyfileobj(stream, out)
        out.close()

        return self.responseFactory(status='200 OK')
Example #45
    def get(self, fileName, computeShaDigest = False):
        """
        Download a file from the repository.
        @param fileName: relative path to file
        @type fileName: string
        @return open file instance
        """

        fobj = self._getTempFileObject()
        realUrl = self._getRealUrl(fileName)

        inf = self._opener.open(realUrl)
        if computeShaDigest:
            dig = digestlib.sha1()
        else:
            dig = None
        util.copyfileobj(inf, fobj, digest = dig)
        fobj.seek(0)

        if not os.path.basename(fileName).endswith('.gz'):
            return self.FileWrapper.create(fobj, dig)
        return self.FileWrapper.create(gzip.GzipFile(fileobj=fobj, mode="r"),
            dig)
Example #46
    def testCopyFileObjDigest(self):
        tmpDir = tempfile.mkdtemp()
        try:
            buf = "test data"

            # prepare source and destination files
            srcFn = os.path.join(tmpDir, "srcfile")
            destFn = os.path.join(tmpDir, "destfile")
            open(srcFn, "w").write(buf)
            src = open(srcFn)
            dest = open(destFn, "w")

            # filter the digest through copyfileobj
            sha1 = digestlib.sha1()
            util.copyfileobj(src, dest, digest=sha1, sizeLimit=len(buf))
            res = sha1.hexdigest()

            # now compare the resulting hash to reference data
            sha1 = digestlib.sha1()
            sha1.update(buf)
            ref = sha1.hexdigest()
            self.assertEquals(ref, res)
        finally:
            util.rmtree(tmpDir)
Example #47
    def addFileToCache(self, cachePrefix, url, infile, contentLength):
        # cache needs to be hierarchical to avoid collisions, thus we
        # use cachePrefix so that files with the same name and different
        # contents in different packages do not collide
        cachedname = self.getCachePath(cachePrefix, url)
        util.mkdirChain(os.path.dirname(cachedname))
        f = open(cachedname, "w+")

        try:
            BLOCKSIZE = 1024 * 4

            if self.quiet:
                callback = callbacks.FetchCallback()
            else:
                callback = FetchCallback()

            wrapper = callbacks.CallbackRateWrapper(callback, callback.fetch,
                                                    contentLength)
            util.copyfileobj(infile, f, bufSize=BLOCKSIZE,
                             rateLimit=self.downloadRateLimit,
                             callback=wrapper.callback)

            f.close()
            infile.close()
        except:
            os.unlink(cachedname)
            raise

        # work around FTP bug (msw had a better way?)
        if url.scheme == 'ftp':
            if os.stat(cachedname).st_size == 0:
                os.unlink(cachedname)
                self.createNegativeCacheEntry(cachePrefix, url)
                return None

        return cachedname
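
getCachePath is not shown in this snippet. One plausible layout matching the comment above, combining the prefix with the URL's host and path so same-named files never collide, is sketched below; the helper name and scheme are assumptions, not the project's actual implementation:

import os
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse        # Python 2

def cachePath(cacheRoot, cachePrefix, url):
    # cachePrefix keeps packages apart; the URL's host and path keep
    # same-named files from different sources apart.
    parsed = urlparse(str(url))
    return os.path.join(cacheRoot, cachePrefix,
                        parsed.netloc, parsed.path.lstrip('/'))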
Example #48
0
File: copytest.py Project: tensor5/conary
    def testCopyFileObjDigest(self):
        tmpDir = tempfile.mkdtemp()
        try:
            buf = 'test data'

            # prepare source and destination files
            srcFn = os.path.join(tmpDir, 'srcfile')
            destFn = os.path.join(tmpDir, 'destfile')
            open(srcFn, 'w').write(buf)
            src = open(srcFn)
            dest = open(destFn, 'w')

            # filter the digest through copyfileobj
            sha1 = digestlib.sha1()
            util.copyfileobj(src, dest, digest=sha1, sizeLimit=len(buf))
            res = sha1.hexdigest()

            # now compare the resulting hash to reference data
            sha1 = digestlib.sha1()
            sha1.update(buf)
            ref = sha1.hexdigest()
            self.assertEquals(ref, res)
        finally:
            util.rmtree(tmpDir)
Example #49
0
File: files.py Project: pombreda/conary-1
    def restore(self,
                fileContents,
                root,
                target,
                journal=None,
                sha1=None,
                nameLookup=True,
                **kwargs):

        keepTempfile = kwargs.get('keepTempfile', False)

        if fileContents is not None:
            # this is first to let us copy the contents of a file
            # onto itself; the unlink helps that to work
            src = fileContents.get()
            inFd = None

            if fileContents.isCompressed():
                if hasattr(src, '_fdInfo'):
                    # inFd is None if we can't figure this information out
                    # (for _LazyFile for instance)
                    (inFd, inStart, inSize) = src._fdInfo()
                else:
                    src = gzip.GzipFile(mode="r", fileobj=src)

            name = os.path.basename(target)
            path = os.path.dirname(target)
            if not os.path.isdir(path):
                util.mkdirChain(path)

            if inFd is not None:
                if keepTempfile:
                    tmpfd, destTarget = tempfile.mkstemp(name, '.ct', path)
                    os.close(tmpfd)
                    destName = os.path.basename(destTarget)
                else:
                    destName, destTarget = name, target
                actualSha1 = util.sha1Uncompress((inFd, inStart, inSize), path,
                                                 destName, destTarget)
                if keepTempfile:
                    # Set up the second temp file here. This makes
                    # sure we get through the next if branch.
                    inFd = None
                    src = file(destTarget)
            elif keepTempfile:
                tmpfd, destTarget = tempfile.mkstemp(name, '.ct', path)
                f = os.fdopen(tmpfd, 'w')
                util.copyfileobj(src, f)
                f.close()
                src = file(destTarget)
            else:
                destTarget = target

            if inFd is None:
                tmpfd, tmpname = tempfile.mkstemp(name, '.ct', path)
                try:
                    d = digestlib.sha1()
                    f = os.fdopen(tmpfd, 'w')
                    util.copyfileobj(src, f, digest=d)
                    f.close()
                    actualSha1 = d.digest()

                    # would be nice if util could do this w/ a single
                    # system call, but exists is better than an exception
                    # when the file doesn't already exist
                    if (os.path.exists(target)
                            and stat.S_ISDIR(os.lstat(target).st_mode)):
                        os.rmdir(target)
                    os.rename(tmpname, target)
                except:
                    # we've not renamed tmpname to target yet, we should
                    # clean up instead of leaving temp files around
                    os.unlink(tmpname)
                    if keepTempfile:
                        os.unlink(destTarget)
                    raise

            if (sha1 is not None and sha1 != actualSha1):
                raise Sha1Exception(target)

            File.restore(self,
                         root,
                         target,
                         journal=journal,
                         nameLookup=nameLookup,
                         **kwargs)
        else:
            destTarget = target
            File.restore(self,
                         root,
                         target,
                         journal=journal,
                         nameLookup=nameLookup,
                         **kwargs)
        return destTarget
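
At its core, restore() relies on the write-to-a-temp-file-then-rename pattern: contents are streamed through a SHA-1 digest into a temp file in the target directory, and only then moved over the target, so readers never see a half-written file. A simplified stdlib-only sketch of that pattern (names and error handling are illustrative):

import hashlib
import os
import tempfile

def restoreAtomically(src, target, expectedSha1=None, bufSize=64 * 1024):
    # Stream src through a SHA-1 digest into a temp file next to the target,
    # then rename() it into place so the target never appears half-written.
    dirName = os.path.dirname(target) or '.'
    fd, tmpName = tempfile.mkstemp(dir=dirName)
    digest = hashlib.sha1()
    try:
        with os.fdopen(fd, 'wb') as out:
            while True:
                chunk = src.read(bufSize)
                if not chunk:
                    break
                digest.update(chunk)
                out.write(chunk)
        if expectedSha1 is not None and digest.digest() != expectedSha1:
            raise ValueError("sha1 mismatch restoring %s" % target)
        os.rename(tmpName, target)
    except Exception:
        # Nothing has been renamed yet, so clean up the temp file.
        os.unlink(tmpName)
        raise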
Example #50
0
def verifySignatures(f, validKeys=None):
    """
    Given an extended file, compute signatures
    """
    f.seek(0)
    h = readHeader(f)

    # Cheap test first: verify MD5 sig
    sigmd5 = h.get(SIG_MD5, None)
    if sigmd5 is not None:
        f.seek(0)
        readSignatureHeader(f)

        # verify md5 digest
        md5 = digestlib.md5()
        util.copyfileobj(f, NullWriter(), digest=md5)
        if md5.digest() != sigmd5:
            raise MD5SignatureError(
                "The MD5 digest fails to verify: expected %s, got %s" %
                (sha1helper.md5ToString(sigmd5), md5.hexdigest()))

    # Don't bother if no gpg signature was present, or no valid keys were
    # presented
    if validKeys is None:
        return
    sigString = h.get(SIG_GPG, None)
    if sigString is None:
        return
    # Skip to immutable header region
    f.seek(0)
    readSignatureHeader(f)
    sig = openpgpfile.readSignature(sigString)

    keyId = sig.getSignerKeyId()
    matchingKeys = [x for x in validKeys if x.hasKeyId(keyId)]
    if not matchingKeys:
        raise PGPSignatureError("Signature generated with key %s does "
                                "not match valid keys %s" %
                                (keyId, ', '.join(x.getKeyId()
                                                  for x in validKeys)))

    key = matchingKeys[0]

    # signature verification assumes a seekable stream and will seek to the
    # beginning; use a SeekableNestedFile
    size = h.getHeaderPlusPayloadSize()
    if size is None:
        pos = f.tell()
        f.seek(0, 2)
        size = f.tell()
        f.seek(pos, 0)
    snf = None
    if hasattr(f, 'pread'):
        extFile = f
    elif hasattr(f, 'name'):
        extFile = util.ExtendedFile(f.name, buffering=False)
    else:
        # worst case scenario, we slurp everything in memory
        extFile = util.ExtendedStringIO(f.read())
        snf = extFile
    if snf is None:
        snf = util.SeekableNestedFile(extFile, start=f.tell(), size=size)
    try:
        sig.verifyDocument(key.getCryptoKey(), snf)
    except openpgpfile.SignatureError:
        raise PGPSignatureError
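
SeekableNestedFile is not defined in this snippet; conceptually it exposes the window [start, start + size) of an underlying file as an independent, seekable stream, which is what the signature verification needs. A minimal sketch of such a view, simplified and not the real class:

class NestedFileView(object):
    # Read-only, seekable view over bytes [start, start + size) of an
    # underlying seekable file, in the spirit of SeekableNestedFile.
    def __init__(self, fobj, start, size):
        self.fobj, self.start, self.size = fobj, start, size
        self.pos = 0

    def seek(self, offset, whence=0):
        if whence == 0:
            self.pos = offset
        elif whence == 1:
            self.pos += offset
        else:
            # whence == 2 is relative to the end of the window
            self.pos = self.size + offset
        return self.pos

    def tell(self):
        return self.pos

    def read(self, count=-1):
        remaining = max(self.size - self.pos, 0)
        if count < 0 or count > remaining:
            count = remaining
        self.fobj.seek(self.start + self.pos)
        data = self.fobj.read(count)
        self.pos += len(data)
        return data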
Example #51
0
        def __init__(self,
                     sourceTrove,
                     repos,
                     cfg,
                     versionStr=None,
                     labelPath=None,
                     ignoreInstalled=False,
                     filterVersions=False,
                     parentDir=None,
                     defaultToLatest=False,
                     buildFlavor=None,
                     db=None,
                     overrides=None,
                     getFileFunction=None,
                     branch=None):
            self.recipes = {}

            if getFileFunction is None:
                getFileFunction = lambda repos, fileId, fileVersion, path: \
                        repos.getFileContents([ (fileId, fileVersion) ])[0].get()

            name = sourceTrove.getName().split(':')[0]

            recipePath = name + '.recipe'
            match = self.findFileByPath(sourceTrove, recipePath)

            if not match:
                # this is just missing the recipe; we need it
                raise builderrors.RecipeFileError(
                    "version %s of %s does not "
                    "contain %s" %
                    (sourceTrove.getName(),
                     sourceTrove.getVersion().asString(), recipePath))

            (fd, recipeFile) = tempfile.mkstemp(".recipe",
                                                'temp-%s-' % name,
                                                dir=cfg.tmpDir)
            outF = os.fdopen(fd, "w")

            inF = getFileFunction(repos, match[0], match[1], recipePath)

            util.copyfileobj(inF, outF)

            del inF
            outF.close()
            del outF

            if branch is None:
                branch = sourceTrove.getVersion().branch()

            try:
                loadrecipe.RecipeLoader.__init__(
                    self,
                    recipeFile,
                    cfg,
                    repos,
                    sourceTrove.getName(),
                    branch=branch,
                    ignoreInstalled=ignoreInstalled,
                    directory=parentDir,
                    buildFlavor=buildFlavor,
                    db=db,
                    overrides=overrides)
            finally:
                os.unlink(recipeFile)

            self.recipe._trove = sourceTrove.copy()
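
The mkstemp / copyfileobj / unlink-in-finally sequence above is a common way to materialize repository contents as a named file just long enough to load it. The same pattern in isolation, sketched with illustrative names:

import os
import shutil
import tempfile

def withMaterializedFile(fobj, suffix, prefix, tmpDir, loader):
    # Write the stream to a named temp file, hand its path to loader(),
    # and remove the file again even if loading raises.
    fd, path = tempfile.mkstemp(suffix, prefix, dir=tmpDir)
    try:
        with os.fdopen(fd, 'wb') as out:
            shutil.copyfileobj(fobj, out)
        return loader(path)
    finally:
        os.unlink(path)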
Example #52
0
def digestWithProgress(fobj, digest, callback):
    source = StreamWithProgress(fobj, callback)
    return copyfileobj(source, Sink, digest=digest)
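
Neither StreamWithProgress nor Sink is defined in this snippet. One shape that would make the call work is sketched below; note that the example passes the Sink class itself rather than an instance, so write() is shown as a staticmethod. This is an assumption about the missing helpers, not their actual definitions:

class StreamWithProgress(object):
    # File-like wrapper that reports how many bytes have been read so far
    # to the supplied callback; one plausible shape for the helper above.
    def __init__(self, fobj, callback):
        self.fobj = fobj
        self.callback = callback
        self.total = 0

    def read(self, count=-1):
        data = self.fobj.read(count)
        self.total += len(data)
        self.callback(self.total)
        return data


class Sink(object):
    # Discards whatever is written to it; copying into it only serves to
    # drive the read loop (and, here, the digest computation).
    @staticmethod
    def write(data):
        return len(data)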
Example #53
0
File: rpmtest.py Project: tensor5/conary
        f.seek(0)
        tmp.write(' ')
        tmp.seek(0)
        try:
            rpmhelper.readHeader(tmp)
        except IOError, e:
            assert (str(e) == 'file size does not match size specified by '
                    'header')
        else:
            assert (0)

        # change a byte in the header. the offset we write to here happens
        # to work for basesystem-8.0-2.src.rpm; if that file changes this
        # offset needs to change too
        tmp = tempfile.TemporaryFile()
        util.copyfileobj(f, tmp)
        tmp.seek(2000)
        tmp.write('X')
        tmp.seek(0)
        try:
            rpmhelper.readHeader(tmp)
        except IOError, e:
            assert (str(e) == "bad header sha1")
        else:
            assert (0)

    def testFilelessRpm(self):
        # Test that reading the paths for an rpm that has no files still works
        f = open(resources.get_archive() + "/fileless-0.1-1.noarch.rpm")
        h = rpmhelper.readHeader(f)
        self.assertEqual(list(h.paths()), [])