def testOutOfOrderRead(self):
    cpioPath = self._createCpio()
    src = cpiostream.CpioStream(file(cpioPath))
    ent = src.next()
    # Advance to the next entry; the first one should no longer be
    # readable
    src.next()
    self.assertRaises(cpiostream.OutOfOrderRead, ent.payload.read)

def testIterateAndReadAlternate(self):
    # We only read every other file - this tests that we are properly
    # rolling the cpio stream forward
    cpioPath = self._createCpio()
    src = cpiostream.CpioStream(file(cpioPath))
    for i, ent in enumerate(src):
        if i % 2 == 0:
            ent.payload.read()

def testOutOfOrderRead2(self):
    cpioPath = self._createCpio()
    src = cpiostream.CpioStream(file(cpioPath))
    ent = src.next()
    # Advance the cpio stream by one byte. This should be enough to
    # invalidate reads for the entry
    src.read(1)
    self.assertRaises(cpiostream.OutOfOrderRead, ent.payload.read)

def testIterateAndPartialRead2(self):
    # Read only a portion of the payload
    cpioPath = self._createCpio()
    src = cpiostream.CpioStream(file(cpioPath))
    for i, ent in enumerate(src):
        if i % 2 == 0:
            amt = min(10, ent.header.filesize)
        else:
            amt = min(155, ent.header.filesize)
        ent.payload.read(amt)

def testIterate(self):
    cpioPath = self._createCpio()
    src = cpiostream.CpioStream(file(cpioPath))
    fileNames = [ x.filename for x in src ]
    self.assertEqual(fileNames, [
        './usr/bin/ptar',
        './usr/bin/ptardiff',
        './usr/lib/perl5/5.10.0/Archive/Tar',
        './usr/lib/perl5/5.10.0/Archive/Tar.pm',
        './usr/lib/perl5/5.10.0/Archive/Tar/Constant.pm',
        './usr/lib/perl5/5.10.0/Archive/Tar/File.pm',
        './usr/share/man/man1/ptar.1.gz',
        './usr/share/man/man1/ptardiff.1.gz',
        './usr/share/man/man3/Archive::Tar.3pm.gz',
        './usr/share/man/man3/Archive::Tar::File.3pm.gz',
    ])

def oldtest(self):
    cpioPath = self._createCpio()
    sha1sum = digestlib.sha1(file(cpioPath).read()).hexdigest()
    resultFilePath = os.path.join(self.workDir, 'result.cpio')
    # Use a variety of buffer sizes, to exercise different chunking
    # paths in the stream reader
    for bufferSize in [1001, 1003, 3001]:
        f = file(resultFilePath, "w")
        src = cpiostream.CpioStream(file(cpioPath))
        while 1:
            buf = src.read(bufferSize)
            if not buf:
                break
            f.write(buf)
        f.close()
        # The streamed copy must be byte-identical to the original
        nsha1sum = digestlib.sha1(file(resultFilePath).read()).hexdigest()
        self.assertEqual(nsha1sum, sha1sum)

def extractFilesFromCpio(fileIn, fileList, tmpDir='/tmp'):
    """
    Returns a list of open files parallel to fileList.
    Hardlinked files will share contents, so make sure you seek() back to
    the beginning before you read.
    """
    # Map device/inode to file contents, to catch hardlinks
    inodeMap = {}
    # Map the normalized path in fileList to the original path
    fileNameMap = dict((_normpath(x), x) for x in fileList)
    # Map the normalized path to its device/inode key
    fileNameInodeMap = {}
    # Empty files will share one file object, to avoid consuming fds
    EmptyFile = tempfile.TemporaryFile(dir=tmpDir, prefix='tmppayload-')
    cpioObj = cpiostream.CpioStream(fileIn)
    for entry in cpioObj:
        if entry.header.mode & 0170000 != 0100000:
            # Not a regular file
            continue
        fileName = _normpath(entry.filename)
        devmajor = entry.header.devmajor
        devminor = entry.header.devminor
        inode = entry.header.inode
        key = (devmajor, devminor, inode)
        # This file may not be the one we're looking for, but it may be the
        # one that provides the contents for hardlinked files we care about
        if fileName not in fileNameMap and key not in inodeMap:
            continue
        if entry.header.filesize == 0:
            fobj = EmptyFile
        else:
            fobj = tempfile.TemporaryFile(dir=tmpDir, prefix='tmppayload-')
            util.copyfileobj(entry.payload, fobj)
            fobj.seek(0)
        inodeMap[key] = fobj
        # In case we'll ever want to use the information from the cpio
        # header entry to restore file permissions, we should also save the
        # header here
        fileNameInodeMap[fileName] = key
    # Assemble the result list parallel to fileList; paths absent from the
    # archive come back as None
    return [ inodeMap.get(fileNameInodeMap.get(_normpath(x)))
             for x in fileList ]

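# A minimal usage sketch for extractFilesFromCpio. The archive path and the
# member names below are hypothetical, not from the test fixtures; any cpio
# archive containing regular files would do. It illustrates the contract in
# the docstring: results are parallel to the request list, and hardlinked
# members share one file object, so seek(0) before each read.
def _exampleExtract():
    fileIn = file('/tmp/example.cpio')                 # hypothetical archive
    wanted = ['./etc/passwd', './etc/group']           # hypothetical members
    fobjs = extractFilesFromCpio(fileIn, wanted)
    for path, fobj in zip(wanted, fobjs):
        if fobj is None:
            # The archive did not contain this path
            continue
        # Hardlinks share contents, so rewind before reading
        fobj.seek(0)
        print path, len(fobj.read())
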
def testIterateAndRead(self):
    # Read every payload in full, in order
    cpioPath = self._createCpio()
    src = cpiostream.CpioStream(file(cpioPath))
    for ent in src:
        ent.payload.read()