Example #1
    def __init__(self, f):
        metaSize=unpack(">I",f.read(4))[0]
        metaOffset=f.tell()
        self.header=Header(unpack(">8I",f.read(32)))
        if self.header.magic!=0x9D798ED5: raise Exception("Wrong noncas bundle header magic.")
        sha1List=[f.read(20) for i in range(self.header.totalCount)] #one sha1 for each ebx+res+chunk. Not necessary for extraction
        self.ebx=[BundleEntry(unpack(">2I",f.read(8))) for i in range(self.header.ebxCount)]
        self.res=[BundleEntry(unpack(">2I",f.read(8))) for i in range(self.header.resCount)]

        #ebx are done, but res have extra content
        for entry in self.res: entry.resType=unpack(">I",f.read(4))[0] #resType as ASCII, e.g. "IT" for ITexture
        for entry in self.res: entry.resMeta=f.read(16) #often 16 nulls (always null for IT)
        for entry in self.res: entry.resRid=unpack(">Q",f.read(8))[0] #I think the ebx use these to import res (bf3 used paths)

        #chunks
        self.chunks=[Chunk(f) for i in range(self.header.chunkCount)]

        #chunkMeta. There is one chunkMeta entry for every chunk (i.e. self.chunks and self.chunkMeta both have the same number of elements).
        if self.header.chunkCount>0: self.chunkMeta=dbo.DbObject(f)
        for i in range(self.header.chunkCount):
            self.chunks[i].meta=self.chunkMeta.content[i].getSubObject("meta")
            self.chunks[i].h32=self.chunkMeta.content[i].get("h32")

        #ebx and res have a filename (chunks only have a 16 byte id)
        absStringOffset=metaOffset+self.header.stringOffset
        for entry in self.ebx+self.res: 
            f.seek(absStringOffset+entry.nameOffset)
            entry.name=readNullTerminatedString(f)
            
        self.entries=self.ebx+self.res+self.chunks
        f.seek(metaOffset+metaSize) #go to the start of the payload section
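
The Header and BundleEntry helpers are not part of this example. A minimal sketch of what they might look like, with field names and order inferred purely from the attribute accesses above (the two trailing header fields are unknowns, not taken from the source):

class Header:
    def __init__(self, values):
        #eight big-endian uint32s, unpacked with ">8I" above; field order is an assumption
        self.magic = values[0]         #must be 0x9D798ED5 for this noncas bundle format
        self.totalCount = values[1]    #presumably ebxCount+resCount+chunkCount (one sha1 each)
        self.ebxCount = values[2]
        self.resCount = values[3]
        self.chunkCount = values[4]
        self.stringOffset = values[5]  #relative to the meta section, see absStringOffset above
        self.unknown1 = values[6]      #not used by this example
        self.unknown2 = values[7]      #not used by this example

class BundleEntry:
    def __init__(self, values):
        #two big-endian uint32s per ebx/res entry, unpacked with ">2I" above
        self.nameOffset = values[0]    #added to absStringOffset to find the null-terminated name
        self.size = values[1]          #assumption: payload size of the entry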
Example #2
def dump(tocPath, outPath):
    toc = dbo.readToc(tocPath)
    if not (toc.getSubEntry("bundles") or toc.get("chunks")):
        return  #there's nothing to extract (the sb might not even exist)

    sbPath = tocPath[:-3] + "sb"
    sb = open(sbPath, "rb")

    chunkPathToc = os.path.join(outPath, "chunks")
    bundlePath = os.path.join(outPath, "bundles")
    ebxPath = os.path.join(bundlePath, "ebx")
    resPath = os.path.join(bundlePath, "res")
    chunkPath = os.path.join(bundlePath, "chunks")

    if not toc.get("das"):
        raise Exception("Non-DAS superbundle found in NFS: Edge.")

    bundles = toc.getSubEntry("bundles")  #names offsets sizes (the three lists have the same length)
    offsets = bundles.get("offsets")

    for offset in offsets:
        sb.seek(offset.content)
        bundle = dbo.DbObject(sb)

        for entry in bundle.get("ebx", list()):  #name sha1 size originalSize
            path = os.path.join(ebxPath, entry.get("name") + ".ebx")
            if payload.casBundlePayload(entry, path,
                                        entry.get("originalSize")):
                ebx.addEbxGuid(path, ebxPath)

        for entry in bundle.get("res", list()):  #name sha1 size originalSize resRid resType resMeta
            path = os.path.join(resPath, entry.get("name") + ".res")
            payload.casBundlePayload(entry, path, entry.get("originalSize"))

        for entry in bundle.get("chunks", list(
        )):  #id sha1 size logicalOffset logicalSize chunkMeta::meta
            path = os.path.join(chunkPath, entry.get("id").format() + ".chunk")
            payload.casBundlePayload(
                entry, path,
                entry.get("logicalOffset") + entry.get("logicalSize"))

    #Deal with the chunks which are defined directly in the toc.
    #These chunks do NOT know their originalSize.
    for entry in toc.get("chunks"):  #id sha1
        targetPath = os.path.join(chunkPathToc, entry.get("id").format() + ".chunk")
        payload.casChunkPayload(entry, targetPath)

    sb.close()
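
A hypothetical invocation of this DAS dump, assuming the usual toc/sb pairing (paths are illustrative only, not taken from the source):

dump("Data/Win32/superbundles/example.toc", "dump/example")
#the sb next to the toc is read; files land in dump/example/bundles/{ebx,res,chunks},
#plus dump/example/chunks for the chunks defined directly in the toc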
Example #3
    def __init__(self, f):
        metaSize = unpack(">I", f.read(4))[
            0]  #size of the meta section/offset of the payload section
        metaStart = f.tell()
        metaEnd = metaStart + metaSize
        self.header = Header(unpack(">8I", f.read(32)), metaStart)
        if self.header.magic != 0x970d1c13:
            raise Exception("Wrong noncas bundle header magic.")
        self.sha1List = [f.read(20) for i in range(self.header.numEntry)]  #one sha1 for each ebx+res+chunk
        self.ebxEntries = [BundleEntry(unpack(">3I", f.read(12))) for i in range(self.header.numEbx)]
        self.resEntries = [BundleEntry(unpack(">3I", f.read(12))) for i in range(self.header.numRes)]
        #ebx are done, but res have extra content
        for entry in self.resEntries:
            entry.resType = unpack(">I", f.read(4))[0]  #e.g. IT for ITexture
        for entry in self.resEntries:
            entry.resMeta = f.read(16)  #often 16 nulls (always null for IT)

        self.chunkEntries = [Chunk(f) for i in range(self.header.numChunks)]

        #chunkMeta section: uses the sbtoc structure and defines h32 and meta. There is one chunkMeta element
        #(consisting of h32 and meta) for every chunk. If meta is not a null byte, the corresponding chunk should
        #have range entries; then again, noncas is erratic, so this is only guaranteed for cas.
        #h32 is the FNV-1 hash applied to a name string (see the sketch after this example). Some audio files,
        #for instance, are accessed via ebx files, which of course have a name; the hash of that name in lowercase
        #is the h32 found in the chunkMeta, and the same hash appears in the ebx file itself at the keyword NameHash.
        #For ITextures, the h32 is found in the corresponding res file; the res file also contains a name and,
        #once again, the hash of this name is the h32. meta for textures usually contains firstMip 0/1/2.
        if self.header.numChunks > 0: self.chunkMeta = dbo.DbObject(f)
        for i in range(self.header.numChunks):
            self.chunkEntries[i].meta = self.chunkMeta.content[i].getSubObject("meta")
            self.chunkEntries[i].h32 = self.chunkMeta.content[i].get("h32")

        for entry in self.ebxEntries + self.resEntries:  #ebx and res have a path and not just a guid
            f.seek(self.header.offsetString + entry.offsetString)
            entry.name = readNullTerminatedString(f)

        #PAYLOAD. Just grab all the payload offsets and sizes and add them to the entries without
        #actually reading the payload. Also attach each sha1 to its entry.
        f.seek(metaEnd)
        entries = self.ebxEntries + self.resEntries + self.chunkEntries
        for entry, sha1 in zip(entries, self.sha1List):
            entry.offset = alignValue(f.tell(), 16)
            f.seek(entry.offset + entry.size)
            entry.sha1 = sha1
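
The chunkMeta comment above describes h32 as the FNV-1 hash of a lowercased name. A minimal sketch using the standard 32-bit FNV-1 parameters (offset basis 0x811C9DC5, prime 0x01000193); whether the game uses exactly these parameters is an assumption here:

def fnv1_32(text):
    #FNV-1 multiplies first, then xors in the next byte (FNV-1a does it the other way around)
    h = 0x811C9DC5
    for byte in text.encode("utf-8"):
        h = ((h * 0x01000193) ^ byte) & 0xFFFFFFFF
    return h

h32 = fnv1_32("sound/music/example_track".lower())  #hypothetical asset name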
Example #4
def dump(tocPath, outPath, baseTocPath=None, commonDatPath=None):
    toc = dbo.readToc(tocPath)
    if not (toc.get("bundles") or toc.get("chunks")):
        return  #there's nothing to extract (the sb might not even exist)

    sbPath = tocPath[:-3] + "sb"
    sb = openSbFile(sbPath)

    chunkPathToc = os.path.join(outPath, "chunks")
    bundlePath = os.path.join(outPath, "bundles")
    ebxPath = os.path.join(bundlePath, "ebx")
    dbxPath = os.path.join(bundlePath, "dbx")
    resPath = os.path.join(bundlePath, "res")
    chunkPath = os.path.join(bundlePath, "chunks")

    if toc.get("cas"):
        #deal with cas bundles => ebx, dbx, res, chunks.
        for tocEntry in toc.get("bundles"):  #id offset size, size is redundant
            sb.seek(tocEntry.get("offset"))
            bundle = dbo.DbObject(sb)

            for entry in bundle.get("ebx",
                                    list()):  #name sha1 size originalSize
                path = os.path.join(ebxPath, entry.get("name") + ".ebx")
                if casBundlePayload(entry, path, False):
                    ebx.addEbxGuid(path, ebxPath)

            for entry in bundle.get("dbx",
                                    list()):  #name sha1 size originalSize
                if entry.get(
                        "idata"
                ):  #dbx appear only idata if at all, they are probably deprecated and were not meant to be shipped at all.
                    path = os.path.join(dbxPath, entry.get("name") + ".dbx")
                    out = open2(path, "wb")
                    if entry.get("size") == entry.get("originalSize"):
                        out.write(entry.get("idata"))
                    else:
                        out.write(zlibIdata(entry.get("idata")))
                    out.close()

            for entry in bundle.get("res", list()):  #name sha1 size originalSize resType resMeta
                path = os.path.join(resPath, entry.get("name") + ".res")
                casBundlePayload(entry, path, False)

            for entry in bundle.get("chunks", list()):  #id sha1 size chunkMeta::h32 chunkMeta::meta
                path = os.path.join(chunkPath, entry.get("id").format() + ".chunk")
                casBundlePayload(entry, path, True)

        #deal with cas chunks defined in the toc.
        for entry in toc.get("chunks"):  #id sha1
            path = os.path.join(chunkPathToc, entry.get("id").format() + ".chunk")
            casChunkPayload(entry, path)

    else:
        #deal with noncas bundles
        for tocEntry in toc.get("bundles"):  #id offset size, size is redundant
            if tocEntry.get("base"):
                continue  #Patched noncas bundle. However, use the unpatched bundle because no file was patched at all.

            sb.seek(tocEntry.get("offset"))

            if tocEntry.get("delta"):
                #Patched noncas bundle. Here goes the hilarious part. Take the patched data and glue parts from the unpatched data in between.
                #When that is done (in memory of course) the result is a new valid bundle file that can be read like an unpatched one.

                deltaSize, deltaMagic, padding = unpack(">IIQ", sb.read(16))

                class Delta:
                    def __init__(self, sb):
                        self.size, self.typ, self.offset = unpack(">IiQ", sb.read(16))

                deltas = [Delta(sb) for i in range(deltaSize // 16)]

                bundleStream = io.BytesIO()  #here be the new bundle data
                unpatchedSb = openSbFile(baseTocPath[:-3] + "sb")
                #guard against commonDatPath being left at its None default (os.path.isfile(None) raises)
                commonDat = open(commonDatPath, "rb") if commonDatPath and os.path.isfile(commonDatPath) else None

                for delta in deltas:
                    if delta.typ == 1:
                        unpatchedSb.seek(delta.offset)
                        bundleStream.write(unpatchedSb.read(delta.size))
                    elif delta.typ == 0:
                        bundleStream.write(sb.read(delta.size))
                    elif delta.typ == -1:
                        if not commonDat:
                            raise Exception("Found delta type -1 without common.dat present.")
                        commonDat.seek(delta.offset)
                        bundleStream.write(commonDat.read(delta.size))
                    else:
                        raise Exception("Unknown delta type %d in patched bundle at 0x%08x"
                                        % (delta.typ, tocEntry.get("offset")))

                unpatchedSb.close()
                if commonDat: commonDat.close()
                bundleStream.seek(0)

                bundle = noncas.Bundle(bundleStream)
                sb2 = bundleStream
            else:
                bundle = noncas.Bundle(sb)
                sb2 = sb

            for entry in bundle.ebxEntries:
                path = os.path.join(ebxPath, entry.name + ".ebx")
                if noncasBundlePayload(sb2, entry, path):
                    ebx.addEbxGuid(path, ebxPath)

            for entry in bundle.resEntries:
                path = os.path.join(resPath, entry.name + ".res")
                noncasBundlePayload(sb2, entry, path)

            for entry in bundle.chunkEntries:
                path = os.path.join(chunkPath, entry.id.format() + ".chunk")
                noncasBundlePayload(sb2, entry, path)

        #deal with noncas chunks defined in the toc
        for entry in toc.get("chunks"):  #id offset size
            path = os.path.join(chunkPathToc, entry.get("id").format() + ".chunk")
            noncasChunkPayload(sb, entry, path)

    #Clean up.
    sb.close()
    clearTempFiles()
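
A hypothetical pair of invocations (paths are illustrative only); baseTocPath and commonDatPath are only read when a delta bundle is actually encountered:

#unpatched superbundle: the defaults suffice
dump("Data/Win32/example.toc", "dump/example")

#patched superbundle: delta bundles splice in pieces from the unpatched sb,
#and delta type -1 additionally needs common.dat
dump("Patch/Win32/example.toc", "dumpPatch/example",
     baseTocPath="Data/Win32/example.toc",
     commonDatPath="Patch/Win32/common.dat")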
Example #5
def dump(tocPath,baseTocPath,outPath):
    """Take the filename of a toc and dump all files to the targetFolder."""

    #Depending on how you look at it, there can be up to 2*(3*3+1)=20 different cases:
    #    The toc has a cas flag which means all assets are stored in the cas archives. => 2 options
    #        Each bundle has either a delta or base flag, or no flag at all. => 3 options
    #            Each file in the bundle is one of three types: ebx/res/chunks => 3 options
    #        The toc itself contains chunks. => 1 option
    #
    #Simplify things by ignoring base bundles (they just state that the unpatched bundle is used),
    #which is alright, as the user needs to dump the unpatched files anyway.
    #
    #Additionally, add some common fields to the ebx/res/chunks entries so they can be treated the same.
    #=> 6 cases.
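    #The six cases map directly onto the six payload functions used below
    #(chunks defined in the toc are the third case of each storage type):
    #    cas,    unpatched bundle => payload.casBundlePayload
    #    cas,    delta bundle     => payload.casPatchedBundlePayload
    #    cas,    chunk in the toc => payload.casChunkPayload
    #    noncas, unpatched bundle => payload.noncasBundlePayload
    #    noncas, delta bundle     => payload.noncasPatchedBundlePayload
    #    noncas, chunk in the toc => payload.noncasChunkPayload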

    toc=dbo.readToc(tocPath)
    if not (toc.get("bundles") or toc.get("chunks")): return #there's nothing to extract (the sb might not even exist)

    sbPath=tocPath[:-3]+"sb"
    sb=open(sbPath,"rb")

    chunkPathToc=os.path.join(outPath,"chunks")
    bundlePath=os.path.join(outPath,"bundles")
    ebxPath=os.path.join(bundlePath,"ebx")
    resPath=os.path.join(bundlePath,"res")
    chunkPath=os.path.join(bundlePath,"chunks")

    ###read the bundle depending on the four types (+cas+delta, +cas-delta, -cas+delta, -cas-delta) and choose the right function to write the payload
    if toc.get("cas"):
        for tocEntry in toc.get("bundles"): #id offset size, size is redundant
            if tocEntry.get("base"): continue #Patched bundle. However, use the unpatched bundle because no file was patched at all.

            sb.seek(tocEntry.get("offset"))
            bundle=dbo.DbObject(sb)
                    
            #pick the right function
            if tocEntry.get("delta"):
                writePayload=payload.casPatchedBundlePayload
            else:
                writePayload=payload.casBundlePayload

            for entry in bundle.get("ebx",list()): #name sha1 size originalSize
                path=os.path.join(ebxPath,entry.get("name")+".ebx")
                if writePayload(entry,path,False):
                    ebx.addEbxGuid(path,ebxPath)

            for entry in bundle.get("res",list()): #name sha1 size originalSize resRid resType resMeta
                path=os.path.join(resPath,entry.get("name")+".res")
                writePayload(entry,path,False)

            for entry in bundle.get("chunks",list()): #id sha1 size logicalOffset logicalSize chunkMeta::h32 chunkMeta::meta
                path=os.path.join(chunkPath,entry.get("id").format()+".chunk")
                writePayload(entry,path,True)

        #Deal with the chunks which are defined directly in the toc.
        #These chunks do NOT know their originalSize.
        for entry in toc.get("chunks"): #id sha1
            targetPath=os.path.join(chunkPathToc,entry.get("id").format()+".chunk")
            payload.casChunkPayload(entry,targetPath)
    else:
        for tocEntry in toc.get("bundles"): #id offset size, size is redundant
            if tocEntry.get("base"): continue #Patched bundle. However, use the unpatched bundle because no file was patched at all.

            sb.seek(tocEntry.get("offset"))

            if tocEntry.get("delta"):
                #The sb currently points at the delta file.
                #Read the unpatched toc of the same name to get the base bundle.
                baseToc=dbo.readToc(baseTocPath)
                for baseTocEntry in baseToc.get("bundles"):
                    if baseTocEntry.get("id").lower() == tocEntry.get("id").lower():
                        break
                else: #if no base bundle with this name has been found:
                    pass #use the last base bundle. This is okay because it is not actually used at all (the delta uses instructionType 3 only).
                    
                basePath=baseTocPath[:-3]+"sb"
                base=open(basePath,"rb")
                base.seek(baseTocEntry.get("offset"))
                bundle=noncas.patchedBundle(base, sb) #create a patched bundle using base and delta
                base.close()
                writePayload=payload.noncasPatchedBundlePayload
                sourcePath=[basePath,sbPath] #base, delta
            else:
                bundle=noncas.unpatchedBundle(sb)
                writePayload=payload.noncasBundlePayload
                sourcePath=sbPath

            for entry in bundle.ebx:
                path=os.path.join(ebxPath,entry.name+".ebx")
                if writePayload(entry,path,sourcePath):
                    ebx.addEbxGuid(path,ebxPath)

            for entry in bundle.res:
                path=os.path.join(resPath,entry.name+".res")
                writePayload(entry,path,sourcePath)

            for entry in bundle.chunks:
                path=os.path.join(chunkPath,entry.id.format()+".chunk")
                writePayload(entry,path,sourcePath)

        #Deal with the chunks which are defined directly in the toc.
        #These chunks do NOT know their originalSize.
        for entry in toc.get("chunks"): #id offset size
            targetPath=os.path.join(chunkPathToc,entry.get("id").format()+".chunk")
            payload.noncasChunkPayload(entry,targetPath,sbPath)

    sb.close()
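
As before, a hypothetical invocation (paths are illustrative only). Note the argument order differs from the previous example, and baseTocPath is only read when a delta bundle is encountered:

dump("Update/Patch/Data/Win32/example.toc",  #patched toc
     "Data/Win32/example.toc",               #unpatched toc of the same name
     "dumpPatch/example")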