def __readNdxFile(self, file, dic):
    """Parse a JTV .ndx index member of the zip archive into *dic*.

    After a 2-byte header word, the file is a sequence of 12-byte records:
      bytes 0-1   NULL padding (ignored)
      bytes 2-9   FILETIME of the programme start (little-endian, per BytesToInt)
      bytes 10-11 offset of the programme name inside the matching .pdt file

    For each record, dic[offset][1] is set to the start time converted to a
    unix timestamp; missing entries are created as [None, None] so the .pdt
    reader can later fill slot 0 with the programme name.

    :param file: zipfile.ZipInfo entry describing the .ndx member.
    :param dic:  dict mapping pdt-offset -> [name, start_time]; mutated in place.
    """
    size = file.file_size
    # NOTE: ZipFile.read() takes an optional *pwd* (password) as its second
    # argument, not a mode string -- the original passed 'rU' there, which is
    # at best ignored and at worst treated as a wrong password.
    data = self.__zipFile.read(file.filename)
    bt = BytesIO(data)
    try:
        bt.read(2)  # leading header word (record count?) -- unused here
        while bt.tell() < size:
            record = bt.read(12)
            filetime = self.BytesToInt(record[2:10])
            offset = self.BytesToInt(record[10:12])
            start = self.FiletimeToUnixtimestamp(filetime)
            if offset not in dic:
                dic[offset] = [None, None]
            dic[offset][1] = start
    finally:
        bt.close()
def __readPdtFile(self, file, dic):
    """Parse a JTV .pdt programme-name member of the zip archive into *dic*.

    After a fixed 0x1A-byte header, the file is a sequence of
    length-prefixed strings: a 2-byte little-endian length followed by that
    many bytes of programme name in the archive's configured encoding.
    The name is stored at dic[pos][0], where *pos* is the string's offset
    (the same offset the .ndx records point at).

    :param file: zipfile.ZipInfo entry describing the .pdt member.
    :param dic:  dict mapping pdt-offset -> [name, start_time]; mutated in place.
    """
    import struct

    size = file.file_size
    # NOTE: ZipFile.read()'s second argument is a password, not a mode
    # string -- omit the stray 'rU' the original passed.
    data = self.__zipFile.read(file.filename)
    bt = BytesIO(data)
    try:
        bt.seek(0x1A)  # skip the fixed-size .pdt header
        while bt.tell() < size:
            pos = bt.tell()
            # 2-byte little-endian length prefix (replaces the original
            # reversed-hex-string round trip, which decoded the same value).
            (nameLen,) = struct.unpack('<H', bt.read(2))
            name = unicode(bt.read(nameLen), self.__jtvEncodeProgrammName)
            if pos not in dic:
                dic[pos] = [None, None]
            dic[pos][0] = name
    finally:
        bt.close()
def decompress(frequencyTable, valueData, decompDLL=None):
    """Decompress a single telara.db value blob via the native riftdecomp.dll.

    The blob layout is: the uncompressed size encoded as an unsigned LEB128,
    immediately followed by the compressed payload, which is handed to the
    DLL together with the per-dataset frequency table.

    :param frequencyTable: "frequencies" bytes from the dataset_compression row.
    :param valueData:      raw "value" bytes from the dataset row.
    :param decompDLL:      optional already-loaded riftdecomp.dll handle.
                           Loaded on demand when omitted -- the original body
                           referenced a ``decompDLL`` global that was never
                           defined at module scope (it was a local of
                           extractUnencryptedTelaraDB), so calling this
                           function raised NameError.
    :return: BytesIO positioned at 0 over the decompressed bytes.
    """
    if decompDLL is None:
        decompDLL = ctypes.CDLL("riftdecomp.dll")
    buffer = BytesIO(valueData)
    uncompressedSize = readUnsignedLeb128(buffer)
    # everything after the LEB128 prefix is the compressed payload
    compressedSize = len(valueData) - buffer.tell()
    inputData = buffer.read(compressedSize)
    # the DLL writes its result into a pre-sized ctypes buffer
    outputData = ctypes.create_string_buffer(uncompressedSize)
    decompDLL.decompressData(frequencyTable, inputData, compressedSize,
                             outputData, uncompressedSize)
    return BytesIO(outputData.raw)
def extractUnencryptedTelaraDB(unencryptedDBFilename, extractDirectory):
    """Extract and decompress every selected dataset blob from a decrypted
    telara.db into *extractDirectory*, one file per (datasetId, datasetKey).

    DatasetID appears to be a "category" of sorts, with the datasetkey being
    subcategories. For example, dataset 7701 has different keys for different
    languages. Guesses at some randomly chosen dataset id contents:
      83   - ability formulas
      84   - worlds? contains NIF references
      111  - Scene?
      114  - sound bank reference
      4307 - profanity block?
      7701 - EULA

    In test mode only the first row for each datasetid will be extracted;
    disable it to extract more than one row per datasetid.
    ###############
    # WARNING: BE AWARE IF YOU DISABLE TEST MODE WITHOUT CHANGING THE SQL
    # QUERY YOU WILL PULL **EVERY RECORD** FROM THE DATABASE. THERE ARE
    # 400,000+ AND MOST ARE UNDER 1KB. Your filesystem might not appreciate
    # 400,000 1KB files suddenly appearing. You may wish to filter the first
    # query by a specific datasetId, eg:
    #   ds.execute('SELECT * from dataset where datasetId=?', (7701,))
    ###############
    """
    print("Begin extracting of " + unencryptedDBFilename)
    # load the native decompression DLL once, up front
    decompDLL = ctypes.CDLL("riftdecomp.dll")
    conn = sqlite3.connect(unencryptedDBFilename)
    conn.row_factory = sqlite3.Row
    ds = conn.cursor()

    TEST_MODE = True
    test_mode_ids = set()
    ds.execute('SELECT * from dataset order by length(value) desc')
    while True:
        rowA = ds.fetchone()
        if rowA is None:
            break
        dsid = rowA["datasetId"]
        dskey = rowA["datasetKey"]
        # some entries have a "name" that can be useful to identify, but they
        # often contain funny characters, so it is not used in the filename
        dsname = rowA["name"]
        if TEST_MODE:
            if dsid in test_mode_ids:
                continue
            test_mode_ids.add(dsid)
        dsc = conn.cursor()
        dsc.execute("select * from dataset_compression where datasetid= ?",
                    (dsid,))
        freqRow = dsc.fetchone()
        valueData = rowA['value']
        frequencyTable = freqRow["frequencies"]
        # blob layout: unsigned-LEB128 uncompressed size, then payload
        buffer = BytesIO(valueData)
        uncompressedSize = readUnsignedLeb128(buffer)
        compressedSize = len(valueData) - buffer.tell()
        inputData = buffer.read(compressedSize)
        # create a buffer to decompress into
        outputData = ctypes.create_string_buffer(uncompressedSize)
        decompDLL.decompressData(frequencyTable, inputData, compressedSize,
                                 outputData, uncompressedSize)
        # BUGFIX: the original passed a single concatenated string to
        # os.path.join (no separator between directory and filename), and
        # wrote the ctypes array object instead of its .raw bytes.
        outPath = os.path.join(extractDirectory, str(dsid) + "_" + str(dskey))
        f = open(outPath, "wb")
        try:
            f.write(outputData.raw)
        finally:
            f.close()
        dsc.close()
    ds.close()
    conn.close()