def _mark_compressed(self):
    # Assumes module-level `from os import SEEK_SET` (Python 2 code, note xrange).
    for index in self.indices:
        if index.type_id == 0xe86b1eef:  # DIR resource: directory of compressed files
            DIR = dbpf.dir(self.header.index_version)

            # DIR records are 16 bytes for index version 7.0, 20 bytes otherwise.
            if self.header.index_version == '7.0':
                records = index.size // 16
            else:
                records = index.size // 20

            self._fileobj.seek(index.location, SEEK_SET)
            unmatched = self.indices[:]

            for _ in xrange(records):
                parsed = DIR.parse(self._fileobj)
                for entry in unmatched:
                    if parsed.equals_index(entry):
                        entry.compressed = True
                        unmatched.remove(entry)
                        break
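# A minimal sketch (not part of this module) of the two DIR record layouts the
# branch above distinguishes, assuming little-endian unsigned 32-bit fields;
# the actual classes returned by dbpf.dir() may differ:
#
#   import struct
#   DIR_V70 = struct.Struct('<4I')  # type, group, instance, decompressed size -> 16 bytes
#   DIR_V71 = struct.Struct('<5I')  # type, group, instance, resource, decompressed size -> 20 bytes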
def save(self, path):
    # Assumes module-level `from itertools import izip` (Python 2 code).
    version = self.header.index_version
    DIR = dbpf.dir(version)
    Index = dbpf.index(version)

    files = list()
    dirs = list()
    indices = list()
    compressed_files = 0

    # Collect every entry except the DIR resource itself; it is rebuilt below.
    for index in self.indices:
        if index.type_id == 0xe86b1eef:
            continue

        compressed, data = index.dump_file()

        if compressed:
            # Compressed entries also get a DIR record (which has no location field).
            compressed_files += 1
            args = index._data.copy()
            del args['location']
            if index._file is not None:
                args['size'] = len(index._file.raw())
            dirs.append(DIR(*args.values()).raw())

        indices.append(index)
        files.append(data)

    indices_data = list()

    # File data starts after the header and the full index table
    # (plus one extra index entry for the DIR resource, if any).
    offset = dbpf.Header._struct.size
    offset += len(indices) * Index._struct.size
    offset += Index._struct.size if compressed_files > 0 else 0

    for index, data in izip(indices, files):
        index.location = offset
        index.size = len(data)
        indices_data.append(index.raw())
        offset += index.size

    if compressed_files > 0:
        # Append the index entry for the DIR resource, whose data is written last.
        if version == '7.0':
            args = (0xe86b1eef, 0xe86b1eef, 0x286b1f03,
                    offset, compressed_files * DIR._struct.size)
        else:
            args = (0xe86b1eef, 0xe86b1eef, 0x286b1f03, 0x286b1f03,
                    offset, compressed_files * DIR._struct.size)
        indices_data.append(Index(*args).raw())

    self.header.index_count = len(indices_data)
    self.header.index_offset = 96  # header is 96 bytes; the index table follows it
    self.header.index_size = len(indices_data) * Index._struct.size
    self.header.holes_count = 0
    self.header.holes_offset = 0
    self.header.holes_size = 0

    indices_data = ''.join(indices_data)

    # Binary mode: header, index table, file data and DIR records are raw bytes.
    with open(path, 'wb') as f:
        f.write(self.header.raw())
        f.write(indices_data)
        for data in files:
            f.write(data)
        f.write(''.join(dirs))
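# Usage sketch with hypothetical names: assuming a Package class in this module
# wraps the methods above, re-saving an existing archive might look like:
#
#   pkg = dbpf.Package('original.package')  # parse header and index table
#   pkg._mark_compressed()                  # flag entries listed in the DIR resource
#   pkg.save('copy.package')                # rewrite header, index, file data and DIR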