from os import SEEK_SET          # required at module level by these methods
from itertools import izip       # Python 2; use the zip() builtin on Python 3


def _extract_indices(self):
    if self.header.index_count >= 1:
        # Pick the index-record layout matching this archive's index version.
        Index = dbpf.index(self.header.index_version)
        self._fileobj.seek(self.header.index_offset, SEEK_SET)
        for _ in xrange(self.header.index_count):
            self.indices.append(Index.parse(self._fileobj))
        # After parsing index_count records we must sit exactly at the end
        # of the index table; anything else means a truncated file.
        if self._fileobj.tell() != (self.header.index_offset + self.header.index_size):
            raise ValueError('incorrect amount of data read, file too small?')
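# A minimal standalone sketch of the same read-and-verify pattern used above:
# parse a fixed number of records from a known offset, then confirm the file
# pointer landed exactly at offset + size. The record layout here is a
# hypothetical example, not the real DBPF index format.
import struct
from StringIO import StringIO   # Python 2; io.BytesIO on Python 3

_REC = struct.Struct('<II')     # hypothetical two-field little-endian record

def _read_table(fileobj, offset, count):
    fileobj.seek(offset, SEEK_SET)
    records = [_REC.unpack(fileobj.read(_REC.size)) for _ in xrange(count)]
    if fileobj.tell() != offset + count * _REC.size:
        raise ValueError('incorrect amount of data read, file too small?')
    return records

# e.g. _read_table(StringIO(_REC.pack(1, 2) + _REC.pack(3, 4)), 0, 2)
# returns [(1, 2), (3, 4)]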
def save(self, path):
    version = self.header.index_version
    DIR = dbpf.dir(version)
    Index = dbpf.index(version)

    files = list()
    dirs = list()
    indices = list()
    compressed_files = 0

    for index in self.indices:
        # Skip any existing DIR record; a fresh one is rebuilt below.
        if index.type_id == 0xe86b1eef:
            continue
        compressed, data = index.dump_file()
        if compressed:
            compressed_files += 1
            # The DIR entry carries the index's fields minus its location,
            # with the size replaced by the uncompressed size. This assumes
            # index._data preserves field order (e.g. an OrderedDict).
            args = index._data.copy()
            del args['location']
            if index._file is not None:
                args['size'] = len(index._file.raw())
            dirs.append(DIR(*args.values()).raw())
        indices.append(index)
        files.append(data)

    indices_data = list()
    # File data starts right after the header and the index table
    # (plus one extra index entry for the DIR record, if needed).
    offset = dbpf.Header._struct.size
    offset += len(indices) * Index._struct.size
    offset += Index._struct.size if compressed_files > 0 else 0

    for index, file in izip(indices, files):
        index.location = offset
        index.size = len(file)
        indices_data.append(index.raw())
        offset += index.size

    if compressed_files > 0:
        # Index version 7.0 lacks the extra resource-id field.
        if version == '7.0':
            args = (0xe86b1eef, 0xe86b1eef, 0x286b1f03,
                    offset, compressed_files * DIR._struct.size)
        else:
            args = (0xe86b1eef, 0xe86b1eef, 0x286b1f03, 0x286b1f03,
                    offset, compressed_files * DIR._struct.size)
        indices_data.append(Index(*args).raw())

    self.header.index_count = len(indices_data)
    self.header.index_offset = 96
    self.header.index_size = len(indices_data) * Index._struct.size
    self.header.holes_count = 0
    self.header.holes_offset = 0
    self.header.holes_size = 0

    indices_data = ''.join(indices_data)

    # Binary mode matters here; text mode would corrupt the data on Windows.
    with open(path, 'wb') as f:
        f.write(self.header.raw())
        f.write(indices_data)
        for file in files:
            f.write(file)
        f.write(''.join(dirs))
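# A hedged usage sketch: round-trip a package through these methods. The
# `dbpf.Package` constructor name is an assumption; substitute whatever class
# in this module actually owns _extract_indices() and save().
#
# pkg = dbpf.Package('original.package')   # parses header, then index table
# for entry in pkg.indices:
#     print hex(entry.type_id), entry.size
# pkg.save('rewritten.package')            # writes header, indices, files, DIR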