def extract_data(self, index, name, version_id):
    """Processes the payload of one snapshot section, returning the index just past it.

    Args:
        index: offset within the base layer where the section payload begins
        name: the section name read from the section header (e.g. 'ram', 'spapr/htab')
        version_id: the section version read from the section header

    Returns:
        The offset immediately following the section payload (unchanged for
        section names this layer does not parse)

    Raises:
        exceptions.LayerException: on an unsupported RAM or HTAB version_id
    """
    if name == 'ram':
        if version_id != 4:
            # Pass the layer name first, consistent with LayerException usage elsewhere in this file
            raise exceptions.LayerException(
                self._name, "QEMU unknown RAM version_id {}".format(version_id))
        new_segments, index = self._get_ram_segments(
            index, self._configuration.get('page_size', None) or 4096)
        self._segments += new_segments
    elif name == 'spapr/htab':
        if version_id != 1:
            raise exceptions.LayerException(
                self._name, "QEMU unknown HTAB version_id {}".format(version_id))
        header = self.context.object(self._qemu_table_name + constants.BANG + 'unsigned long',
                                     offset=index,
                                     layer_name=self._base_layer)
        index += 4
        if header == 0:
            htab_index = -1
            htab_n_valid = 0
            htab_n_invalid = 0
            # The HTAB stream is terminated by an all-zero (index, n_valid, n_invalid)
            # record.  The original condition joined the tests with `and`, which was
            # False on the very first check (htab_n_valid starts at 0), so the loop
            # body was dead code and the HTAB chunks were never skipped.
            while htab_index != 0 or htab_n_valid != 0 or htab_n_invalid != 0:
                htab = self.context.object(self._qemu_table_name + constants.BANG + 'htab',
                                           offset=index,
                                           layer_name=self._base_layer)
                htab_index, htab_n_valid, htab_n_invalid = htab
                # Each chunk is an 8-byte header followed by n_valid hash PTEs
                index += 8 + (htab_n_valid * self.HASH_PTE_SIZE_64)
    return index
def _check_header(cls, base_layer: interfaces.layers.DataLayerInterface, name: str = ''):
    """Verifies that the layer begins with the QEMU snapshot magic and a supported version.

    Args:
        base_layer: the raw file layer whose first 8 bytes are inspected
        name: layer name reported in any raised exception

    Raises:
        exceptions.LayerException: when the magic bytes or version are wrong
    """
    signature = base_layer.read(0, 8)
    magic, version = signature[:4], signature[4:]
    if magic != b'QEVM':  # same bytes as b'\x51\x45\x56\x4D'
        raise exceptions.LayerException(name, 'No QEMU magic bytes')
    if version != b'\x00\x00\x00\x03':
        raise exceptions.LayerException(name, 'Unsupported QEMU version found')
def _get_ram_segments(
        self, index: int,
        page_size: int) -> Tuple[List[Tuple[int, int, int, int]], int]:
    """Recovers the new index and any sections of memory from a ram section

    Args:
        index: offset within the base layer at which the RAM section data starts
        page_size: snapshot page size (assumed to be a power of two)

    Returns:
        A tuple of (segments, new_index), where each segment is
        (physical address, file offset, page size, on-disk length)
    """
    done = None
    segments = []
    base_layer = self.context.layers[self._base_layer]
    while not done:
        addr = self.context.object(self._qemu_table_name + constants.BANG + 'unsigned long long',
                                   offset=index,
                                   layer_name=self._base_layer)
        # The bits below the page size carry the segment flags
        flags = addr & (page_size - 1)
        # Mask off the flag bits to recover the page-aligned address.  Masking is
        # exact for any power-of-two page size, unlike the previous
        # int(math.log(page_size, 2)) shift, which round-tripped through floating point.
        addr &= ~(page_size - 1)
        index += 8
        if flags & self.SEGMENT_FLAG_MEM_SIZE:
            # Skip the list of (1-byte length, name, 8-byte size) records,
            # terminated by a zero-length name
            namelen = self._context.object(
                self._qemu_table_name + constants.BANG + 'unsigned char',
                offset=index,
                layer_name=self._base_layer)
            while namelen != 0:
                index += 1 + namelen + 8
                namelen = self._context.object(self._qemu_table_name + constants.BANG + 'unsigned char',
                                               offset=index,
                                               layer_name=self._base_layer)
        if flags & (self.SEGMENT_FLAG_COMPRESS | self.SEGMENT_FLAG_PAGE):
            if not (flags & self.SEGMENT_FLAG_CONTINUE):
                # A new block name precedes the page data unless CONTINUE is set
                namelen = self._context.object(self._qemu_table_name + constants.BANG + 'unsigned char',
                                               offset=index,
                                               layer_name=self._base_layer)
                self._current_segment_name = base_layer.read(
                    index + 1, namelen)
                index += 1 + namelen
            if flags & self.SEGMENT_FLAG_COMPRESS:
                # Compressed page: a single fill byte expands to a whole page
                if self._current_segment_name == b'pc.ram':
                    segments.append((addr, index, page_size, 1))
                    self._compressed.add(addr)
                index += 1
            else:
                if self._current_segment_name == b'pc.ram':
                    segments.append((addr, index, page_size, page_size))
                index += page_size
        if flags & self.SEGMENT_FLAG_XBZRLE:
            raise exceptions.LayerException(
                self.name, "XBZRLE compression not supported")
        if flags & self.SEGMENT_FLAG_EOS:
            done = True
    return segments, index
def write_layer(
        cls,
        context: interfaces.context.ContextInterface,
        layer_name: str,
        preferred_name: str,
        open_method: Type[plugins.FileHandlerInterface],
        chunk_size: Optional[int] = None,
        progress_callback: Optional[constants.ProgressCallback] = None) -> Optional[plugins.FileHandlerInterface]:
    """Produces a FileHandler from the named layer in the provided context or None on failure

    Args:
        context: the context from which to read the memory layer
        layer_name: the name of the layer to write out
        preferred_name: a string with the preferred filename for the file
        chunk_size: an optional size for the chunks that should be written (defaults to 0x500000)
        open_method: class for creating FileHandler context managers
        progress_callback: an optional function that takes a percentage and a string that displays output

    Returns:
        The FileHandler that the layer contents were written to

    Raises:
        exceptions.LayerException: if layer_name is not present in context.layers
    """
    if layer_name not in context.layers:
        # Pass the layer name separately from the message, consistent with
        # LayerException usage elsewhere in this file
        raise exceptions.LayerException(layer_name, "Layer not found")
    layer = context.layers[layer_name]
    if chunk_size is None:
        chunk_size = cls.default_block_size

    file_handle = open_method(preferred_name)
    for i in range(0, layer.maximum_address, chunk_size):
        # NOTE(review): if maximum_address is an inclusive bound, this loop stops
        # one byte short of the end -- confirm against the layer interface
        current_chunk_size = min(chunk_size, layer.maximum_address - i)
        # pad=True so unmapped holes are written out as zeroes
        data = layer.read(i, current_chunk_size, pad=True)
        file_handle.write(data)
        if progress_callback:
            progress_callback((i / layer.maximum_address) * 100,
                              f'Writing layer {layer_name}')
    return file_handle
def read(self, offset: int, length: int, pad: bool = False) -> bytes:
    """Reads an offset for length bytes and returns 'bytes' (not 'str') of length size.

    When pad is True, gaps the mapping cannot cover are filled with zero bytes
    instead of raising; the result is always exactly `length` bytes long.
    """
    position = offset
    chunks: List[bytes] = []
    for segment_start, _, mapped_offset, mapped_length, layer in self.mapping(
            offset, length, ignore_errors=pad):
        if segment_start > position:
            if not pad:
                raise exceptions.InvalidAddressException(
                    self.name, position,
                    f"Layer {self.name} cannot map offset: {position}")
            # Zero-fill the hole before this segment
            chunks.append(b"\x00" * (segment_start - position))
            position = segment_start
        elif segment_start < position:
            raise exceptions.LayerException(
                self.name, "Mapping returned an overlapping element")
        if mapped_length > 0:
            chunks.append(
                self._context.layers.read(layer, mapped_offset, mapped_length, pad))
        position += mapped_length
    recovered = b"".join(chunks)
    # Pad out to the requested length in case the mapping stopped early
    return recovered + b"\x00" * (length - len(recovered))
def add_layer(self, layer: DataLayerInterface) -> None:
    """Adds a layer to memory model.

    This will throw an exception if the required dependencies are not met

    Args:
        layer: the layer to add to the list of layers (based on layer.name)
    """
    layer_name = layer.name
    if layer_name in self._layers:
        raise exceptions.LayerException(layer_name,
                                        f"Layer already exists: {layer_name}")
    # Only translation layers have dependencies that must already be registered
    if isinstance(layer, TranslationLayerInterface):
        unmet = [dependency for dependency in layer.dependencies
                 if dependency not in self._layers]
        if unmet:
            raise exceptions.LayerException(
                layer_name,
                f"Layer {layer_name} has unmet dependencies: {', '.join(unmet)}")
    self._layers[layer_name] = layer
def _read_configuration(self, base_layer: interfaces.layers.DataLayerInterface, name: str) -> Any:
    """Reads the JSON configuration from the end of the file

    Args:
        base_layer: the raw QEMU file layer to scan backwards
        name: layer name reported in any raised exception

    Returns:
        The parsed JSON configuration dictionary

    Raises:
        exceptions.LayerException: when no JSON configuration can be located
    """
    # NOTE(review): 0x4096 is 16534, which looks like a typo for 4096/0x1000,
    # but it only sets the backward-scan stride so it is left unchanged here
    chunk_size = 0x4096
    data = b''
    # Walk backwards from the end of the file, accumulating data until a NUL
    # separator appears; the JSON blob sits after the last NUL
    for i in range(base_layer.maximum_address, base_layer.minimum_address, -chunk_size):
        if i != base_layer.maximum_address:
            data = base_layer.read(i, chunk_size) + data
            if b'\x00' in data:
                start = data.rfind(b'\x00')
                json_start = data.find(b'{', start)
                # Guard against a missing '{': the original sliced with the -1
                # returned by find(), handing json.loads a single trailing byte
                # and raising JSONDecodeError instead of a LayerException
                if json_start < 0:
                    break
                data = data[json_start:]
                return json.loads(data)
    raise exceptions.LayerException(name, "Could not load JSON configuration from the end of the file")
def del_layer(self, name: str) -> None:
    """Removes the layer called name.

    This will throw an exception if other layers depend upon this layer

    Args:
        name: The name of the layer to delete

    Raises:
        exceptions.LayerException: if any other registered layer lists `name`
            among its dependencies
    """
    # Collect the names of layers that actually list `name` as a dependency.
    # (The original comprehension iterated `superlayer` but tested the outer
    # loop variable's dependencies, so it ignored the comprehension variable
    # and reported every layer as a dependent.)
    depend_list = [layer_name for layer_name, layer in self._layers.items()
                   if name in layer.dependencies]
    if depend_list:
        raise exceptions.LayerException(
            name,
            f"Layer {name} is depended upon: {', '.join(depend_list)}")
    self._layers[name].destroy()
    del self._layers[name]
def write(self, offset: int, value: bytes) -> None:
    """Writes a value at offset, distributing the writing across any underlying mapping."""
    position = offset
    remaining = value
    for segment_start, _, target_offset, chunk_len, layer_name in self.mapping(
            offset, len(remaining)):
        if segment_start > position:
            raise exceptions.InvalidAddressException(
                self.name, position,
                f"Layer {self.name} cannot map offset: {position}")
        elif segment_start < position:
            raise exceptions.LayerException(
                self.name, "Mapping returned an overlapping element")
        # Hand this segment's slice of the value to the underlying layer
        self._context.layers.write(layer_name, target_offset, remaining[:chunk_len])
        remaining = remaining[chunk_len:]
        position += chunk_len
def _read_configuration(self, base_layer: interfaces.layers.DataLayerInterface, name: str) -> Any:
    """Reads the JSON configuration from the end of the file

    Scans backwards from the end of the base layer for the NUL-separated
    JSON blob; returns an empty dict when a separator is found but no JSON
    follows it, and raises when no separator is found at all.
    """
    # NOTE(review): 0x4096 is 16534, not 4096 -- possibly a typo for 0x1000,
    # but it only sets the backward-scan stride, so behaviour is unaffected
    chunk_size = 0x4096
    data = b''
    # Walk backwards from the end of the file one chunk at a time
    for i in range(base_layer.maximum_address, base_layer.minimum_address, -chunk_size):
        # The first iteration (i == maximum_address) reads nothing; data stays empty
        if i != base_layer.maximum_address:
            # Prepend the new chunk and drop padding NULs from the very end of the file
            data = (base_layer.read(i, chunk_size) + data).rstrip(b'\x00')
            if b'\x00' in data:
                # The JSON blob starts after the last interior NUL separator
                last_null_byte = data.rfind(b'\x00')
                start_of_json = data.find(b'{', last_null_byte)
                if start_of_json >= 0:
                    data = data[start_of_json:]
                    return json.loads(data)
                # A NUL with no following '{' means there is no JSON configuration
                return dict()
    raise exceptions.LayerException(
        name, "Invalid JSON configuration at the end of the file")
def translate(
        self,
        offset: int,
        ignore_errors: bool = False
) -> Tuple[Optional[int], Optional[str]]:
    """Translates a single offset through the mapping, returning (mapped_offset, layer_name).

    Returns (None, None) instead of raising when ignore_errors is True and
    the offset cannot be mapped.
    """
    # A zero-length mapping request resolves exactly one segment for a linear layer
    hits = list(self.mapping(offset, 0, ignore_errors))
    if len(hits) != 1:
        if ignore_errors:
            # We should only hit this if we ignored errors, but check anyway
            return None, None
        raise exceptions.InvalidAddressException(
            self.name, offset, f"Cannot translate {offset} in layer {self.name}")
    requested_offset, _, translated_offset, _, layer = hits[0]
    if requested_offset != offset:
        raise exceptions.LayerException(
            self.name, f"Layer {self.name} claims to map linearly but does not")
    return translated_offset, layer
def _load_segments(self):
    """Walks the QEMU snapshot section stream, delegating payloads to extract_data.

    Validates the 8-byte file header, loads the trailing JSON configuration if
    not already present, then dispatches on each section-type byte until
    QEVM_EOF or the end of the base layer is reached.
    """
    base_layer = self.context.layers[self._base_layer]
    self._check_header(base_layer, self.name)
    if not self._configuration:
        self._configuration = self._read_configuration(
            base_layer, self.name)
    section_byte = -1
    # Start just past the 8-byte magic/version header that _check_header validated
    index = 8
    current_section_id = -1
    version_id = -1
    name = None
    while section_byte != self.QEVM_EOF and index <= base_layer.maximum_address:
        # Each section begins with a single type byte
        section_byte = self.context.object(
            self._qemu_table_name + constants.BANG + 'unsigned char',
            offset=index,
            layer_name=self._base_layer)
        index += 1
        if section_byte == self.QEVM_CONFIGURATION:
            # Configuration section: a 4-byte length followed by opaque data to skip
            section_len = self.context.object(
                self._qemu_table_name + constants.BANG + 'unsigned long',
                offset=index,
                layer_name=self._base_layer)
            index += 4 + section_len
        elif section_byte == self.QEVM_SECTION_START or section_byte == self.QEVM_SECTION_FULL:
            # Full header: section id, name length, name, instance id, version id
            section_id = self.context.object(
                self._qemu_table_name + constants.BANG + 'unsigned long',
                offset=index,
                layer_name=self._base_layer)
            current_section_id = section_id
            index += 4
            name_len = self.context.object(
                self._qemu_table_name + constants.BANG + 'unsigned char',
                offset=index,
                layer_name=self._base_layer)
            index += 1
            name = self.context.object(self._qemu_table_name + constants.BANG + 'string',
                                       offset=index,
                                       layer_name=self._base_layer,
                                       max_length=name_len)
            index += name_len
            # The 4-byte instance id is skipped without being read
            # instance_id = self.context.object(self._qemu_table_name + constants.BANG + 'unsigned long',
            #                                   offset = index,
            #                                   layer_name = self._base_layer)
            index += 4
            version_id = self.context.object(
                self._qemu_table_name + constants.BANG + 'unsigned long',
                offset=index,
                layer_name=self._base_layer)
            index += 4
            # Read additional data
            index = self.extract_data(index, name, version_id)
        elif section_byte == self.QEVM_SECTION_PART or section_byte == self.QEVM_SECTION_END:
            # Continuation sections carry only an id; name/version are reused
            # from the most recent START/FULL section
            section_id = self.context.object(
                self._qemu_table_name + constants.BANG + 'unsigned long',
                offset=index,
                layer_name=self._base_layer)
            current_section_id = section_id
            index += 4
            # Read additional data
            index = self.extract_data(index, name,
                                      version_id)
        elif section_byte == self.QEVM_SECTION_FOOTER:
            # The footer repeats the id of the section it closes; a mismatch
            # indicates a corrupt or mis-parsed stream
            section_id = self.context.object(
                self._qemu_table_name + constants.BANG + 'unsigned long',
                offset=index,
                layer_name=self._base_layer)
            index += 4
            if section_id != current_section_id:
                raise exceptions.LayerException(
                    self._name, 'QEMU section footer mismatch: {} and {}'.format(
                        current_section_id, section_id))
        elif section_byte == self.QEVM_EOF:
            pass
        else:
            raise exceptions.LayerException(
                self._name, 'QEMU unknown section encountered: {}'.format(
                    section_byte))
def _get_ram_segments(
        self, index: int,
        page_size: int) -> Tuple[List[Tuple[int, int, int, int]], int]:
    """Recovers the new index and any sections of memory from a ram section

    Returns a tuple of (segments, new_index), where each segment is
    (physical address, file offset, page size, on-disk length).
    """
    done = None
    segments = []
    base_layer = self.context.layers[self._base_layer]
    while not done:
        addr = self.context.object(self._qemu_table_name + constants.BANG + 'unsigned long long',
                                   offset=index,
                                   layer_name=self._base_layer)
        # Flags are stored in the n least significant bits, where n equals the bit-length of pagesize
        flags = addr & (page_size - 1)
        # addr equals the highest multiple of pagesize <= offset
        # (We assume that page_size is a power of 2)
        addr = addr ^ (addr & (page_size - 1))
        index += 8
        if flags & self.SEGMENT_FLAG_MEM_SIZE:
            # Skip the list of (1-byte length, name, 8-byte size) records,
            # terminated by a zero-length name
            namelen = self._context.object(
                self._qemu_table_name + constants.BANG + 'unsigned char',
                offset=index,
                layer_name=self._base_layer)
            while namelen != 0:
                # if base_layer.read(index + 1, namelen) == b'pc.ram':
                #     total_size = self._context.object(self._qemu_table_name + constants.BANG + 'unsigned long long',
                #                                       offset = index + 1 + namelen,
                #                                       layer_name = self._base_layer)
                index += 1 + namelen + 8
                namelen = self._context.object(self._qemu_table_name + constants.BANG + 'unsigned char',
                                               offset=index,
                                               layer_name=self._base_layer)
        if flags & (self.SEGMENT_FLAG_COMPRESS | self.SEGMENT_FLAG_PAGE):
            if not (flags & self.SEGMENT_FLAG_CONTINUE):
                # A new block name precedes the page data unless CONTINUE is set
                namelen = self._context.object(self._qemu_table_name + constants.BANG + 'unsigned char',
                                               offset=index,
                                               layer_name=self._base_layer)
                self._current_segment_name = base_layer.read(
                    index + 1, namelen)
                index += 1 + namelen
            if flags & self.SEGMENT_FLAG_COMPRESS:
                # Compressed page: a single fill byte expands to a whole page
                if self._current_segment_name == b'pc.ram':
                    segments.append((addr, index, page_size, 1))
                    self._compressed.add(addr)
                index += 1
            else:
                # Uncompressed page: a full page of data follows in the file
                if self._current_segment_name == b'pc.ram':
                    segments.append((addr, index, page_size, page_size))
                index += page_size
        if flags & self.SEGMENT_FLAG_XBZRLE:
            raise exceptions.LayerException(
                self.name, "XBZRLE compression not supported")
        if flags & self.SEGMENT_FLAG_EOS:
            done = True
    return segments, index