def method_kdbg_offset(self,
                       context: interfaces.context.ContextInterface,
                       vlayer: layers.intel.Intel,
                       progress_callback: constants.ProgressCallback = None) -> ValidKernelsType:
    """Locates the kernel base by scanning the physical layer for KDBG
    structures and following the pointer stored just after the tag.

    Args:
        context: The context holding the layers to scan
        vlayer: The virtual (intel) layer whose kernel base is sought
        progress_callback: Optional callable to report scan progress

    Returns:
        A dictionary of validated kernels (empty if none verified)
    """
    vollog.debug("Kernel base determination - using KDBG structure for kernel offset")
    valid_kernels = {}  # type: ValidKernelsType
    phys_name = self.get_physical_layer_name(context, vlayer)
    hits = context.layers[phys_name].scan(context,
                                          scanners.BytesScanner(b"KDBG"),
                                          progress_callback=progress_callback)
    checked = set()  # type: Set[int]
    for hit in hits:
        # TODO: Identify the specific structure we're finding and document this a bit better
        # The candidate kernel pointer sits 8 bytes beyond the KDBG tag
        candidate = context.object("pdbscan!unsigned long long", offset=hit + 8, layer_name=phys_name)
        masked = candidate & vlayer.address_mask
        if masked in checked:
            continue
        checked.add(masked)
        valid_kernels = self.check_kernel_offset(context, vlayer, masked, progress_callback)
        if valid_kernels:
            break
    return valid_kernels
def unsatisfied(self, context: interfaces.context.ContextInterface,
                config_path: str) -> Dict[str, interfaces.configuration.RequirementInterface]:
    """Check the types on each of the returned values and their number and
    then call the element type's check for each one.

    Args:
        context: The context containing the configuration store
        config_path: The configuration path of this requirement's parent

    Returns:
        A dictionary mapping unsatisfied configuration paths to requirements
        (empty when the requirement is satisfied)

    Raises:
        TypeError: if the configured value is not a list
    """
    config_path = interfaces.configuration.path_join(config_path, self.name)
    default = None
    value = self.config_value(context, config_path, default)
    if not value and self.min_elements > 0:
        vollog.log(constants.LOGLEVEL_V, "ListRequirement Unsatisfied - ListRequirement has non-zero min_elements")
        return {config_path: self}
    if value is None and not self.optional:
        # We need to differentiate between no value and an empty list
        vollog.log(constants.LOGLEVEL_V, "ListRequirement Unsatisfied - Value was not specified")
        return {config_path: self}
    elif value is None:
        # Optional and unspecified: record an empty list in the config and
        # validate that, rather than falling through to the isinstance check
        # with None (which previously raised TypeError for a valid absence)
        context.config[config_path] = []
        value = []
    if not isinstance(value, list):
        # TODO: Check this is the correct response for an error
        raise TypeError("Unexpected config value found: {}".format(repr(value)))
    if not (self.min_elements <= len(value)):
        vollog.log(constants.LOGLEVEL_V, "TypeError - Too few values provided to list option.")
        return {config_path: self}
    if self.max_elements and not (len(value) < self.max_elements):
        # NOTE(review): this rejects lists whose length equals max_elements —
        # confirm whether max_elements is intended to be an exclusive bound
        vollog.log(constants.LOGLEVEL_V, "TypeError - Too many values provided to list option.")
        return {config_path: self}
    if not all(isinstance(element, self.element_type) for element in value):
        vollog.log(constants.LOGLEVEL_V, "TypeError - At least one element in the list is not of the correct type.")
        return {config_path: self}
    return {}
def load_pdb_layer(cls, context: interfaces.context.ContextInterface,
                   location: str) -> Tuple[str, interfaces.context.ContextInterface]:
    """Loads a PDB file into a layer within the context and returns the
    name of the new layer.

    Note: the context may be changed by this method
    """
    file_layer_name = context.layers.free_layer_name("FileLayer")
    file_config = interfaces.configuration.path_join("pdbreader", file_layer_name)
    # Create the file layer
    # This must be specific to get us started, setup the config and run
    ctx = context.clone()
    ctx.config[interfaces.configuration.path_join(file_config, "location")] = location
    ctx.add_layer(physical.FileLayer(ctx, file_config, file_layer_name))
    # Stack the MSF format layer on top of the raw file layer
    msf_name = context.layers.free_layer_name("MSFLayer")
    msf_config = interfaces.configuration.path_join("pdbreader", msf_name)
    ctx.config[interfaces.configuration.path_join(msf_config, "base_layer")] = file_layer_name
    msf_layer = msf.PdbMultiStreamFormat(ctx, msf_config, msf_name)
    ctx.add_layer(msf_layer)
    msf_layer.read_streams()
    return msf_name, ctx
def method_module_offset(self,
                         context: interfaces.context.ContextInterface,
                         vlayer: layers.intel.Intel,
                         progress_callback: constants.ProgressCallback = None) -> ValidKernelsType:
    """Method for finding a suitable kernel offset based on a module table."""
    vollog.debug("Kernel base determination - searching layer module list structure")
    valid_kernels = {}  # type: ValidKernelsType
    # If we're here, chances are high we're in a Win10 x64 image with kernel base randomization
    phys_name = self.get_physical_layer_name(context, vlayer)
    # TODO: On older windows, this might be \WINDOWS\system32\nt rather than \SystemRoot\system32\nt
    hits = context.layers[phys_name].scan(context,
                                          scanners.BytesScanner(b"\\SystemRoot\\system32\\nt"),
                                          progress_callback=progress_callback)
    checked = set()  # type: Set[int]
    # Because this will launch a scan of the virtual layer, we want to be careful
    for hit in hits:
        # TODO: Identify the specific structure we're finding and document this a bit better
        pointer = context.object("pdbscan!unsigned long long",
                                 offset=hit - 16 - int(vlayer.bits_per_register / 8),
                                 layer_name=phys_name)
        masked = pointer & vlayer.address_mask
        if masked in checked:
            continue
        checked.add(masked)
        valid_kernels = self.check_kernel_offset(context, vlayer, masked, progress_callback)
        if valid_kernels:
            break
    return valid_kernels
def virtual_process_from_physical(cls, context: interfaces.context.ContextInterface,
                                  layer_name: str, symbol_table: str,
                                  proc: interfaces.objects.ObjectInterface) -> \
        Iterable[interfaces.objects.ObjectInterface]:
    """Returns a virtual process from a physical addressed one

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        proc: the process object with physical address

    Returns:
        A process object on virtual address layer
    """
    # We'll use the first thread to bounce back to the virtual process
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    # Offset of the ThreadListEntry member within _ETHREAD
    tle = ntkrnlmp.get_type("_ETHREAD").relative_child_offset("ThreadListEntry")
    candidate_offsets = [tle]
    # If (and only if) we're dealing with 64-bit Windows 7 SP1
    # then add the other commonly seen member offset to the list
    kuser = info.Info.get_kuser_structure(context, layer_name, symbol_table)
    vers = info.Info.get_version_structure(context, layer_name, symbol_table)
    version = (int(kuser.NtMajorVersion), int(kuser.NtMinorVersion), vers.MinorVersion)
    if version == (6, 1, 7601) and context.layers[layer_name].bits_per_register == 64:
        candidate_offsets.append(tle + 8)
    # Now we can try to bounce back
    for ofs in candidate_offsets:
        ethread = ntkrnlmp.object(object_type="_ETHREAD",
                                  offset=proc.ThreadListHead.Flink - ofs,
                                  absolute=True)
        # Ask for the thread's process to get an _EPROCESS with a virtual address layer
        virtual_process = ethread.owning_process()
        # Sanity check the bounce: compare the original physical offset with
        # the translated offset of the candidate virtual process
        (_, _, ph_offset, _, _) = list(context.layers[layer_name].mapping(
            offset=virtual_process.vol.offset, length=0))[0]
        if virtual_process and proc.vol.offset == ph_offset:
            return virtual_process
def get_type_map(cls, context: interfaces.context.ContextInterface,
                 layer_name: str, symbol_table: str) -> Dict[int, str]:
    """List the executive object types (_OBJECT_TYPE) using the
    ObTypeIndexTable or ObpObjectTypes symbol (differs per OS).

    This method will be necessary for determining what type of object we
    have given an object header.

    Note:
        The object type index map was hard coded into profiles in previous
        versions of volatility. It is now generated dynamically.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols

    Returns:
        A mapping of type indices to type names
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    # Prefer ObTypeIndexTable, falling back to ObpObjectTypes (symbol name differs per OS)
    try:
        table_addr = ntkrnlmp.get_symbol("ObTypeIndexTable").address
    except exceptions.SymbolError:
        table_addr = ntkrnlmp.get_symbol("ObpObjectTypes").address
    ptrs = ntkrnlmp.object(object_type="array", offset=table_addr,
                           subtype=ntkrnlmp.get_type("pointer"), count=100)
    type_map = {}
    for index, ptr in enumerate(ptrs):  # type: ignore
        # the first entry in the table is always null. break the
        # loop when we encounter the first null entry after that
        if index > 0 and ptr == 0:
            break
        objt = ptr.dereference().cast(symbol_table + constants.BANG + "_OBJECT_TYPE")
        try:
            type_map[index] = objt.Name.String
        except exceptions.InvalidAddressException:
            vollog.log(constants.LOGLEVEL_VVV,
                       "Cannot access _OBJECT_HEADER.Name at {0:#x}".format(objt.Name.vol.offset))
    return type_map
def list_notify_routines(cls, context: interfaces.context.ContextInterface,
                         layer_name: str, symbol_table: str,
                         callback_table_name: str) -> Iterable[Tuple[str, int, Optional[str]]]:
    """Lists all kernel notification routines.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        callback_table_name: The name of the table containing the callback symbols

    Yields:
        A name, location and optional detail string
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    is_vista_or_later = versions.is_vista_or_later(context=context, symbol_table=symbol_table)
    full_type_name = callback_table_name + constants.BANG + "_GENERIC_CALLBACK"
    # Each entry: (symbol name, whether Vista+ extends the array to 64 slots)
    for symbol_name, extended_list in [("PspLoadImageNotifyRoutine", False),
                                       ("PspCreateThreadNotifyRoutine", True),
                                       ("PspCreateProcessNotifyRoutine", True)]:
        try:
            symbol_offset = ntkrnlmp.get_symbol(symbol_name).address
        except exceptions.SymbolError:
            vollog.debug("Cannot find {}".format(symbol_name))
            continue
        count = 64 if (is_vista_or_later and extended_list) else 8
        fast_refs = ntkrnlmp.object(object_type="array", offset=symbol_offset,
                                    subtype=ntkrnlmp.get_type("_EX_FAST_REF"), count=count)
        for fast_ref in fast_refs:
            try:
                callback = fast_ref.dereference().cast(full_type_name)
            except exceptions.InvalidAddressException:
                continue
            if callback.Callback != 0:
                yield symbol_name, callback.Callback, None
def get_kernel_module(cls, context: interfaces.context.ContextInterface, layer_name: str, symbol_table: str):
    """Returns the kernel module based on the layer and symbol_table"""
    layer = context.layers[layer_name]
    # Only intel (virtual-address) layers carry a kernel_virtual_offset
    if not isinstance(layer, layers.intel.Intel):
        raise TypeError("Virtual Layer is not an intel layer")
    return context.module(symbol_table, layer_name=layer_name,
                          offset=layer.config["kernel_virtual_offset"])
def find_cookie(cls, context: interfaces.context.ContextInterface,
                layer_name: str, symbol_table: str) -> Optional[interfaces.objects.ObjectInterface]:
    """Find the ObHeaderCookie value (if it exists)"""
    # Not all kernels define ObHeaderCookie; absence is not an error
    try:
        cookie_rva = context.symbol_space.get_symbol(
            symbol_table + constants.BANG + "ObHeaderCookie").address
    except exceptions.SymbolError:
        return None
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    return context.object(symbol_table + constants.BANG + "unsigned int", layer_name,
                          offset=kvo + cookie_rva)
def list_bugcheck_callbacks(cls, context: interfaces.context.ContextInterface,
                            layer_name: str, symbol_table: str,
                            callback_table_name: str) -> Iterable[Tuple[str, int, str]]:
    """Lists all kernel bugcheck callbacks.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        callback_table_name: The name of the table containing the callback symbols

    Yields:
        A name, location and optional detail string
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    try:
        list_offset = ntkrnlmp.get_symbol("KeBugCheckCallbackListHead").address
    except exceptions.SymbolError:
        vollog.debug("Cannot find KeBugCheckCallbackListHead")
        return
    full_type_name = callback_table_name + constants.BANG + "_KBUGCHECK_CALLBACK_RECORD"
    record = context.object(full_type_name, offset=kvo + list_offset, layer_name=layer_name)
    for callback in record.Entry:
        if not context.layers[layer_name].is_valid(callback.CallbackRoutine):
            continue
        # The Component string may be unreadable; report that rather than failing
        try:
            component = context.object(symbol_table + constants.BANG + "string",
                                       layer_name=layer_name, offset=callback.Component,
                                       max_length=64, errors="replace")
        except exceptions.InvalidAddressException:
            component = renderers.UnreadableValue()
        yield "KeBugCheckCallbackListHead", callback.CallbackRoutine, component
def process_dump(cls, context: interfaces.context.ContextInterface,
                 kernel_table_name: str, pe_table_name: str,
                 proc: interfaces.objects.ObjectInterface,
                 open_method: Type[interfaces.plugins.FileHandlerInterface]
                 ) -> interfaces.plugins.FileHandlerInterface:
    """Extracts the complete data for a process as a FileHandlerInterface

    Args:
        context: the context to operate upon
        kernel_table_name: the name for the symbol table containing the kernel's symbols
        pe_table_name: the name for the symbol table containing the PE format symbols
        proc: the process object whose memory should be output
        open_method: class to provide context manager for opening the file

    Returns:
        An open FileHandlerInterface object containing the complete data for the process or None in the case of failure
    """
    file_handle = None
    # Best-effort: any failure is logged and None is returned
    try:
        proc_layer_name = proc.add_process_layer()
        peb = context.object(kernel_table_name + constants.BANG + "_PEB",
                             layer_name=proc_layer_name, offset=proc.Peb)
        dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                    offset=peb.ImageBaseAddress, layer_name=proc_layer_name)
        file_handle = open_method("pid.{0}.{1:#x}.dmp".format(proc.UniqueProcessId, peb.ImageBaseAddress))
        # Replay the reconstructed PE image into the output file
        for offset, data in dos_header.reconstruct():
            file_handle.seek(offset)
            file_handle.write(data)
    except Exception as excp:
        vollog.debug("Unable to dump PE with pid {}: {}".format(proc.UniqueProcessId, excp))
    return file_handle
def find_aslr(cls, context: interfaces.context.ContextInterface,
              symbol_table: str, layer_name: str,
              progress_callback: constants.ProgressCallback = None) -> Tuple[int, int]:
    """Determines the offset of the actual DTB in physical space and its
    symbol offset."""
    init_task_symbol = symbol_table + constants.BANG + 'init_task'
    init_task_json_address = context.symbol_space.get_symbol(init_task_symbol).address
    swapper_signature = rb"swapper(\/0|\x00\x00)\x00\x00\x00\x00\x00\x00"
    module = context.module(symbol_table, layer_name, 0)
    address_mask = context.symbol_space[symbol_table].config.get('symbol_mask', None)
    # Locate candidate init_task structures via the fixed 'swapper' comm field
    comm_offset = module.get_type('task_struct').relative_child_offset('comm')
    for hit in context.layers[layer_name].scan(scanner=scanners.RegExScanner(swapper_signature),
                                               context=context,
                                               progress_callback=progress_callback):
        candidate_address = hit - comm_offset
        candidate = module.object(object_type='task_struct', offset=candidate_address, absolute=True)
        if candidate.pid != 0:
            continue
        if candidate.has_member('state') and candidate.state.cast('unsigned int') != 0:
            continue
        # This we get for free
        aslr_shift = candidate.files.cast('long unsigned int') - module.get_symbol('init_files').address
        kaslr_shift = candidate_address - cls.virtual_to_physical_address(init_task_json_address)
        if address_mask:
            aslr_shift = aslr_shift & address_mask
        # Both shifts must be page-aligned to be plausible
        if aslr_shift & 0xfff != 0 or kaslr_shift & 0xfff != 0:
            continue
        vollog.debug("Linux ASLR shift values determined: physical {:0x} virtual {:0x}".format(
            kaslr_shift, aslr_shift))
        return kaslr_shift, aslr_shift
    # We don't throw an exception, because we may legitimately not have an ASLR shift, but we report it
    vollog.debug("Scanners could not determine any ASLR shifts, using 0 for both")
    return 0, 0
def get_session_layers(cls, context: interfaces.context.ContextInterface,
                       layer_name: str, symbol_table: str,
                       pids: List[int] = None) -> Generator[str, None, None]:
    """Build a cache of possible virtual layers, in priority starting with
    the primary/kernel layer. Then keep one layer per session by cycling
    through the process list.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        pids: A list of process identifiers to include exclusively or None for no filter

    Returns:
        A list of session layer names
    """
    seen_ids = []  # type: List[interfaces.objects.ObjectInterface]
    filter_func = pslist.PsList.create_pid_filter(pids or [])
    for proc in pslist.PsList.list_processes(context=context, layer_name=layer_name,
                                             symbol_table=symbol_table, filter_func=filter_func):
        proc_id = "Unknown"
        try:
            proc_id = proc.UniqueProcessId
            proc_layer_name = proc.add_process_layer()
            # create the session space object in the process' own layer.
            # not all processes have a valid session pointer.
            session_space = context.object(symbol_table + constants.BANG + "_MM_SESSION_SPACE",
                                           layer_name=layer_name, offset=proc.Session)
            session_id = session_space.SessionId
            if session_id in seen_ids:
                continue
        except exceptions.InvalidAddressException:
            vollog.log(
                constants.LOGLEVEL_VVV,
                "Process {} does not have a valid Session or a layer could not be constructed for it"
                .format(proc_id))
            continue
        # save the layer if we haven't seen the session yet
        seen_ids.append(session_id)
        yield proc_layer_name
def method_fixed_mapping(self,
                         context: interfaces.context.ContextInterface,
                         vlayer: layers.intel.Intel,
                         progress_callback: constants.ProgressCallback = None) -> ValidKernelsType:
    """Tests whether the kernel sits at its fixed (non-randomized) base
    address and, if so, records the kernel virtual offset in the config.

    Args:
        context: The context holding the layers to examine
        vlayer: The virtual (intel) layer whose kernel base is sought
        progress_callback: Optional callable to report scan progress

    Returns:
        A dictionary of validated kernels (empty if none verified)
    """
    # TODO: Verify this is a windows image
    vollog.debug("Kernel base determination - testing fixed base address")
    valid_kernels = {}
    virtual_layer_name = vlayer.name
    physical_layer_name = self.get_physical_layer_name(context, vlayer)
    kvo_path = interfaces.configuration.path_join(vlayer.config_path, 'kernel_virtual_offset')
    for kernel in scan(ctx=context, layer_name=physical_layer_name,
                       page_size=vlayer.page_size, progress_callback=progress_callback):
        # It seems the kernel is loaded at a fixed mapping (presumably because the memory manager hasn't started yet)
        mz_offset = kernel['mz_offset']
        if mz_offset is None or not isinstance(mz_offset, int):
            # Rule out kernels that couldn't find a suitable MZ header
            continue
        if vlayer.bits_per_register == 64:
            kvo = mz_offset + (31 << int(math.ceil(math.log2(vlayer.maximum_address + 1)) - 5))
        else:
            kvo = mz_offset + (1 << (vlayer.bits_per_register - 1))
        try:
            kvp = vlayer.mapping(kvo, 0)
            if any(p == mz_offset and mapped_layer == physical_layer_name
                   for (_, p, _, mapped_layer) in kvp):
                valid_kernels[virtual_layer_name] = (kvo, kernel)
                # Sit the virtual offset under the TranslationLayer it applies to
                context.config[kvo_path] = kvo
                vollog.debug("Setting kernel_virtual_offset to {}".format(hex(kvo)))
                break
            else:
                vollog.debug("Potential kernel_virtual_offset did not map to expected location: {}".format(hex(kvo)))
        except exceptions.InvalidAddressException:
            vollog.debug("Potential kernel_virtual_offset caused a page fault: {}".format(hex(kvo)))
    return valid_kernels
def find_suitable_requirements(cls, context: interfaces.context.ContextInterface,
                               config_path: str,
                               requirement: interfaces.configuration.RequirementInterface,
                               stacked_layers: List[str]) -> Optional[Tuple[str, str]]:
    """Looks for translation layer requirements and attempts to apply the
    stacked layers to it.

    If it succeeds it returns the configuration path and layer name where
    the stacked nodes were spliced into the tree.

    Returns:
        A tuple of a configuration path and layer name for the top of the
        stacked layers or None if suitable requirements are not found
    """
    child_config_path = interfaces.configuration.path_join(config_path, requirement.name)
    if isinstance(requirement, requirements.TranslationLayerRequirement):
        if not requirement.unsatisfied(context, config_path):
            # Already satisfied: report the current setting
            return child_config_path, context.config.get(child_config_path, None)
        original_setting = context.config.get(child_config_path, None)
        for candidate in stacked_layers:
            context.config[child_config_path] = candidate
            if not requirement.unsatisfied(context, config_path):
                return child_config_path, candidate
            # Clean-up to restore the config
            if original_setting:
                context.config[child_config_path] = original_setting
            else:
                del context.config[child_config_path]
    # Recurse into sub-requirements looking for a suitable one
    for subreq in requirement.requirements.values():
        result = cls.find_suitable_requirements(context, child_config_path, subreq, stacked_layers)
        if result:
            return result
    return None
def list_registry_callbacks(cls, context: interfaces.context.ContextInterface,
                            layer_name: str, symbol_table: str,
                            callback_table_name: str) -> Iterable[Tuple[str, int, None]]:
    """Lists all registry callbacks.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        callback_table_name: The name of the table containing the callback symbols

    Yields:
        A name, location and optional detail string
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    full_type_name = callback_table_name + constants.BANG + "_EX_CALLBACK_ROUTINE_BLOCK"
    try:
        vector_offset = ntkrnlmp.get_symbol("CmpCallBackVector").address
        count_offset = ntkrnlmp.get_symbol("CmpCallBackCount").address
    except exceptions.SymbolError:
        vollog.debug("Cannot find CmpCallBackVector or CmpCallBackCount")
        return
    callback_count = ntkrnlmp.object(object_type="unsigned int", offset=count_offset)
    if callback_count == 0:
        return
    fast_refs = ntkrnlmp.object(object_type="array", offset=vector_offset,
                                subtype=ntkrnlmp.get_type("_EX_FAST_REF"),
                                count=callback_count)
    for fast_ref in fast_refs:
        try:
            callback = fast_ref.dereference().cast(full_type_name)
        except exceptions.InvalidAddressException:
            continue
        if callback.Function != 0:
            yield "CmRegisterCallback", callback.Function, None
def list_processes(cls, context: interfaces.context.ContextInterface,
                   layer_name: str, symbol_table: str,
                   filter_func: Callable[[interfaces.objects.ObjectInterface], bool] = lambda _: False) -> \
        Iterable[interfaces.objects.ObjectInterface]:
    """Lists all the processes in the primary layer that are in the pid
    config option.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        filter_func: A function which takes an EPROCESS object and returns True if the process should be ignored/filtered

    Returns:
        The list of EPROCESS objects from the `layer_name` layer's PsActiveProcessHead list after filtering
    """
    # We only use the object factory to demonstrate how to use one
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    list_head_addr = ntkrnlmp.get_symbol("PsActiveProcessHead").address
    list_entry = ntkrnlmp.object(object_type="_LIST_ENTRY", offset=list_head_addr)
    # This is example code to demonstrate how to use symbol_space directly, rather than through a module:
    #
    # ```
    # reloff = self.context.symbol_space.get_type(
    #     self.config['nt_symbols'] + constants.BANG + "_EPROCESS").relative_child_offset(
    #     "ActiveProcessLinks")
    # ```
    #
    # Note: "nt_symbols!_EPROCESS" could have been used, but would rely on the "nt_symbols" symbol table not already
    # having been present. Strictly, the value of the requirement should be joined with the BANG character
    # defined in the constants file
    reloff = ntkrnlmp.get_type("_EPROCESS").relative_child_offset("ActiveProcessLinks")
    # Step back from the list head to the enclosing _EPROCESS
    eproc = ntkrnlmp.object(object_type="_EPROCESS",
                            offset=list_entry.vol.offset - reloff, absolute=True)
    for proc in eproc.ActiveProcessLinks:
        if not filter_func(proc):
            yield proc
def construct(self, context: interfaces.context.ContextInterface, config_path: str) -> None:
    """Constructs the appropriate layer and adds it based on the class parameter."""
    config_path = interfaces.configuration.path_join(config_path, self.name)
    # Pick a layer name that doesn't collide with an existing layer
    name = self.name
    suffix = 2
    while name in context.layers:
        name = self.name + str(suffix)
        suffix += 1
    # All mandatory sub-requirements must be satisfied before constructing
    if any(subreq.unsatisfied(context, config_path)
           for subreq in self.requirements.values() if not subreq.optional):
        return None
    args = {"context": context, "config_path": config_path, "name": name}
    obj = self._construct_class(context, config_path, args)
    if obj is not None and isinstance(obj, interfaces.layers.DataLayerInterface):
        context.add_layer(obj)
        # This should already be done by the _construct_class method
        # context.config[config_path] = obj.name
    return None
def populate_config(self, context: interfaces.context.ContextInterface,
                    configurables_list: Dict[str, interfaces.configuration.ConfigurableInterface],
                    args: argparse.Namespace, plugin_config_path: str) -> None:
    """Populate the context config based on the returned args.

    We have already determined these elements must be descended from
    ConfigurableInterface

    Args:
        context: The volatility context to operate on
        configurables_list: A dictionary of configurable items that can be configured on the plugin
        args: An object containing the arguments necessary
        plugin_config_path: The path within the context's config containing the plugin's configuration
    """
    vargs = vars(args)
    for configurable in configurables_list:
        for requirement in configurables_list[configurable].get_requirements():
            value = vargs.get(requirement.name, None)
            if value is None:
                continue
            if isinstance(requirement, requirements.URIRequirement):
                if isinstance(value, str):
                    # Bare filesystem paths are converted to file:// URLs
                    if not parse.urlparse(value).scheme:
                        if not os.path.exists(value):
                            raise TypeError("Non-existant file {} passed to URIRequirement".format(value))
                        value = "file://" + request.pathname2url(os.path.abspath(value))
            if isinstance(requirement, requirements.ListRequirement):
                if not isinstance(value, list):
                    raise TypeError("Configuration for ListRequirement was not a list")
                # Coerce each element to the requirement's declared element type
                value = [requirement.element_type(x) for x in value]
            if not inspect.isclass(configurables_list[configurable]):
                config_path = configurables_list[configurable].config_path
            else:
                # We must be the plugin, so name it appropriately:
                config_path = plugin_config_path
            extended_path = interfaces.configuration.path_join(config_path, requirement.name)
            context.config[extended_path] = value
    # Fix: removed a leftover debugging breakpoint (pdb.set_trace()) that
    # dropped every invocation into the interactive debugger
def dump_pe(cls, context: interfaces.context.ContextInterface,
            pe_table_name: str,
            dll_entry: interfaces.objects.ObjectInterface,
            open_method: Type[interfaces.plugins.FileHandlerInterface],
            layer_name: str = None,
            prefix: str = '') -> Optional[interfaces.plugins.FileHandlerInterface]:
    """Extracts the complete data for a process as a FileInterface

    Args:
        context: the context to operate upon
        pe_table_name: the name for the symbol table containing the PE format symbols
        dll_entry: the object representing the module
        layer_name: the layer that the DLL lives within
        open_method: class for constructing output files

    Returns:
        An open FileHandlerInterface object containing the complete data for the DLL or None in the case of failure
    """
    try:
        try:
            dll_name = dll_entry.FullDllName.get_string()
        except exceptions.InvalidAddressException:
            dll_name = 'UnreadbleDLLName'
        # Default to the layer the DLL entry itself lives in
        target_layer = layer_name if layer_name is not None else dll_entry.vol.layer_name
        file_handle = open_method("{}{}.{:#x}.{:#x}.dmp".format(
            prefix, ntpath.basename(dll_name), dll_entry.vol.offset, dll_entry.DllBase))
        dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                    offset=dll_entry.DllBase, layer_name=target_layer)
        # Replay the reconstructed PE image into the output file
        for offset, data in dos_header.reconstruct():
            file_handle.seek(offset)
            file_handle.write(data)
    except (IOError, exceptions.VolatilityException, OverflowError, ValueError) as excp:
        vollog.debug("Unable to dump dll at offset {}: {}".format(dll_entry.DllBase, excp))
        return None
    return file_handle
def set_kernel_virtual_offset(self, context: interfaces.context.ContextInterface,
                              valid_kernels: ValidKernelsType) -> None:
    """Traverses the requirement tree, looking for kernel_virtual_offset
    values that may need setting and sets it based on the previously
    identified `valid_kernels`.

    Args:
        context: Context on which to operate and provide the kernel virtual offset
        valid_kernels: List of valid kernels and offsets
    """
    for virtual_layer, (kvo, _kernel) in valid_kernels.items():
        # Set the virtual offset under the TranslationLayer it applies to
        kvo_path = interfaces.configuration.path_join(
            context.layers[virtual_layer].config_path, 'kernel_virtual_offset')
        context.config[kvo_path] = kvo
        vollog.debug("Setting kernel_virtual_offset to {}".format(hex(kvo)))
def get_version_information(cls, context: interfaces.context.ContextInterface,
                            pe_table_name: str, layer_name: str,
                            base_address: int) -> Tuple[int, int, int, int]:
    """Get File and Product version information from PE files.

    Args:
        context: volatility context on which to operate
        pe_table_name: name of the PE table
        layer_name: name of the layer containing the PE file
        base_address: base address of the PE (where MZ is found)
    """
    if layer_name is None:
        raise ValueError("Layer must be a string not None")
    dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                offset=base_address, layer_name=layer_name)
    # Reassemble the PE image into an in-memory buffer for pefile to parse
    pe_data = io.BytesIO()
    for offset, chunk in dos_header.reconstruct():
        pe_data.seek(offset)
        pe_data.write(chunk)
    pe = pefile.PE(data=pe_data.getvalue(), fast_load=True)
    pe.parse_data_directories([pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_RESOURCE"]])
    if isinstance(pe.VS_FIXEDFILEINFO, list):
        # pefile >= 2018.8.8 (estimated)
        version_struct = pe.VS_FIXEDFILEINFO[0]
    else:
        # pefile <= 2017.11.5 (estimated)
        version_struct = pe.VS_FIXEDFILEINFO
    # Product version is packed as two 16-bit halves in each 32-bit field
    major = version_struct.ProductVersionMS >> 16
    minor = version_struct.ProductVersionMS & 0xFFFF
    product = version_struct.ProductVersionLS >> 16
    build = version_struct.ProductVersionLS & 0xFFFF
    pe_data.close()
    return major, minor, product, build
def protect_values(cls, context: interfaces.context.ContextInterface,
                   layer_name: str, symbol_table: str) -> Iterable[int]:
    """Look up the array of memory protection constants from the memory
    sample. These don't change often, but if they do in the future, then
    finding them dynamically versus hard-coding here will ensure we parse
    them properly.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
    """
    kvo = context.layers[layer_name].config["kernel_virtual_offset"]
    ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
    table_addr = ntkrnlmp.get_symbol("MmProtectToValue").address
    # The kernel table holds 32 int entries
    return ntkrnlmp.object(object_type="array", offset=table_addr,
                           subtype=ntkrnlmp.get_type("int"), count=32)  # type: ignore
def scan_hives(cls, context: interfaces.context.ContextInterface,
               layer_name: str, symbol_table: str) -> \
        Iterable[interfaces.objects.ObjectInterface]:
    """Scans for hives using the poolscanner module and constraints or
    bigpools module with tag.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols

    Returns:
        A list of Hive objects as found from the `layer_name` layer based on Hive pool signatures
    """
    is_64bit = symbols.symbol_table_is_64bit(context, symbol_table)
    is_windows_8_1_or_later = HiveScan.is_windows_8_1_or_later(context=context, symbol_table=symbol_table)
    if is_windows_8_1_or_later and is_64bit:
        # Windows 8.1+ x64 allocates hives from the big pool with tag CM10
        kvo = context.layers[layer_name].config['kernel_virtual_offset']
        ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
        for pool in bigpools.BigPools.list_big_pools(context, layer_name=layer_name,
                                                     symbol_table=symbol_table, tags=["CM10"]):
            yield ntkrnlmp.object(object_type="_CMHIVE", offset=pool.Va, absolute=True)
    else:
        # Older systems: fall back to classic pool-header scanning
        constraints = poolscanner.PoolScanner.builtin_constraints(symbol_table, [b'CM10'])
        for _constraint, mem_object, _header in poolscanner.PoolScanner.generate_pool_scan(
                context, layer_name, symbol_table, constraints):
            yield mem_object
def dump_pe(cls,
            context: interfaces.context.ContextInterface,
            pe_table_name: str,
            dll_entry: interfaces.objects.ObjectInterface,
            layer_name: Optional[str] = None) -> interfaces.plugins.FileInterface:
    """Extracts the complete data for a process as a FileInterface.

    Args:
        context: the context to operate upon
        pe_table_name: the name for the symbol table containing the PE format symbols
        dll_entry: the object representing the module
        layer_name: the layer that the DLL lives within (defaults to the
            layer the dll_entry object was constructed on)

    Returns:
        A FileInterface object containing the complete data for the DLL or None in the case of failure
    """
    filedata = None
    try:
        try:
            name = dll_entry.FullDllName.get_string()
        except exceptions.InvalidAddressException:
            # The name is unreadable in this sample; still dump under a
            # placeholder. (Fixed typo: was 'UnreadbleDLLName'.)
            name = 'UnreadableDLLName'
        if layer_name is None:
            layer_name = dll_entry.vol.layer_name

        filedata = interfaces.plugins.FileInterface("{0}.{1:#x}.{2:#x}.dmp".format(
            ntpath.basename(name), dll_entry.vol.offset, dll_entry.DllBase))

        # Rebuild the PE from its DOS header and write each reconstructed
        # chunk at its file offset.
        dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                    offset=dll_entry.DllBase,
                                    layer_name=layer_name)
        for offset, data in dos_header.reconstruct():
            filedata.data.seek(offset)
            filedata.data.write(data)
    except Exception as excp:
        # Deliberate best-effort: dumping can fail for many reasons (paged-out
        # memory, corrupt headers); log at debug and return None on failure.
        vollog.debug("Unable to dump dll at offset {}: {}".format(
            dll_entry.DllBase, excp))
    return filedata
def pool_scan(cls,
              context: interfaces.context.ContextInterface,
              layer_name: str,
              symbol_table: str,
              pool_constraints: List[PoolConstraint],
              alignment: int = 8,
              progress_callback: Optional[constants.ProgressCallback] = None) \
        -> Generator[Tuple[PoolConstraint, interfaces.objects.ObjectInterface], None, None]:
    """Returns the _POOL_HEADER object (based on the symbol_table template)
    after scanning through layer_name returning all headers that match any
    of the constraints provided.  Only one constraint can be provided per
    tag.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        pool_constraints: List of pool constraints used to limit the scan results
        alignment: An optional value that all pool headers will be aligned to
        progress_callback: An optional function to provide progress feedback whilst scanning

    Returns:
        An Iterable of pool constraints and the pool headers associated with them
    """
    # Index the constraints by tag, rejecting duplicate tags up front.
    tag_to_constraint = {}  # type: Dict[bytes, PoolConstraint]
    for pool_constraint in pool_constraints:
        if pool_constraint.tag in tag_to_constraint:
            raise ValueError(
                "Constraint tag is used for more than one constraint: {}".
                format(repr(pool_constraint.tag)))
        tag_to_constraint[pool_constraint.tag] = pool_constraint

    # Build a module around the pool-header template so the scanner can
    # instantiate _POOL_HEADER objects at matched offsets.
    header_table_name = cls.get_pool_header_table(context, symbol_table)
    header_module = context.module(header_table_name, layer_name, offset=0)

    # Delegate the actual byte-level search to the layer's scan machinery.
    header_scanner = PoolHeaderScanner(header_module, tag_to_constraint, alignment)
    yield from context.layers[layer_name].scan(context, header_scanner, progress_callback)
def get_cmdline(cls, context: interfaces.context.ContextInterface, kernel_table_name: str, proc):
    """Extracts the cmdline from PEB

    Args:
        context: the context to operate upon
        kernel_table_name: the name for the symbol table containing the kernel's symbols
        proc: the process object

    Returns:
        A string with the command line
    """
    # Map the process's address space first: the PEB lives in user-mode
    # memory and is only readable through the process layer.
    process_layer = proc.add_process_layer()
    peb_object = context.object(kernel_table_name + constants.BANG + "_PEB",
                                layer_name=process_layer,
                                offset=proc.Peb)
    return peb_object.ProcessParameters.CommandLine.get_string()
def _enumerate_system_va_type(cls, large_page_size: int, system_range_start: int, module: interfaces.context.ContextInterface, type_array: interfaces.objects.ObjectInterface) -> Dict[str, List[Tuple[int, int]]]: result = {} # type: Dict[str, List[Tuple[int, int]]] system_va_type = module.get_enumeration('_MI_SYSTEM_VA_TYPE') start = system_range_start prev_entry = -1 cur_size = large_page_size for entry in type_array: entry = system_va_type.lookup(entry) if entry != prev_entry: region_range = result.get(entry, []) region_range.append((start, cur_size)) result[entry] = region_range start = start + cur_size cur_size = large_page_size else: cur_size += large_page_size prev_entry = entry return result
def get_kdbg_structure(cls, context: interfaces.context.ContextInterface, config_path: str,
                       layer_name: str, symbol_table: str) -> interfaces.objects.ObjectInterface:
    """Returns the KDDEBUGGER_DATA64 structure for a kernel"""
    # Resolve the kernel module and the virtual offset of the debugger block.
    kernel = cls.get_kernel_module(context, layer_name, symbol_table)
    block_offset = kernel.get_symbol("KdDebuggerDataBlock").address

    # Build a dedicated symbol table describing the KDBG layout, reusing the
    # kernel's native types so sizes/endianness match the sample.
    kdbg_config_path = interfaces.configuration.path_join(config_path, 'kdbg')
    kdbg_table_name = intermed.IntermediateSymbolTable.create(
        context,
        kdbg_config_path,
        "windows",
        "kdbg",
        native_types = context.symbol_space[symbol_table].natives,
        class_types = extensions.kdbg.class_types)

    return context.object(kdbg_table_name + constants.BANG + "_KDDEBUGGER_DATA64",
                          offset = kernel.offset + block_offset,
                          layer_name = layer_name)
def get_ntheader_structure(cls, context: interfaces.context.ContextInterface, config_path: str,
                           layer_name: str) -> interfaces.objects.ObjectInterface:
    """Gets the ntheader structure for the kernel of the specified layer"""
    # The kernel_virtual_offset config option only exists on Intel layers.
    virtual_layer = context.layers[layer_name]
    if not isinstance(virtual_layer, layers.intel.Intel):
        raise TypeError("Virtual Layer is not an intel layer")
    kernel_base = virtual_layer.config["kernel_virtual_offset"]

    # Construct a PE symbol table, then overlay a DOS header at the kernel
    # base and follow it to the NT header.
    pe_table_name = intermed.IntermediateSymbolTable.create(
        context,
        interfaces.configuration.path_join(config_path, 'pe'),
        "windows",
        "pe",
        class_types = extensions.pe.class_types)
    dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                offset = kernel_base,
                                layer_name = layer_name)
    return dos_header.get_nt_header()