def construct(self, context: interfaces.context.ContextInterface, config_path: str) -> None:
    """Constructs the appropriate layer and adds it based on the class parameter.

    Args:
        context: The context to add the constructed layer to
        config_path: The configuration path (the requirement's name is appended to it)

    Returns:
        None, whether or not construction succeeded
    """
    config_path = interfaces.configuration.path_join(config_path, self.name)

    # Determine the layer name, appending an integer suffix until it is unique
    name = self.name
    counter = 2
    while name in context.layers:
        name = self.name + str(counter)
        counter += 1

    args = {"context": context, "config_path": config_path, "name": name}

    # Bail out if any non-optional subrequirement is still unsatisfied
    # (generator instead of a materialized list - any() short-circuits)
    if any(subreq.unsatisfied(context, config_path)
           for subreq in self.requirements.values()
           if not subreq.optional):
        return None

    obj = self._construct_class(context, config_path, args)
    if obj is not None and isinstance(obj, interfaces.layers.DataLayerInterface):
        context.add_layer(obj)
        # This should already be done by the _construct_class method
        # context.config[config_path] = obj.name
    return None
def process_dump(
        cls, context: interfaces.context.ContextInterface, kernel_table_name: str, pe_table_name: str,
        proc: interfaces.objects.ObjectInterface,
        open_method: Type[interfaces.plugins.FileHandlerInterface]) -> interfaces.plugins.FileHandlerInterface:
    """Extracts the complete data for a process as a FileHandlerInterface

    Args:
        context: the context to operate upon
        kernel_table_name: the name for the symbol table containing the kernel's symbols
        pe_table_name: the name for the symbol table containing the PE format symbols
        proc: the process object whose memory should be output
        open_method: class to provide context manager for opening the file

    Returns:
        An open FileHandlerInterface object containing the complete data for the process or None in the case of failure
    """
    file_handle = None
    try:
        # Build a per-process layer so the process' own address space can be read
        proc_layer_name = proc.add_process_layer()
        # The PEB holds the image base from which the PE can be reconstructed
        peb = context.object(kernel_table_name + constants.BANG + "_PEB",
                             layer_name = proc_layer_name,
                             offset = proc.Peb)
        dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                    offset = peb.ImageBaseAddress,
                                    layer_name = proc_layer_name)
        file_handle = open_method("pid.{0}.{1:#x}.dmp".format(proc.UniqueProcessId, peb.ImageBaseAddress))
        # reconstruct() yields (offset, data) chunks of the rebuilt PE image
        for offset, data in dos_header.reconstruct():
            file_handle.seek(offset)
            file_handle.write(data)
    except Exception as excp:
        # Deliberately broad best-effort dump: log and return whatever handle exists
        # (None if failure occurred before the file was opened, partial otherwise)
        vollog.debug("Unable to dump PE with pid {}: {}".format(proc.UniqueProcessId, excp))
    return file_handle
def __call__(self,
             context: interfaces.context.ContextInterface,
             config_path: str,
             requirement: interfaces.configuration.RequirementInterface,
             progress_callback: constants.ProgressCallback = None) -> None:
    """Traverses the requirement tree and, for unsatisfied translation layer
    requirements with a chosen class, attempts to fill in the page_map_offset
    (DTB) by metadata lookup or by scanning the physical layer.

    Args:
        context: The context holding configuration and layers
        config_path: The configuration path of the parent of the requirement
        requirement: The requirement to (recursively) attempt to satisfy
        progress_callback: Optional callback reported to during scanning
    """
    useful = []
    sub_config_path = interfaces.configuration.path_join(
        config_path, requirement.name)
    if (isinstance(requirement, requirements.TranslationLayerRequirement)
            and requirement.requirements.get("class", False)
            and requirement.unsatisfied(context, config_path)):
        class_req = requirement.requirements["class"]

        # Keep only the tests whose layer type matches the configured class
        for test in self.tests:
            if (test.layer_type.__module__ + "." + test.layer_type.__name__ ==
                    class_req.config_value(context, sub_config_path)):
                useful.append(test)

        # Determine if a class has been chosen
        # Once an appropriate class has been chosen, attempt to determine the page_map_offset value
        if ("memory_layer" in requirement.requirements
                and not requirement.requirements["memory_layer"].unsatisfied(
                    context, sub_config_path)):
            # Only bother getting the DTB if we don't already have one
            page_map_offset_path = interfaces.configuration.path_join(
                sub_config_path, "page_map_offset")
            if not context.config.get(page_map_offset_path, None):
                physical_layer_name = requirement.requirements[
                    "memory_layer"].config_value(context, sub_config_path)
                if not isinstance(physical_layer_name, str):
                    raise TypeError(
                        f"Physical layer name is not a string: {sub_config_path}"
                    )
                physical_layer = context.layers[physical_layer_name]
                # Check lower layer metadata first
                if physical_layer.metadata.get('page_map_offset', None):
                    context.config[page_map_offset_path] = physical_layer.metadata[
                        'page_map_offset']
                else:
                    # Scan for a page map; keep only the first hit
                    hits = physical_layer.scan(context, PageMapScanner(useful),
                                               progress_callback)
                    for test, dtb in hits:
                        context.config[page_map_offset_path] = dtb
                        break
                    else:
                        # for/else: no hits at all, so construction cannot proceed
                        return None

        if isinstance(requirement,
                      interfaces.configuration.ConstructableRequirementInterface):
            requirement.construct(context, config_path)
    else:
        # Not an unsatisfied translation layer requirement: recurse into children
        for subreq in requirement.requirements.values():
            self(context, sub_config_path, subreq)
def __call__(self,
             context: interfaces.context.ContextInterface,
             config_path: str,
             requirement: interfaces.configuration.RequirementInterface,
             progress_callback: constants.ProgressCallback = None) -> None:
    """Finds translation layers that can have swap layers added.

    For each unsatisfied swap-layer requirement, one FileLayer sub-requirement is
    created per configured 'single_swap_locations' entry, then the swap
    requirement is constructed.

    Args:
        context: The context holding configuration and layers
        config_path: The configuration path of the parent of the requirement
        requirement: The requirement tree to search for translation layers
        progress_callback: Unused here; accepted for automagic interface parity
    """
    path_join = interfaces.configuration.path_join

    self._translation_requirement = self.find_requirements(
        context,
        config_path,
        requirement,
        requirements.TranslationLayerRequirement,
        shortcut=False)
    for trans_sub_config, trans_req in self._translation_requirement:
        if not isinstance(trans_req, requirements.TranslationLayerRequirement):
            # We need this so the type-checker knows we're a TranslationLayerRequirement
            continue
        swap_sub_config, swap_req = self.find_swap_requirement(
            trans_sub_config, trans_req)
        counter = 0
        swap_config = interfaces.configuration.parent_path(swap_sub_config)

        if swap_req and swap_req.unsatisfied(context, swap_config):
            # See if any of them need constructing
            for swap_location in self.config.get('single_swap_locations', []):
                # Setup config locations/paths
                current_layer_name = swap_req.name + str(counter)
                current_layer_path = path_join(swap_sub_config, current_layer_name)
                layer_loc_path = path_join(current_layer_path, "location")
                layer_class_path = path_join(current_layer_path, "class")
                counter += 1

                # Fill in the config (only for non-empty locations; a requirement
                # entry is still added either way, keeping the counter in step)
                if swap_location:
                    context.config[current_layer_path] = current_layer_name
                    context.config[layer_loc_path] = swap_location
                    context.config[
                        layer_class_path] = 'volatility3.framework.layers.physical.FileLayer'

                # Add the requirement
                new_req = requirements.TranslationLayerRequirement(
                    name=current_layer_name,
                    description="Swap Layer",
                    optional=False)
                swap_req.add_requirement(new_req)

            context.config[path_join(swap_sub_config, 'number_of_elements')] = counter
            context.config[swap_sub_config] = True

            swap_req.construct(context, swap_config)
def load_windows_symbol_table(
        cls,
        context: interfaces.context.ContextInterface,
        guid: str,
        age: int,
        pdb_name: str,
        symbol_table_class: str,
        config_path: str = 'pdbutility',
        progress_callback: constants.ProgressCallback = None):
    """Loads (downloading if necessary) a windows symbol table

    Args:
        context: the context to operate upon
        guid: the GUID identifying the required PDB (upper-cased for lookup)
        age: the age value appended to the GUID for the lookup
        pdb_name: the PDB file name (trailing NUL bytes are stripped)
        symbol_table_class: the class name stored in the config for the symbol table
        config_path: the configuration path under which the table is configured
        progress_callback: callback reported to during any PDB download

    Returns:
        The name of the constructed symbol table (from the config), or None if
        the intermediate symbol file could not be found or produced
    """
    filter_string = os.path.join(pdb_name.strip('\x00'),
                                 guid.upper() + "-" + str(age))

    isf_path = False
    # Take the first result of search for the intermediate file
    for value in intermed.IntermediateSymbolTable.file_symbol_url(
            "windows", filter_string):
        isf_path = value
        break
    else:
        # If none are found, attempt to download the pdb, convert it and try again
        cls.download_pdb_isf(context, guid.upper(), age, pdb_name,
                             progress_callback)
        # Try again
        for value in intermed.IntermediateSymbolTable.file_symbol_url(
                "windows", filter_string):
            isf_path = value
            break

    if not isf_path:
        vollog.debug("Required symbol library path not found: {}".format(
            filter_string))
        vollog.info(
            "The symbols can be downloaded later using pdbconv.py -p {} -g {}"
            .format(pdb_name.strip('\x00'),
                    guid.upper() + str(age)))
        return None

    vollog.debug("Using symbol library: {}".format(filter_string))

    # Set the discovered options
    join = interfaces.configuration.path_join
    context.config[join(config_path, "class")] = symbol_table_class
    context.config[join(config_path, "isf_url")] = isf_path
    parent_config_path = interfaces.configuration.parent_path(config_path)
    requirement_name = interfaces.configuration.path_head(config_path)

    # Construct the appropriate symbol table
    requirement = SymbolTableRequirement(
        name=requirement_name,
        description="PDBUtility generated symbol table")
    requirement.construct(context, parent_config_path)
    return context.config[config_path]
def list_bugcheck_reason_callbacks(
        cls, context: interfaces.context.ContextInterface, layer_name: str,
        symbol_table: str,
        callback_table_name: str) -> Iterable[Tuple[str, int, str]]:
    """Lists all kernel bugcheck reason callbacks.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        callback_table_name: The name of the table containing the callback symbols

    Yields:
        A name, location and optional detail string
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table,
                              layer_name=layer_name,
                              offset=kvo)

    try:
        # Symbol address is relative; the kernel virtual offset is added below
        list_offset = ntkrnlmp.get_symbol(
            "KeBugCheckReasonCallbackListHead").address
    except exceptions.SymbolError:
        vollog.debug("Cannot find KeBugCheckReasonCallbackListHead")
        return

    full_type_name = callback_table_name + constants.BANG + "_KBUGCHECK_REASON_CALLBACK_RECORD"
    callback_record = context.object(object_type=full_type_name,
                                     offset=kvo + list_offset,
                                     layer_name=layer_name)

    # Walk the linked list of callback records
    for callback in callback_record.Entry:
        # Skip records whose routine pointer isn't fully readable (64 bytes checked)
        if not context.layers[layer_name].is_valid(
                callback.CallbackRoutine, 64):
            continue

        try:
            component: Union[
                interfaces.renderers.BaseAbsentValue,
                interfaces.objects.ObjectInterface] = ntkrnlmp.object(
                    "string",
                    absolute=True,
                    offset=callback.Component,
                    max_length=64,
                    errors="replace")
        except exceptions.InvalidAddressException:
            component = renderers.UnreadableValue()

        yield "KeBugCheckReasonCallbackListHead", callback.CallbackRoutine, component
def __call__(self,
             context: interfaces.context.ContextInterface,
             config_path: str,
             requirement: interfaces.configuration.RequirementInterface,
             progress_callback: constants.ProgressCallback = None) -> None:
    """Automagic that fulfils ModuleRequirements whose only missing piece is the
    offset, deriving it from the layer's kernel_virtual_offset.

    Args:
        context: The context holding configuration and layers
        config_path: The configuration path of the parent of the requirement
        requirement: The requirement to (recursively) attempt to satisfy
        progress_callback: Unused here; passed through on recursion
    """
    new_config_path = interfaces.configuration.path_join(
        config_path, requirement.name)
    if not isinstance(requirement, configuration.requirements.ModuleRequirement):
        # Check subrequirements
        for req in requirement.requirements:
            self(context, new_config_path, requirement.requirements[req],
                 progress_callback)
        return
    if not requirement.unsatisfied(context, config_path):
        return
    # The requirement is unfulfilled and is a ModuleRequirement

    context.config[interfaces.configuration.path_join(
        new_config_path,
        'class')] = 'volatility3.framework.contexts.ConfigurableModule'

    # Give up unless 'offset' is the only remaining unsatisfied sub-requirement
    for req in requirement.requirements:
        if requirement.requirements[req].unsatisfied(
                context, new_config_path) and req != 'offset':
            return

    # We now just have the offset requirement, but the layer requirement has been fulfilled.
    # Unfortunately we don't know the layer name requirement's exact name
    for req in requirement.requirements:
        if isinstance(
                requirement.requirements[req],
                configuration.requirements.TranslationLayerRequirement):
            # Copy the layer's kernel_virtual_offset into the module's offset
            layer_kvo_config_path = interfaces.configuration.path_join(
                new_config_path, req, 'kernel_virtual_offset')
            offset_config_path = interfaces.configuration.path_join(
                new_config_path, 'offset')
            offset = context.config[layer_kvo_config_path]
            context.config[offset_config_path] = offset
        elif isinstance(requirement.requirements[req],
                        configuration.requirements.SymbolTableRequirement):
            # Default the symbol shift to zero for the module's symbol table
            symbol_shift_config_path = interfaces.configuration.path_join(
                new_config_path, req, 'symbol_shift')
            context.config[symbol_shift_config_path] = 0

    # Now construct the module based on the sub-requirements
    requirement.construct(context, config_path)
def get_ntheader_structure(
        cls, context: interfaces.context.ContextInterface, config_path: str,
        layer_name: str) -> interfaces.objects.ObjectInterface:
    """Gets the ntheader structure for the kernel of the specified layer.

    Args:
        context: The context containing the layer and symbol tables
        config_path: The configuration path under which the PE table is created
        layer_name: The name of the (intel) virtual layer holding the kernel

    Returns:
        The NT header object parsed from the kernel image

    Raises:
        TypeError: if the named layer is not an intel layer
    """
    layer = context.layers[layer_name]
    if not isinstance(layer, layers.intel.Intel):
        raise TypeError("Virtual Layer is not an intel layer")

    kernel_offset = layer.config["kernel_virtual_offset"]

    # Build a PE symbol table so header structures can be overlaid on the image
    pe_symbol_table = intermed.IntermediateSymbolTable.create(
        context,
        interfaces.configuration.path_join(config_path, 'pe'),
        "windows",
        "pe",
        class_types=extensions.pe.class_types)

    dos_header = context.object(
        pe_symbol_table + constants.BANG + "_IMAGE_DOS_HEADER",
        offset=kernel_offset,
        layer_name=layer_name)
    return dos_header.get_nt_header()
def get_kdbg_structure(
        cls, context: interfaces.context.ContextInterface, config_path: str,
        layer_name: str,
        symbol_table: str) -> interfaces.objects.ObjectInterface:
    """Returns the KDDEBUGGER_DATA64 structure for a kernel.

    Args:
        context: The context containing the layer and symbol tables
        config_path: The configuration path under which the kdbg table is created
        layer_name: The name of the virtual layer holding the kernel
        symbol_table: The name of the table containing the kernel symbols

    Returns:
        The _KDDEBUGGER_DATA64 object located via KdDebuggerDataBlock
    """
    kernel = cls.get_kernel_module(context, layer_name, symbol_table)

    # The symbol address is relative to the module base
    kdbg_offset = kernel.get_symbol("KdDebuggerDataBlock").address

    # The kdbg table reuses the kernel's native types for correct sizing
    kdbg_table_name = intermed.IntermediateSymbolTable.create(
        context,
        interfaces.configuration.path_join(config_path, 'kdbg'),
        "windows",
        "kdbg",
        native_types=context.symbol_space[symbol_table].natives,
        class_types=extensions.kdbg.class_types)

    return context.object(
        kdbg_table_name + constants.BANG + "_KDDEBUGGER_DATA64",
        offset=kernel.offset + kdbg_offset,
        layer_name=layer_name)
def _get_cryptdll_types(self, context: interfaces.context.ContextInterface,
                        config, config_path: str, proc_layer_name: str,
                        cryptdll_base: int):
    """
    Builds a symbol table from the cryptdll types generated after binary analysis

    Args:
        context: the context to operate upon
        config: unused by this method (the plugin's own self.config is read instead) — TODO confirm it can be dropped from callers
        config_path: the configuration path under which the symbol table is created
        proc_layer_name: name of the lsass.exe process layer
        cryptdll_base: base address of cryptdll.dll inside of lsass.exe

    Returns:
        A module for the cryptdll symbol table, based at cryptdll_base within
        the process layer
    """
    kernel = self.context.modules[self.config['kernel']]

    # Map the generated table's "nt_symbols" references to the kernel's table
    table_mapping = {"nt_symbols": kernel.symbol_table_name}

    cryptdll_symbol_table = intermed.IntermediateSymbolTable.create(
        context=context,
        config_path=config_path,
        sub_path="windows",
        filename="kerb_ecrypt",
        table_mapping=table_mapping)

    return context.module(cryptdll_symbol_table,
                          proc_layer_name,
                          offset=cryptdll_base)
def _method_offset(self,
                   context: interfaces.context.ContextInterface,
                   vlayer: layers.intel.Intel,
                   pattern: bytes,
                   result_offset: int,
                   progress_callback: constants.ProgressCallback = None) -> Optional[ValidKernelType]:
    """Method for finding a suitable kernel offset based on a module table.

    Scans the physical layer for the byte pattern, reads a pointer at a fixed
    offset from each hit, and validates each distinct candidate address until
    a valid kernel is found.

    Args:
        context: The context containing the layers to scan
        vlayer: The virtual (intel) layer the kernel lives in
        pattern: The byte pattern to scan the physical layer for
        result_offset: Offset from each hit at which the candidate pointer lies
        progress_callback: Optional callback reported to during scanning

    Returns:
        A valid kernel description, or None if no candidate validated
    """
    vollog.debug("Kernel base determination - searching layer module list structure")
    physical_layer_name = self.get_physical_layer_name(context, vlayer)
    physical_layer = context.layers[physical_layer_name]

    # TODO: On older windows, this might be \WINDOWS\system32\nt rather than \SystemRoot\system32\nt
    scan_hits = physical_layer.scan(context,
                                    scanners.BytesScanner(pattern),
                                    progress_callback=progress_callback)

    # Validating a candidate launches a scan of the virtual layer, so each
    # distinct address is only ever tried once
    tested: Set[int] = set()
    for hit in scan_hits:
        # TODO: Identify the specific structure we're finding and document this a bit better
        pointer = context.object("pdbscan!unsigned long long",
                                 offset=hit + result_offset,
                                 layer_name=physical_layer_name)
        candidate = pointer & vlayer.address_mask
        if candidate in tested:
            continue
        tested.add(candidate)

        kernel = self.check_kernel_offset(context, vlayer, candidate, progress_callback)
        if kernel:
            return kernel
    return None
def parse_hashtable( cls, context: interfaces.context.ContextInterface, layer_name: str, ht_offset: int, ht_length: int, alignment: int, net_symbol_table: str ) -> Generator[interfaces.objects.ObjectInterface, None, None]: """Parses a hashtable quick and dirty. Args: context: The context to retrieve required elements (layers, symbol tables) from layer_name: The name of the layer on which to operate ht_offset: Beginning of the hash table ht_length: Length of the hash table Returns: The hash table entries which are _not_ empty """ # we are looking for entries whose values are not their own address for index in range(ht_length): current_addr = ht_offset + index * alignment current_pointer = context.object(net_symbol_table + constants.BANG + "pointer", layer_name=layer_name, offset=current_addr) # check if addr of pointer is equal to the value pointed to if current_pointer.vol.offset == current_pointer: continue yield current_pointer
def load_pdb_layer(
        cls, context: interfaces.context.ContextInterface,
        location: str) -> Tuple[str, interfaces.context.ContextInterface]:
    """Loads a PDB file into a layer within the context and returns the name
    of the new layer.

    Note: the context may be changed by this method

    Args:
        context: The context the new layers are based upon (cloned, not modified)
        location: The URL location of the PDB file to load

    Returns:
        A tuple of the MSF layer name and the cloned context containing it
    """
    join = interfaces.configuration.path_join

    file_layer_name = context.layers.free_layer_name("FileLayer")
    file_config_path = join("pdbreader", file_layer_name)

    # Create the file layer
    # This must be specific to get us started, setup the config and run
    pdb_context = context.clone()
    pdb_context.config[join(file_config_path, "location")] = location

    file_layer = physical.FileLayer(pdb_context, file_config_path, file_layer_name)
    pdb_context.add_layer(file_layer)

    # Stack the multi-stream-format layer on top of the raw file layer
    msf_layer_name = context.layers.free_layer_name("MSFLayer")
    msf_config_path = join("pdbreader", msf_layer_name)
    pdb_context.config[join(msf_config_path, "base_layer")] = file_layer_name

    msf_layer = msf.PdbMultiStreamFormat(pdb_context, msf_config_path, msf_layer_name)
    pdb_context.add_layer(msf_layer)
    msf_layer.read_streams()

    return msf_layer_name, pdb_context
def unsatisfied(self, context: interfaces.context.ContextInterface,
                config_path: str) -> Dict[str, interfaces.configuration.RequirementInterface]:
    """Check the types on each of the returned values and their number and then
    call the element type's check for each one.

    Args:
        context: The context containing the configuration store
        config_path: The configuration path of the parent of this requirement

    Returns:
        A dictionary of config-path -> requirement for each unsatisfied
        requirement (empty when satisfied)

    Raises:
        TypeError: if the stored configuration value is not a list
    """
    config_path = interfaces.configuration.path_join(config_path, self.name)
    default = None
    value = self.config_value(context, config_path, default)
    if not value and self.min_elements > 0:
        vollog.log(constants.LOGLEVEL_V,
                   "ListRequirement Unsatisfied - ListRequirement has non-zero min_elements")
        return {config_path: self}
    if value is None and not self.optional:
        # We need to differentiate between no value and an empty list
        vollog.log(constants.LOGLEVEL_V, "ListRequirement Unsatisfied - Value was not specified")
        return {config_path: self}
    elif value is None:
        # Fix: also rebind the local value; previously only the config was updated,
        # so the isinstance check below raised TypeError for an optional,
        # unspecified list
        value = []
        context.config[config_path] = value
    if not isinstance(value, list):
        # TODO: Check this is the correct response for an error
        raise TypeError("Unexpected config value found: {}".format(repr(value)))
    if not (self.min_elements <= len(value)):
        vollog.log(constants.LOGLEVEL_V, "TypeError - Too few values provided to list option.")
        return {config_path: self}
    # NOTE(review): this boundary is exclusive — a list of exactly max_elements
    # items is rejected; confirm that is intended
    if self.max_elements and not (len(value) < self.max_elements):
        vollog.log(constants.LOGLEVEL_V, "TypeError - Too many values provided to list option.")
        return {config_path: self}
    # generator instead of a materialized list - all() short-circuits
    if not all(isinstance(element, self.element_type) for element in value):
        vollog.log(constants.LOGLEVEL_V,
                   "TypeError - At least one element in the list is not of the correct type.")
        return {config_path: self}
    return {}
def list_notify_routines(
        cls, context: interfaces.context.ContextInterface, layer_name: str,
        symbol_table: str, callback_table_name: str
) -> Iterable[Tuple[str, int, Optional[str]]]:
    """Lists all kernel notification routines.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        callback_table_name: The name of the table containing the callback symbols

    Yields:
        A name, location and optional detail string
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table,
                              layer_name=layer_name,
                              offset=kvo)

    is_vista_or_later = versions.is_vista_or_later(
        context=context, symbol_table=symbol_table)

    full_type_name = callback_table_name + constants.BANG + "_GENERIC_CALLBACK"

    # (symbol name, whether the array was extended on Vista and later)
    symbol_names = [("PspLoadImageNotifyRoutine", False),
                    ("PspCreateThreadNotifyRoutine", True),
                    ("PspCreateProcessNotifyRoutine", True)]

    for symbol_name, extended_list in symbol_names:
        try:
            symbol_offset = ntkrnlmp.get_symbol(symbol_name).address
        except exceptions.SymbolError:
            vollog.debug(f"Cannot find {symbol_name}")
            continue

        # Extended arrays hold 64 entries on Vista+, otherwise 8
        if is_vista_or_later and extended_list:
            count = 64
        else:
            count = 8

        fast_refs = ntkrnlmp.object(
            object_type="array",
            offset=symbol_offset,
            subtype=ntkrnlmp.get_type("_EX_FAST_REF"),
            count=count)

        for fast_ref in fast_refs:
            try:
                callback = fast_ref.dereference().cast(full_type_name)
            except exceptions.InvalidAddressException:
                continue

            if callback.Callback != 0:
                yield symbol_name, callback.Callback, None
def get_kernel_module(cls, context: interfaces.context.ContextInterface,
                      layer_name: str, symbol_table: str):
    """Returns the kernel module based on the layer and symbol_table.

    Args:
        context: The context containing the layer and symbol tables
        layer_name: The name of the (intel) virtual layer holding the kernel
        symbol_table: The name of the table containing the kernel symbols

    Returns:
        A module for the kernel, based at the layer's kernel_virtual_offset

    Raises:
        TypeError: if the named layer is not an intel layer
    """
    layer = context.layers[layer_name]
    if not isinstance(layer, layers.intel.Intel):
        raise TypeError("Virtual Layer is not an intel layer")

    kernel_offset = layer.config["kernel_virtual_offset"]
    return context.module(symbol_table, layer_name=layer_name, offset=kernel_offset)
def virtual_process_from_physical(cls,
                                  context: interfaces.context.ContextInterface,
                                  layer_name: str,
                                  symbol_table: str,
                                  proc: interfaces.objects.ObjectInterface) -> \
        Iterable[interfaces.objects.ObjectInterface]:
    """ Returns a virtual process from a physical addressed one

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        proc: the process object with physical address

    Returns:
        A process object on virtual address layer, or implicitly None when no
        offset validates (NOTE(review): annotation says Iterable but a single
        object/None is returned — confirm)
    """
    version = cls.get_osversion(context, layer_name, symbol_table)
    # If it's WinXP->8.1 we have now a physical process address.
    # We'll use the first thread to bounce back to the virtual process
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table,
                              layer_name=layer_name,
                              offset=kvo)
    tleoffset = ntkrnlmp.get_type("_ETHREAD").relative_child_offset(
        "ThreadListEntry")
    # Start out with the member offset
    offsets = [tleoffset]

    # If (and only if) we're dealing with 64-bit Windows 7 SP1
    # then add the other commonly seen member offset to the list
    bits = context.layers[layer_name].bits_per_register
    if version == (6, 1, 7601) and bits == 64:
        offsets.append(tleoffset + 8)

    # Now we can try to bounce back
    for ofs in offsets:
        # Rewind from the first thread's list entry to the containing _ETHREAD
        ethread = ntkrnlmp.object(object_type="_ETHREAD",
                                  offset=proc.ThreadListHead.Flink - ofs,
                                  absolute=True)

        # Ask for the thread's process to get an _EPROCESS with a virtual address layer
        virtual_process = ethread.owning_process()

        # Sanity check the bounce.
        # This compares the original offset with the new one (translated from virtual layer)
        (_, _, ph_offset, _, _) = list(context.layers[layer_name].mapping(
            offset=virtual_process.vol.offset, length=0))[0]
        if virtual_process and \
                proc.vol.offset == ph_offset:
            return virtual_process
def create(cls, context: interfaces.context.ContextInterface, module_name: str,
           layer_name: str, offset: int, **kwargs) -> 'Module':
    """Creates a module, populating its configuration and registering it with
    the context.

    Args:
        context: The context the module is created within
        module_name: The requested name for the module (may be suffixed if taken)
        layer_name: The layer the module resides in
        offset: The offset of the module within the layer
        **kwargs: Further configuration values (including an optional config_path)

    Returns:
        The newly constructed and registered module
    """
    join = interfaces.configuration.path_join

    free_module_name = context.modules.free_module_name(module_name)

    # Check if config_path is None; default to a temporary path if so
    config_path = kwargs.get('config_path', None)
    if config_path is None:
        config_path = join('temporary', 'modules', free_module_name)

    # Populate the configuration
    context.config[join(config_path, 'layer_name')] = layer_name
    context.config[join(config_path, 'offset')] = offset
    # This is important, since the module_name may be changed in case it is already in use
    kwargs.setdefault('symbol_table_name', module_name)
    for key, val in kwargs.items():
        context.config[join(config_path, key)] = val

    # Construct the object and register it with the context
    module = cls(context, config_path, free_module_name)
    context.add_module(module)
    context.config[config_path] = module.name

    return module
def find_aslr(cls,
              context: interfaces.context.ContextInterface,
              symbol_table: str,
              layer_name: str,
              progress_callback: constants.ProgressCallback = None) \
        -> Tuple[int, int]:
    """Determines the offset of the actual DTB in physical space and its
    symbol offset.

    Scans the layer for the swapper task's comm field, rewinds to the
    candidate init_task, and derives the virtual (aslr) and physical (kaslr)
    shifts from it.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        symbol_table: The name of the table containing the kernel symbols
        layer_name: The name of the layer to scan
        progress_callback: Optional callback reported to during scanning

    Returns:
        A (kaslr_shift, aslr_shift) tuple; (0, 0) when no shift is determined
    """
    init_task_symbol = symbol_table + constants.BANG + 'init_task'
    init_task_json_address = context.symbol_space.get_symbol(
        init_task_symbol).address
    # The swapper process name as it appears in task_struct.comm
    swapper_signature = rb"swapper(\/0|\x00\x00)\x00\x00\x00\x00\x00\x00"
    module = context.module(symbol_table, layer_name, 0)
    address_mask = context.symbol_space[symbol_table].config.get(
        'symbol_mask', None)

    task_symbol = module.get_type('task_struct')
    comm_child_offset = task_symbol.relative_child_offset('comm')

    for offset in context.layers[layer_name].scan(
            scanner=scanners.RegExScanner(swapper_signature),
            context=context,
            progress_callback=progress_callback):
        # Rewind from the comm field hit to the start of the candidate task_struct
        init_task_address = offset - comm_child_offset
        init_task = module.object(object_type='task_struct',
                                  offset=init_task_address,
                                  absolute=True)
        if init_task.pid != 0:
            continue
        elif init_task.has_member(
                'state') and init_task.state.cast('unsigned int') != 0:
            continue

        # This we get for free
        aslr_shift = init_task.files.cast(
            'long unsigned int') - module.get_symbol('init_files').address
        kaslr_shift = init_task_address - cls.virtual_to_physical_address(
            init_task_json_address)
        if address_mask:
            aslr_shift = aslr_shift & address_mask

        # Reject shifts that are not page-aligned
        if aslr_shift & 0xfff != 0 or kaslr_shift & 0xfff != 0:
            continue
        vollog.debug(
            "Linux ASLR shift values determined: physical {:0x} virtual {:0x}"
            .format(kaslr_shift, aslr_shift))
        return kaslr_shift, aslr_shift

    # We don't throw an exception, because we may legitimately not have an ASLR shift, but we report it
    vollog.debug(
        "Scanners could not determine any ASLR shifts, using 0 for both")
    return 0, 0
def get_session_layers(
        cls,
        context: interfaces.context.ContextInterface,
        layer_name: str,
        symbol_table: str,
        pids: List[int] = None) -> Generator[str, None, None]:
    """Build a cache of possible virtual layers, in priority starting with
    the primary/kernel layer. Then keep one layer per session by cycling
    through the process list.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        pids: A list of process identifiers to include exclusively or None for no filter

    Returns:
        A list of session layer names
    """
    seen_ids: List[interfaces.objects.ObjectInterface] = []
    filter_func = pslist.PsList.create_pid_filter(pids or [])

    for proc in pslist.PsList.list_processes(context=context,
                                             layer_name=layer_name,
                                             symbol_table=symbol_table,
                                             filter_func=filter_func):
        proc_id = "Unknown"
        try:
            proc_id = proc.UniqueProcessId
            proc_layer_name = proc.add_process_layer()

            # not all processes have a valid session pointer.
            # NOTE(review): the session object is created on layer_name (the kernel
            # layer), not the process layer built above — confirm that is intended
            session_space = context.object(symbol_table + constants.BANG + "_MM_SESSION_SPACE",
                                           layer_name=layer_name,
                                           offset=proc.Session)

            if session_space.SessionId in seen_ids:
                continue

        except exceptions.InvalidAddressException:
            vollog.log(
                constants.LOGLEVEL_VVV,
                "Process {} does not have a valid Session or a layer could not be constructed for it"
                .format(proc_id))
            continue

        # save the layer if we haven't seen the session yet
        seen_ids.append(session_space.SessionId)
        yield proc_layer_name
def list_processes(cls,
                   context: interfaces.context.ContextInterface,
                   layer_name: str,
                   symbol_table: str,
                   filter_func: Callable[[interfaces.objects.ObjectInterface], bool] = lambda _: False) -> \
        Iterable[interfaces.objects.ObjectInterface]:
    """Lists all the processes in the primary layer that are in the pid
    config option.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        filter_func: A function which takes an EPROCESS object and returns True if the process should be ignored/filtered

    Returns:
        The list of EPROCESS objects from the `layer_name` layer's PsActiveProcessHead list after filtering
    """
    # We only use the object factory to demonstrate how to use one
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    kernel = context.module(symbol_table, layer_name=layer_name, offset=kvo)

    head_offset = kernel.get_symbol("PsActiveProcessHead").address
    head = kernel.object(object_type="_LIST_ENTRY", offset=head_offset)

    # This is example code to demonstrate how to use symbol_space directly, rather than through a module:
    #
    # ```
    # reloff = self.context.symbol_space.get_type(
    #     self.config['nt_symbols'] + constants.BANG + "_EPROCESS").relative_child_offset(
    #     "ActiveProcessLinks")
    # ```
    #
    # Note: "nt_symbols!_EPROCESS" could have been used, but would rely on the "nt_symbols" symbol table not already
    # having been present. Strictly, the value of the requirement should be joined with the BANG character
    # defined in the constants file
    links_offset = kernel.get_type("_EPROCESS").relative_child_offset("ActiveProcessLinks")

    # Overlay an _EPROCESS on the list head so its links can be iterated
    first_entry = kernel.object(object_type="_EPROCESS",
                                offset=head.vol.offset - links_offset,
                                absolute=True)

    for process in first_entry.ActiveProcessLinks:
        if filter_func(process):
            continue
        yield process
def list_registry_callbacks(
        cls, context: interfaces.context.ContextInterface, layer_name: str,
        symbol_table: str,
        callback_table_name: str) -> Iterable[Tuple[str, int, None]]:
    """Lists all registry callbacks.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols
        callback_table_name: The name of the table containing the callback symbols

    Yields:
        A name, location and optional detail string
    """
    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    ntkrnlmp = context.module(symbol_table,
                              layer_name=layer_name,
                              offset=kvo)
    full_type_name = callback_table_name + constants.BANG + "_EX_CALLBACK_ROUTINE_BLOCK"

    try:
        symbol_offset = ntkrnlmp.get_symbol("CmpCallBackVector").address
        symbol_count_offset = ntkrnlmp.get_symbol(
            "CmpCallBackCount").address
    except exceptions.SymbolError:
        vollog.debug("Cannot find CmpCallBackVector or CmpCallBackCount")
        return

    callback_count = ntkrnlmp.object(object_type="unsigned int",
                                     offset=symbol_count_offset)

    if callback_count == 0:
        return

    # The vector is an array of fast references, one per registered callback
    fast_refs = ntkrnlmp.object(object_type="array",
                                offset=symbol_offset,
                                subtype=ntkrnlmp.get_type("_EX_FAST_REF"),
                                count=callback_count)

    for fast_ref in fast_refs:
        try:
            callback = fast_ref.dereference().cast(full_type_name)
        except exceptions.InvalidAddressException:
            continue

        if callback.Function != 0:
            yield "CmRegisterCallback", callback.Function, None
def find_suitable_requirements(
        cls, context: interfaces.context.ContextInterface, config_path: str,
        requirement: interfaces.configuration.RequirementInterface,
        stacked_layers: List[str]) -> Optional[Tuple[str, str]]:
    """Looks for translation layer requirements and attempts to apply the
    stacked layers to it.  If it succeeds it returns the configuration path
    and layer name where the stacked nodes were spliced into the tree.

    Args:
        context: The context holding configuration and layers
        config_path: The configuration path of the parent of the requirement
        requirement: The requirement (tree) to search for translation layers
        stacked_layers: Candidate layer names to try, in order

    Returns:
        A tuple of a configuration path and layer name for the top of the
        stacked layers or None if suitable requirements are not found
    """
    child_config_path = interfaces.configuration.path_join(
        config_path, requirement.name)
    if isinstance(requirement, requirements.TranslationLayerRequirement):
        if requirement.unsatisfied(context, config_path):
            # Remember any pre-existing value so it can be restored on failure
            original_setting = context.config.get(child_config_path, None)
            for layer_name in stacked_layers:
                # Trial each candidate by writing it into the config and re-checking
                context.config[child_config_path] = layer_name
                if not requirement.unsatisfied(context, config_path):
                    return child_config_path, layer_name
            # Clean-up to restore the config
            if original_setting:
                context.config[child_config_path] = original_setting
            else:
                del context.config[child_config_path]
        else:
            # Already satisfied: report the existing configured layer
            return child_config_path, context.config.get(
                child_config_path, None)
    # Recurse depth-first into the subrequirements
    for req_name, req in requirement.requirements.items():
        result = cls.find_suitable_requirements(context, child_config_path,
                                                req, stacked_layers)
        if result:
            return result
    return None
def dump_pe(
        cls,
        context: interfaces.context.ContextInterface,
        pe_table_name: str,
        dll_entry: interfaces.objects.ObjectInterface,
        open_method: Type[interfaces.plugins.FileHandlerInterface],
        layer_name: Optional[str] = None,
        prefix: str = '') -> Optional[interfaces.plugins.FileHandlerInterface]:
    """Extracts the complete data for a process as a FileInterface

    Args:
        context: the context to operate upon
        pe_table_name: the name for the symbol table containing the PE format symbols
        dll_entry: the object representing the module
        open_method: class for constructing output files
        layer_name: the layer that the DLL lives within (defaults to the layer of `dll_entry`)
        prefix: optional string prepended to the output file name

    Returns:
        An open FileHandlerInterface object containing the complete data for the DLL or None in the case of failure
    """
    try:
        try:
            name = dll_entry.FullDllName.get_string()
        except exceptions.InvalidAddressException:
            # NOTE(review): spelling kept as-is for output-filename compatibility
            name = 'UnreadbleDLLName'

        if layer_name is None:
            layer_name = dll_entry.vol.layer_name

        file_handle = open_method("{}{}.{:#x}.{:#x}.dmp".format(
            prefix, ntpath.basename(name), dll_entry.vol.offset, dll_entry.DllBase))

        # Reconstruct the PE from memory and write it out chunk by chunk
        dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                    offset=dll_entry.DllBase,
                                    layer_name=layer_name)
        for offset, data in dos_header.reconstruct():
            file_handle.seek(offset)
            file_handle.write(data)
    except (IOError, exceptions.VolatilityException, OverflowError, ValueError) as excp:
        vollog.debug("Unable to dump dll at offset {}: {}".format(dll_entry.DllBase, excp))
        return None
    return file_handle
def find_cookie(
        cls, context: interfaces.context.ContextInterface, layer_name: str,
        symbol_table: str) -> Optional[interfaces.objects.ObjectInterface]:
    """Find the ObHeaderCookie value (if it exists)"""
    # The cookie symbol only exists on some Windows versions
    cookie_symbol_name = symbol_table + constants.BANG + "ObHeaderCookie"
    try:
        relative_offset = context.symbol_space.get_symbol(cookie_symbol_name).address
    except exceptions.SymbolError:
        return None

    kvo = context.layers[layer_name].config['kernel_virtual_offset']
    return context.object(symbol_table + constants.BANG + "unsigned int",
                          layer_name,
                          offset=kvo + relative_offset)
def populate_config(self, context: interfaces.context.ContextInterface,
                    configurables_list: Dict[str, Type[interfaces.configuration.ConfigurableInterface]],
                    args: argparse.Namespace, plugin_config_path: str) -> None:
    """Populate the context config based on the returned args.

    We have already determined these elements must be descended from
    ConfigurableInterface

    Args:
        context: The volatility3 context to operate on
        configurables_list: A dictionary of configurable items that can be configured on the plugin
        args: An object containing the arguments necessary
        plugin_config_path: The path within the context's config containing the plugin's configuration
    """
    vargs = vars(args)
    for configurable_name, configurable_item in configurables_list.items():
        for requirement in configurable_item.get_requirements():
            value = vargs.get(requirement.name, None)
            if value is None:
                continue
            if isinstance(requirement, requirements.URIRequirement):
                if isinstance(value, str):
                    scheme = parse.urlparse(value).scheme
                    # A single-character "scheme" is really a Windows drive letter
                    if not scheme or len(scheme) <= 1:
                        if not os.path.exists(value):
                            raise FileNotFoundError(
                                "Non-existent file {} passed to URIRequirement".format(value))
                        value = "file://" + request.pathname2url(os.path.abspath(value))
            if isinstance(requirement, requirements.ListRequirement):
                if not isinstance(value, list):
                    raise TypeError(
                        "Configuration for ListRequirement was not a list: {}".format(
                            requirement.name))
                # Coerce each element to the requirement's declared element type
                value = [requirement.element_type(x) for x in value]
            if not inspect.isclass(configurable_item):
                config_path = configurable_item.config_path
            else:
                # We must be the plugin, so name it appropriately:
                config_path = plugin_config_path
            extended_path = interfaces.configuration.path_join(config_path, requirement.name)
            context.config[extended_path] = value
def get_version_information(
        cls, context: interfaces.context.ContextInterface, pe_table_name: str, layer_name: str,
        base_address: int) -> Tuple[int, int, int, int]:
    """Get File and Product version information from PE files.

    Args:
        context: volatility context on which to operate
        pe_table_name: name of the PE table
        layer_name: name of the layer containing the PE file
        base_address: base address of the PE (where MZ is found)

    Returns:
        A (major, minor, product, build) tuple taken from the fixed file info
    """
    if layer_name is None:
        raise TypeError("Layer must be a string not None")

    # Rebuild the PE image from memory into an in-memory buffer
    buffer = io.BytesIO()
    dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                offset=base_address,
                                layer_name=layer_name)
    for chunk_offset, chunk in dos_header.reconstruct():
        buffer.seek(chunk_offset)
        buffer.write(chunk)

    pe = pefile.PE(data=buffer.getvalue(), fast_load=True)
    pe.parse_data_directories([pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_RESOURCE"]])

    # pefile >= 2018.8.8 (estimated) wraps the structure in a list;
    # pefile <= 2017.11.5 (estimated) exposes it directly
    fixed_info = pe.VS_FIXEDFILEINFO
    version_struct = fixed_info[0] if isinstance(fixed_info, list) else fixed_info

    # Each 32-bit field packs two 16-bit components (high word, low word)
    major, minor = divmod(version_struct.ProductVersionMS, 0x10000)
    product, build = divmod(version_struct.ProductVersionLS, 0x10000)
    buffer.close()
    return major, minor, product, build
def set_kernel_virtual_offset(self, context: interfaces.context.ContextInterface,
                              valid_kernel: ValidKernelType) -> None:
    """Traverses the requirement tree, looking for kernel_virtual_offset
    values that may need setting and sets it based on the previously
    identified `valid_kernel`.

    Args:
        context: Context on which to operate and provide the kernel virtual offset
        valid_kernel: List of valid kernels and offsets
    """
    if not valid_kernel:
        return
    virtual_layer, kvo, _kernel = valid_kernel
    if kvo is None:
        # No offset was discovered, so there is nothing to record
        return
    # Set the virtual offset under the TranslationLayer it applies to
    layer_config_path = context.layers[virtual_layer].config_path
    kvo_path = interfaces.configuration.path_join(layer_config_path, 'kernel_virtual_offset')
    context.config[kvo_path] = kvo
    vollog.debug(f"Setting kernel_virtual_offset to {hex(kvo)}")
def scan_hives(cls, context: interfaces.context.ContextInterface, layer_name: str, symbol_table: str) -> \
        Iterable[interfaces.objects.ObjectInterface]:
    """Scans for hives using the poolscanner module and constraints or bigpools module with tag.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        layer_name: The name of the layer on which to operate
        symbol_table: The name of the table containing the kernel symbols

    Returns:
        A list of Hive objects as found from the `layer_name` layer based on Hive pool signatures
    """
    is_64bit = symbols.symbol_table_is_64bit(context, symbol_table)
    is_windows_8_1_or_later = versions.is_windows_8_1_or_later(context=context,
                                                               symbol_table=symbol_table)

    if is_windows_8_1_or_later and is_64bit:
        # Newer 64-bit kernels track hives via big pool allocations tagged CM10
        kvo = context.layers[layer_name].config['kernel_virtual_offset']
        ntkrnlmp = context.module(symbol_table, layer_name=layer_name, offset=kvo)
        big_pools = bigpools.BigPools.list_big_pools(context,
                                                     layer_name=layer_name,
                                                     symbol_table=symbol_table,
                                                     tags=["CM10"])
        for pool in big_pools:
            yield ntkrnlmp.object(object_type="_CMHIVE", offset=pool.Va, absolute=True)
    else:
        # Fall back to scanning pool headers for the CM10 tag
        constraints = poolscanner.PoolScanner.builtin_constraints(symbol_table, [b'CM10'])
        scan_results = poolscanner.PoolScanner.generate_pool_scan(context, layer_name,
                                                                  symbol_table, constraints)
        for _constraint, mem_object, _header in scan_results:
            yield mem_object
def get_cmdline(cls, context: interfaces.context.ContextInterface, kernel_table_name: str, proc):
    """Extracts the cmdline from PEB

    Args:
        context: the context to operate upon
        kernel_table_name: the name for the symbol table containing the kernel's symbols
        proc: the process object

    Returns:
        A string with the command line
    """
    # Map the process so its PEB becomes readable
    process_layer = proc.add_process_layer()
    peb = context.object(kernel_table_name + constants.BANG + "_PEB",
                         layer_name=process_layer,
                         offset=proc.Peb)
    # The command line lives in the PEB's process parameters block
    return peb.ProcessParameters.CommandLine.get_string()