def vad_dump(cls, context: "interfaces.context.ContextInterface", layer_name: str,
             vad: "interfaces.objects.ObjectInterface") -> bytes:
    """Extracts the complete data for a VAD as bytes.

    Args:
        context: the context to operate upon
        layer_name: the name of the layer that the VAD lives within
        vad: the virtual address descriptor to be dumped

    Returns:
        bytes containing the data from the VAD's start address up to (but not
        including) its end address, padded where pages are unreadable
    """
    chunks = []
    proc_layer = context.layers[layer_name]
    # Read in 10 MB chunks so very large VADs do not require one huge read
    chunk_size = 1024 * 1024 * 10
    offset = vad.get_start()
    out_of_range = vad.get_end()
    while offset < out_of_range:
        to_read = min(chunk_size, out_of_range - offset)
        data = proc_layer.read(offset, to_read, pad = True)
        if not data:
            break
        chunks.append(data)
        offset += to_read
    # join once at the end instead of repeated bytes concatenation (quadratic)
    return b"".join(chunks)
def get_record_tuple(service_record: interfaces.objects.ObjectInterface):
    """Renders a single service record as an output row.

    Args:
        service_record: the service record object to render

    Returns:
        A tuple of (offset, order, pid, start, state, type, name, display, binary)
    """
    record = service_record
    record_offset = format_hints.Hex(record.vol.offset)
    return (
        record_offset,
        record.Order,
        record.get_pid(),
        record.Start.description,
        record.State.description,
        record.get_type(),
        record.get_name(),
        record.get_display(),
        record.get_binary(),
    )
def vad_dump(cls,
             context: interfaces.context.ContextInterface,
             proc: interfaces.objects.ObjectInterface,
             vad: interfaces.objects.ObjectInterface,
             open_method: Type[interfaces.plugins.FileHandlerInterface],
             maxsize: int = MAXSIZE_DEFAULT) -> Optional[interfaces.plugins.FileHandlerInterface]:
    """Writes the complete data for a VAD into a newly opened file handle.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        proc: an _EPROCESS instance
        vad: The suspected VAD to extract (ObjectInterface)
        open_method: class to provide context manager for opening the file
        maxsize: Max size of VAD section (default MAXSIZE_DEFAULT)

    Returns:
        An open FileHandlerInterface object containing the complete data for the
        process, or None in the case of failure
    """
    # The VAD extension members may be absent on some structures
    try:
        vad_start = vad.get_start()
        vad_end = vad.get_end()
    except AttributeError:
        vollog.debug("Unable to find the starting/ending VPN member")
        return None

    # A non-positive maxsize disables the size cap entirely
    if 0 < maxsize < vad_end - vad_start:
        vollog.debug("Skip VAD dump {0:#x}-{1:#x} due to maxsize limit".format(vad_start, vad_end))
        return None

    proc_id = "Unknown"
    try:
        proc_id = proc.UniqueProcessId
        proc_layer_name = proc.add_process_layer()
    except exceptions.InvalidAddressException as excp:
        vollog.debug("Process {}: invalid address {} in layer {}".format(proc_id, excp.invalid_address,
                                                                         excp.layer_name))
        return None

    proc_layer = context.layers[proc_layer_name]
    file_name = "pid.{0}.vad.{1:#x}-{2:#x}.dmp".format(proc_id, vad_start, vad_end)
    try:
        file_handle = open_method(file_name)
        # Copy the region in 10 MB chunks to bound memory use
        chunk_size = 1024 * 1024 * 10
        position = vad_start
        while position < vad_end:
            read_size = min(chunk_size, vad_end - position)
            chunk = proc_layer.read(position, read_size, pad = True)
            if not chunk:
                break
            file_handle.write(chunk)
            position += read_size
    except Exception as excp:
        # Best-effort extraction: any failure is logged, not raised
        vollog.debug("Unable to dump VAD {}: {}".format(file_name, excp))
        return None
    return file_handle
def parse_string(structure: interfaces.objects.ObjectInterface, parse_as_pascal: bool = False, size: int = 0) -> str:
    """Consumes either a c-string or a pascal string depending on the leaf_type.

    Args:
        structure: the object to interpret as a string
        parse_as_pascal: when True, read a length-prefixed (pascal) string;
            otherwise read a c-string of at most `size` characters
        size: maximum length of the c-string (ignored for pascal strings)

    Returns:
        The decoded (latin-1) string value
    """
    if parse_as_pascal:
        pascal = structure.cast("pascal_string")
        result = pascal.string.cast("string", max_length = pascal.length, encoding = "latin-1")
    else:
        result = structure.cast("string", max_length = size, encoding = "latin-1")
    return str(result)
def _walk_iterable(
        cls,
        queue: interfaces.objects.ObjectInterface,
        list_head_member: str,
        list_next_member: str,
        next_member: str,
        max_elements: int = 4096) -> Iterable[interfaces.objects.ObjectInterface]:
    """Walks a linked structure starting at a queue head, yielding readable elements.

    Traversal stops on a cycle (offset already visited), after max_elements
    entries, or when a link address becomes unreadable.
    """
    visited = set()  # type: Set[int]
    try:
        node = queue.member(attr=list_head_member)
    except exceptions.InvalidAddressException:
        return

    while node:
        offset = node.vol.offset
        # Cycle guard: the same offset twice means the list loops back
        if offset in visited:
            break
        visited.add(offset)
        # Hard cap on traversal length to avoid runaway/corrupt lists
        if len(visited) == max_elements:
            break
        if node.is_readable():
            yield node
        try:
            node = node.member(attr=next_member).member(attr=list_next_member)
        except exceptions.InvalidAddressException:
            break
def process_dump(
        cls, context: interfaces.context.ContextInterface, kernel_table_name: str, pe_table_name: str,
        proc: interfaces.objects.ObjectInterface,
        open_method: Type[interfaces.plugins.FileHandlerInterface]) -> interfaces.plugins.FileHandlerInterface:
    """Extracts the complete data for a process as a FileHandlerInterface.

    Args:
        context: the context to operate upon
        kernel_table_name: the name for the symbol table containing the kernel's symbols
        pe_table_name: the name for the symbol table containing the PE format symbols
        proc: the process object whose memory should be output
        open_method: class to provide context manager for opening the file

    Returns:
        An open FileHandlerInterface object containing the complete data for the
        process, or None in the case of failure
    """
    file_handle = None
    try:
        layer_name = proc.add_process_layer()
        # The PEB is mapped within the process's own address space
        peb = context.object(kernel_table_name + constants.BANG + "_PEB",
                             layer_name = layer_name,
                             offset = proc.Peb)
        dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                    offset = peb.ImageBaseAddress,
                                    layer_name = layer_name)
        file_handle = open_method("pid.{0}.{1:#x}.dmp".format(proc.UniqueProcessId, peb.ImageBaseAddress))
        # Reconstruct the PE image, writing each chunk at its proper file offset
        for file_offset, chunk in dos_header.reconstruct():
            file_handle.seek(file_offset)
            file_handle.write(chunk)
    except Exception as exc:
        # Best effort: log the failure and return whatever handle (possibly None) exists
        vollog.debug("Unable to dump PE with pid {}: {}".format(proc.UniqueProcessId, exc))
    return file_handle
def process_dump(
        cls, context: interfaces.context.ContextInterface, kernel_table_name: str, pe_table_name: str,
        proc: interfaces.objects.ObjectInterface) -> interfaces.plugins.FileInterface:
    """Extracts the complete data for a process as a FileInterface

    Args:
        context: the context to operate upon
        kernel_table_name: the name for the symbol table containing the kernel's symbols
        pe_table_name: the name for the symbol table containing the PE format symbols
        proc: the process object whose memory should be output

    Returns:
        A FileInterface object containing the complete data for the process
    """
    # Note: the previous version assigned an unused local (proc_id); removed.
    proc_layer_name = proc.add_process_layer()
    # The PEB is read from the process's own address space
    peb = context.object(kernel_table_name + constants.BANG + "_PEB",
                         layer_name=proc_layer_name,
                         offset=proc.Peb)
    dos_header = context.object(pe_table_name + constants.BANG + "_IMAGE_DOS_HEADER",
                                offset=peb.ImageBaseAddress,
                                layer_name=proc_layer_name)

    filedata = interfaces.plugins.FileInterface(
        "pid.{0}.{1:#x}.dmp".format(proc.UniqueProcessId, peb.ImageBaseAddress))
    # Rebuild the PE image, placing each reconstructed chunk at its file offset
    for offset, data in dos_header.reconstruct():
        filedata.data.seek(offset)
        filedata.data.write(data)

    return filedata
def list_injections(
        cls, context: interfaces.context.ContextInterface, kernel_layer_name: str, symbol_table: str,
        proc: interfaces.objects.ObjectInterface) -> Iterable[Tuple[interfaces.objects.ObjectInterface, bytes]]:
    """Generate memory regions for a process that may contain injected code.

    Args:
        context: The context to retrieve required elements (layers, symbol tables) from
        kernel_layer_name: The name of the kernel layer from which to read the VAD protections
        symbol_table: The name of the table containing the kernel symbols
        proc: an _EPROCESS instance

    Yields:
        Tuples of a VAD instance and the first 64 bytes of data contained in that region
    """
    process_id = "Unknown"
    try:
        process_id = proc.UniqueProcessId
        layer_name = proc.add_process_layer()
    except exceptions.InvalidAddressException as excp:
        vollog.debug("Process {}: invalid address {} in layer {}".format(
            process_id, excp.invalid_address, excp.layer_name))
        return

    layer = context.layers[layer_name]

    for vad in proc.get_vad_root().traverse():
        protection_string = vad.get_protection(
            vadinfo.VadInfo.protect_values(context, kernel_layer_name,
                                           symbol_table),
            vadinfo.winnt_protections)

        # Everything of interest must be both writable and executable
        if not ("EXECUTE" in protection_string and "WRITE" in protection_string):
            continue

        suspicious_private = (vad.get_private_memory() == 1
                              and vad.get_tag() == "VadS")
        suspicious_mapped = (vad.get_private_memory() == 0
                             and protection_string != "PAGE_EXECUTE_WRITECOPY")
        if suspicious_private or suspicious_mapped:
            if cls.is_vad_empty(layer, vad):
                continue
            yield vad, layer.read(vad.get_start(), 64, pad=True)
def _vnode_name(
        cls, vnode: interfaces.objects.ObjectInterface) -> Optional[str]:
    """Returns the name of a vnode, or None when the name pointer is unreadable."""
    # Mount-point roots get special handling: use the reconstructed full path
    # rather than the short v_name (v_flag bit 0 — presumably VROOT; confirm)
    if vnode.v_flag & 1 == 1:
        return vnode.full_path()
    try:
        return utility.pointer_to_string(vnode.v_name, 255)
    except exceptions.InvalidAddressException:
        return None
def vad_dump(cls, context: "interfaces.context.ContextInterface", layer_name: str,
             vad: "interfaces.objects.ObjectInterface") -> bytes:
    """Returns the complete content of a VAD as bytes.

    Args:
        context: the context to operate upon
        layer_name: the name of the layer that the VAD lives within
        vad: the virtual address descriptor to be dumped

    Returns:
        bytes read from the VAD's start address up to (but not including) its
        end address, padded where pages are unreadable
    """
    chunks = []
    proc_layer = context.layers[layer_name]
    # Read in 10 MB chunks so very large VADs do not require one huge read
    chunk_size = 1024 * 1024 * 10
    offset = vad.get_start()
    # BUG FIX: the walk must stop at the VAD's end address itself; the previous
    # code used get_start() + get_end(), overshooting the region (compare the
    # sibling vad_dump implementations, which all use get_end() alone).
    out_of_range = vad.get_end()
    while offset < out_of_range:
        to_read = min(chunk_size, out_of_range - offset)
        data = proc_layer.read(offset, to_read, pad=True)
        if not data:
            break
        chunks.append(data)
        offset += to_read
    # join once at the end instead of repeated bytes concatenation (quadratic)
    return b"".join(chunks)
def get_user_name(cls, user: "interfaces.objects.ObjectInterface",
                  samhive: "registry.RegistryHive") -> Optional[bytes]:
    """Extracts a user's name from the V value of their SAM registry key.

    Args:
        user: the registry key node for the user's RID
        samhive: the SAM registry hive from which to read the value data

    Returns:
        The raw user name bytes, or None if the V value is missing or malformed
    """
    V = None
    for v in user.get_values():
        if v.get_name() == 'V':
            # +4 skips past the value's data header
            V = samhive.read(v.Data + 4, v.DataLength)
    if not V:
        return None

    # The name's (offset, length) pair lives at 0x0c/0x10 in the V structure;
    # the stored offset is relative to the data area beginning at 0xCC
    name_offset = unpack("<L", V[0x0c:0x10])[0] + 0xCC
    name_length = unpack("<L", V[0x10:0x14])[0]
    # Validate the full extent of the name, not just its length: a corrupt
    # offset would otherwise silently slice a truncated or empty name
    if name_offset + name_length > len(V):
        return None

    return V[name_offset:name_offset + name_length]
def list_vads(cls, proc: interfaces.objects.ObjectInterface,
              filter_func: Callable[[interfaces.objects.ObjectInterface], bool] = lambda _: False) -> \
        Generator[interfaces.objects.ObjectInterface, None, None]:
    """Lists the Virtual Address Descriptors of a specific process.

    Args:
        proc: _EPROCESS object from which to list the VADs
        filter_func: Function to take a virtual address descriptor value and return
            True if it should be filtered out (by default nothing is filtered)

    Returns:
        A generator of the process's virtual address descriptors, excluding any
        for which filter_func returns True
    """
    yield from (vad for vad in proc.get_vad_root().traverse() if not filter_func(vad))
def get_vad(task: interfaces.objects.ObjectInterface, address: int):  # vad
    """Finds the virtual address descriptor whose range contains a given address.

    Args:
        task: The EPROCESS object of which to traverse the vad tree
        address: The virtual address to locate within the tree

    Returns:
        A (vad, start, end) tuple for the descriptor containing the address,
        or (None, None, None) if no descriptor matches
    """
    for vad in task.get_vad_root().traverse():
        start = vad.get_start()
        end = vad.get_end()
        if start <= address < end:
            return vad, start, end
    return None, None, None
def get_vad_maps(task: interfaces.objects.ObjectInterface) -> Iterable[Tuple[int, int]]:
    """Creates a map of start/size pairs from a virtual address descriptor tree.

    Args:
        task: The EPROCESS object of which to traverse the vad tree

    Returns:
        An iterable of (start, size) tuples, one per descriptor
    """
    for vad in task.get_vad_root().traverse():
        start = vad.get_start()
        yield (start, vad.get_end() - start)
def array_of_pointers(array: interfaces.objects.ObjectInterface, count: int,
                      subtype: Union[str, interfaces.objects.Template],
                      context: interfaces.context.ContextInterface) -> interfaces.objects.ObjectInterface:
    """Takes an object, and recasts it as an array of pointers to subtype.

    Args:
        array: the object to recast (its type name determines the symbol table
            from which the pointer template is resolved)
        count: the number of pointer entries in the array
        subtype: the template (or string name of a type) each pointer points to
        context: the context used to resolve string type names and templates

    Returns:
        The object recast as an array of `count` pointers to subtype

    Raises:
        TypeError: if subtype cannot be resolved to a valid template
    """
    symbol_table = array.vol.type_name.split(constants.BANG)[0]
    if isinstance(subtype, str) and context is not None:
        subtype = context.symbol_space.get_type(subtype)

    # A None subtype already fails the isinstance check, so the previous
    # additional "or subtype is None" test was unreachable and has been removed
    if not isinstance(subtype, interfaces.objects.Template):
        raise TypeError(
            "Subtype must be a valid template (or string name of an object template)"
        )

    subtype_pointer = context.symbol_space.get_type(symbol_table + constants.BANG + "pointer")
    subtype_pointer.update_vol(subtype=subtype)
    return array.cast("array", count=count, subtype=subtype_pointer)
def _get_subkeys_recursive( self, hive: RegistryHive, node: interfaces.objects.ObjectInterface ) -> Iterable[interfaces.objects.ObjectInterface]: """Recursively descend a node returning subkeys.""" # The keylist appears to include 4 bytes of key name after each value # We can either double the list and only use the even items, or # We could change the array type to a struct with both parts try: signature = node.cast('string', max_length=2, encoding='latin-1') except (exceptions.InvalidAddressException, RegistryFormatException): return listjump = None if signature == 'ri': listjump = 1 elif signature == 'lh' or signature == 'lf': listjump = 2 elif node.vol.type_name.endswith(constants.BANG + "_CM_KEY_NODE"): yield node else: vollog.debug( "Unexpected node type encountered when traversing subkeys: {}, signature: {}" .format(node.vol.type_name, signature)) if listjump: node.List.count = node.Count * listjump for subnode_offset in node.List[::listjump]: if (subnode_offset & 0x7fffffff) > hive.maximum_address: vollog.log( constants.LOGLEVEL_VVV, "Node found with address outside the valid Hive size: {}" .format(hex(subnode_offset))) else: try: subnode = hive.get_node(subnode_offset) except (exceptions.InvalidAddressException, RegistryFormatException): vollog.log( constants.LOGLEVEL_VVV, "Failed to get node at {}, skipping".format( hex(subnode_offset))) continue yield from self._get_subkeys_recursive(hive, subnode)
def dump_file_producer(
        cls, file_object: interfaces.objects.ObjectInterface,
        memory_object: interfaces.objects.ObjectInterface,
        open_method: Type[interfaces.plugins.FileHandlerInterface],
        layer: interfaces.layers.DataLayerInterface, desired_file_name: str
) -> Optional[interfaces.plugins.FileHandlerInterface]:
    """Produce a file from the memory object's get_available_pages() interface.

    :param file_object: the parent _FILE_OBJECT
    :param memory_object: the _CONTROL_AREA or _SHARED_CACHE_MAP
    :param open_method: class for constructing output files
    :param layer: the memory layer to read from
    :param desired_file_name: name of the output file
    :return: result status
    """
    filedata = open_method(desired_file_name)
    try:
        # For each available page:
        #   memoffset  - offset in `layer` where the page begins
        #   fileoffset - destination offset within the output file
        #   datasize   - size of the page
        # Track the byte count so empty files are never left on disk
        total_written = 0
        for memoffset, fileoffset, datasize in memory_object.get_available_pages():
            page = layer.read(memoffset, datasize, pad=True)
            total_written += len(page)
            filedata.seek(fileoffset)
            filedata.write(page)
    except exceptions.InvalidAddressException:
        vollog.debug("Unable to dump file at {0:#x}".format(
            file_object.vol.offset))
        return None

    if not total_written:
        vollog.debug("No data is cached for the file at {0:#x}".format(
            file_object.vol.offset))
        return None
    vollog.debug("Stored {}".format(filedata.preferred_filename))
    return filedata
def save_vacb(self, vacb_obj: interfaces.objects.ObjectInterface, vacb_list: List):
    """Appends a (base address, file offset, block size) tuple for a VACB to vacb_list."""
    entry = (
        int(vacb_obj.BaseAddress),
        int(vacb_obj.get_file_offset()),
        self.VACB_BLOCK,
    )
    vacb_list.append(entry)
def process_file_object(
        cls, context: interfaces.context.ContextInterface, primary_layer_name: str,
        open_method: Type[interfaces.plugins.FileHandlerInterface],
        file_obj: interfaces.objects.ObjectInterface) -> Tuple:
    """Given a FILE_OBJECT, dump data to separate files for each of the three file caches.

    Note: despite the Tuple annotation, this is a generator that yields one
    result row per cache that could be examined.

    :param context: the context to operate upon
    :param primary_layer_name: primary/virtual layer to operate on
    :param open_method: class for constructing output files
    :param file_obj: the FILE_OBJECT
    """
    # Filtering by these types of devices prevents us from processing other types of devices that
    # use the "File" object type, such as \Device\Tcp and \Device\NamedPipe.
    if file_obj.DeviceObject.DeviceType not in [
            FILE_DEVICE_DISK, FILE_DEVICE_NETWORK_FILE_SYSTEM
    ]:
        vollog.log(
            constants.LOGLEVEL_VVV,
            "The file object at {0:#x} is not a file on disk".format(
                file_obj.vol.offset))
        return

    # Depending on the type of object (DataSection, ImageSection, SharedCacheMap) we may need to
    # read from the memory layer or the primary layer.
    memory_layer_name = context.layers[primary_layer_name].config[
        'memory_layer']
    memory_layer = context.layers[memory_layer_name]
    primary_layer = context.layers[primary_layer_name]

    obj_name = file_obj.file_name_with_device()

    # This stores a list of tuples, describing what to dump and how to dump it.
    # Ex: (
    #     memory_object with get_available_pages() API (either CONTROL_AREA or SHARED_CACHE_MAP),
    #     layer to read from,
    #     file extension to apply,
    # )
    dump_parameters = []

    # The DataSectionObject and ImageSectionObject caches are handled in basically the same way.
    # We carve these "pages" from the memory_layer.
    for member_name, extension in [("DataSectionObject", "dat"), ("ImageSectionObject", "img")]:
        try:
            section_obj = getattr(file_obj.SectionObjectPointer, member_name)
            control_area = section_obj.dereference().cast("_CONTROL_AREA")
            if control_area.is_valid():
                dump_parameters.append(
                    (control_area, memory_layer, extension))
        except exceptions.InvalidAddressException:
            # Missing section pointers are common; log at verbose level and move on
            vollog.log(
                constants.LOGLEVEL_VVV,
                "{0} is unavailable for file {1:#x}".format(
                    member_name, file_obj.vol.offset))

    # The SharedCacheMap is handled differently than the caches above.
    # We carve these "pages" from the primary_layer.
    try:
        scm_pointer = file_obj.SectionObjectPointer.SharedCacheMap
        shared_cache_map = scm_pointer.dereference().cast(
            "_SHARED_CACHE_MAP")
        if shared_cache_map.is_valid():
            dump_parameters.append(
                (shared_cache_map, primary_layer, "vacb"))
    except exceptions.InvalidAddressException:
        vollog.log(
            constants.LOGLEVEL_VVV,
            "SharedCacheMap is unavailable for file {0:#x}".format(
                file_obj.vol.offset))

    for memory_object, layer, extension in dump_parameters:
        cache_name = EXTENSION_CACHE_MAP[extension]
        # The output name encodes both object offsets and the cache type so
        # dumped files can be traced back to their source objects
        desired_file_name = "file.{0:#x}.{1:#x}.{2}.{3}.{4}".format(
            file_obj.vol.offset, memory_object.vol.offset, cache_name,
            ntpath.basename(obj_name), extension)
        file_handle = DumpFiles.dump_file_producer(file_obj, memory_object,
                                                   open_method, layer,
                                                   desired_file_name)
        file_output = "Error dumping file"
        if file_handle:
            file_handle.close()
            file_output = file_handle.preferred_filename
        yield (
            cache_name,
            format_hints.Hex(file_obj.vol.offset),
            ntpath.basename(
                obj_name),  # temporary, so its easier to visualize output
            file_output)
def filter_function(x: interfaces.objects.ObjectInterface) -> bool:
    """Returns True (filter out) for any VAD whose start is not the configured address."""
    wanted_starts = [self.config['address']]
    return x.get_start() not in wanted_starts