def get_mappings(self, start=0, end=2**64): """Enumerate all valid memory ranges. Yields: tuples of (starting virtual address, size) for valid the memory ranges. """ # Pages that hold PDEs and PTEs are 0x1000 bytes each. # Each PDE and PTE is four bytes. Thus there are 0x1000 / 4 = 0x400 # PDEs and PTEs we must test for pde in range(0, 0x400): vaddr = pde << 22 if vaddr > end: return next_vaddr = (pde + 1) << 22 if start > next_vaddr: continue pde_addr = ((self.dtb & 0xfffff000) | (vaddr & 0xffc00000) >> 20) pde_value = self.read_pte(pde_addr) if not pde_value & self.valid_mask: continue # PDE is for a large page. if pde_value & self.page_size_mask: yield addrspace.Run(start=vaddr, end=vaddr + 0x400000, file_offset=(pde_value & 0xffc00000) | (vaddr & 0x3fffff), address_space=self.base) continue # This reads the entire PTE table at once - On # windows where IO is extremely expensive, its # about 10 times more efficient than reading it # one value at the time - and this loop is HOT! pte_table_addr = ((pde_value & 0xfffff000) | ((vaddr & 0x3ff000) >> 10)) data = self.base.read(pte_table_addr, 4 * 0x400) pte_table = struct.unpack("<" + "I" * 0x400, data) tmp1 = vaddr for i, pte_value in enumerate(pte_table): vaddr = tmp1 | i << 12 if vaddr > end: return next_vaddr = tmp1 | ((i + 1) << 12) if start > next_vaddr: continue if pte_value & self.valid_mask: yield addrspace.Run(start=vaddr, end=vaddr + 0x1000, file_offset=(pte_value & 0xfffff000) | (vaddr & 0xfff), address_space=self.base)
def get_all_pages_x86(self, start=0, end=2**64):
    """Enumerate all pages of a 32 bit process, including PTE metadata.

    Yields:
      addrspace.Run objects whose data dict carries the raw PTE value,
      its address, and whether it is a prototype PTE.
    """
    for pde_vaddr, pde_value, pde_addr in self.get_available_PDEs_x86(
            start, end):
        if pde_value & self.proc_as.valid_mask and \
                pde_value & self.proc_as.page_size_mask:
            # large page
            phys_offset = ((pde_value & 0xfffffffe00000) |
                           (pde_vaddr & 0x1fffff))
            yield addrspace.Run(start=pde_vaddr,
                                end=pde_vaddr + self.LARGE_PAGE_SIZE,
                                file_offset=phys_offset,
                                address_space=self.proc_as.base,
                                data={'pte_value': pde_value,
                                      'is_proto': False,
                                      'pte_addr': pde_addr})
            continue

        for vaddr, pte_value, pte_addr in self.get_available_PTEs_x86(
                pde_vaddr, pde_value, start, end):
            phys_offset = \
                self._get_phys_addr_from_pte(vaddr, pte_value)

            # NOTE: the original yielded a 'pfn' key referencing an
            # undefined name (NameError). Dropped for consistency with
            # the 64 bit get_all_pages(), which carries no 'pfn' key.
            yield addrspace.Run(start=vaddr,
                                end=vaddr + self.PAGE_SIZE,
                                file_offset=phys_offset,
                                address_space=self.proc_as.base,
                                data={'pte_value': pte_value,
                                      'is_proto': False,
                                      'pte_addr': pte_addr})
def _generate_coarse_page_table_addresses(self, base_vaddr,
                                          coarse_page_base):
    """Walk one ARM coarse (second level) page table covering 1MB.

    Args:
      base_vaddr: virtual address of the start of the 1MB section.
      coarse_page_base: physical base address of the L2 table.

    Yields:
      addrspace.Run objects for each mapped large (64KB) or small (4KB)
      page in the section.
    """
    vaddr = base_vaddr
    while vaddr < base_vaddr + (1 << 20):
        l2_addr = (coarse_page_base |
                   (vaddr & self.l2_table_index_mask) >> 10)

        l2_descriptor = self.read_long_phys(l2_addr)
        l2_descriptor_type = l2_descriptor & 0b11

        # 64kb Large (coarse) page table.
        if l2_descriptor_type == 0b01:
            yield addrspace.Run(
                start=vaddr,
                end=vaddr + (1 << 16),
                file_offset=(l2_descriptor &
                             self.large_page_base_address_mask),
                address_space=self.base)
            vaddr += 1 << 16
            continue

        # 4kb small page.
        if l2_descriptor_type == 0b10 or l2_descriptor_type == 0b11:
            yield addrspace.Run(
                start=vaddr,
                end=vaddr + (1 << 12),
                file_offset=(l2_descriptor &
                             self.small_page_base_address_mask),
                address_space=self.base)
            vaddr += 1 << 12
            continue

        # Invalid page. Each L2 descriptor maps 4KB, so skip the whole
        # 4KB range. (Previously this advanced by 1 << 10, re-reading the
        # same descriptor four times for no benefit.)
        if l2_descriptor_type == 0b00:
            vaddr += 1 << 12
            continue

        raise RuntimeError("Unreachable")
def get_all_pages(self, start=0, end=2**64):
    """Simply enumerates all Paging structures and returns the virtual
    address and, if possible, the PFN.

    Yields Run objects for all available ranges in the virtual address
    space.
    """
    for pdpte_vaddr, pdpte_value, pdpte_addr in self._get_available_PDPTEs(
            start, end):
        # BUG FIX: the valid bit must be tested on the PDPTE *value*, not
        # on the virtual address (pdpte_vaddr) as the original code did.
        if pdpte_value & self.proc_as.valid_mask and \
                pdpte_value & self.proc_as.page_size_mask:
            # huge page (1 GB)
            phys_offset = ((pdpte_value & 0xfffffc0000000) |
                           (pdpte_vaddr & 0x3fffffff))
            yield addrspace.Run(start=pdpte_vaddr,
                                end=pdpte_vaddr + self.HUGE_PAGE_SIZE,
                                file_offset=phys_offset,
                                address_space=self.proc_as.base,
                                data={'pte_value': pdpte_value,
                                      'is_proto': False,
                                      'pte_addr': pdpte_addr})
            continue

        for pde_vaddr, pde_value, pde_addr in self._get_available_PDEs(
                pdpte_vaddr, pdpte_value, start, end):
            if pde_value & self.proc_as.valid_mask and \
                    pde_value & self.proc_as.page_size_mask:
                # large page
                phys_offset = ((pde_value & 0xfffffffe00000) |
                               (pde_vaddr & 0x1fffff))
                yield addrspace.Run(start=pde_vaddr,
                                    end=pde_vaddr + self.LARGE_PAGE_SIZE,
                                    file_offset=phys_offset,
                                    address_space=self.proc_as.base,
                                    data={'pte_value': pde_value,
                                          'is_proto': False,
                                          'pte_addr': pde_addr})
                continue

            for vaddr, pte_value, pte_addr in self._get_available_PTEs(
                    pde_value, pde_vaddr, start, end):
                phys_offset = \
                    self._get_phys_addr_from_pte(vaddr, pte_value)
                yield addrspace.Run(start=vaddr,
                                    end=vaddr + self.PAGE_SIZE,
                                    file_offset=phys_offset,
                                    address_space=self.proc_as.base,
                                    data={'pte_value': pte_value,
                                          'is_proto': False,
                                          'pte_addr': pte_addr})
def generate_memory_ranges(self):
    """Parse the plugin args and generate memory ranges.

    Yields rekall.addrspace.Run objects.
    """
    if not self.scan_specification_requested():
        # Copy the plugin defaults into the args.
        for k in self.plugin_args:
            if k.startswith("scan_"):
                self.plugin_args[k] = self.scanner_defaults.get(k, False)

    # Physical address space requested.
    if self.plugin_args.scan_physical:
        yield addrspace.Run(
            start=0,
            end=self.session.physical_address_space.end(),
            address_space=self.session.physical_address_space,
            data=dict(type="PhysicalAS"))

    # Scan all of the kernel address space.
    if self.plugin_args.scan_kernel:
        yield addrspace.Run(
            start=0,
            end=self.session.kernel_address_space.end(),
            address_space=self.session.kernel_address_space,
            data=dict(type="KernelAS"))

    # Scan the complete process memory, not including the kernel.
    if self.plugin_args.scan_process_memory:
        # We use direct inheritance here so we can support process
        # selectors.
        for task in self.filter_processes():
            cc = self.session.plugins.cc()
            with cc:
                # Switch to the process address space.
                cc.SwitchProcessContext(task)

                end = self.session.GetParameter(
                    "highest_usermode_address")

                resolver = self.session.address_resolver
                for module in sorted(resolver.GetAllModules(),
                                     key=lambda x: x.start):
                    # Skip modules in kernel space.
                    if module.start > end:
                        break

                    comment = "%s (%s), %s" % (task.name, task.pid,
                                               module.name)

                    self.session.logging.info(
                        "Scanning %s (%s) in: %s [%#x-%#x]" % (
                            task.name, task.pid, comment,
                            module.start, module.end))

                    yield addrspace.Run(
                        start=module.start,
                        end=module.end,
                        address_space=self.session.default_address_space,
                        data=dict(type=comment, module=module, task=task))
def get_mappings(self, start=0, end=2**64):
    """Generate all valid addresses.

    Walks the ARM short-descriptor first level translation table at
    self.dtb, yielding addrspace.Run objects for supersections (16MB),
    sections (1MB), and coarse page tables (delegated).

    Note that ARM requires page table entries for large sections to be
    duplicated (e.g. a supersection first_level_descriptor must be
    duplicated 16 times). We don't actually check for this here.
    """
    vaddr = 0
    while vaddr < end:
        # First level descriptor index comes from bits 31:20 of vaddr.
        l1_descriptor = self.read_long_phys(self.dtb | (
            (vaddr & self.table_index_mask) >> 18))

        l1_descriptor_type = l1_descriptor & 0b11

        # Page is invalid, skip the entire range.
        if l1_descriptor_type == 0b00:
            vaddr += 1 << 20
            continue

        if l1_descriptor_type == 0b10:
            # A valid super section is 16mb (1<<24) large.
            if l1_descriptor & self.super_section_mask:
                yield addrspace.Run(
                    start=vaddr,
                    end=vaddr + (1 << 24),
                    file_offset=(l1_descriptor &
                                 self.super_section_base_address_mask),
                    address_space=self.base)
                vaddr += 1 << 24
                continue

            # Regular sections is 1mb large.
            yield addrspace.Run(
                start=vaddr,
                end=vaddr + (1 << 20),
                file_offset=l1_descriptor & self.section_base_address_mask,
                address_space=self.base)
            vaddr += 1 << 20
            continue

        # Coarse page table contains a secondary fetch summing up to 1Mb.
        if l1_descriptor_type == 0b01:
            for x in self._generate_coarse_page_table_addresses(
                    vaddr, l1_descriptor &
                    self.coarse_page_table_base_address_mask):
                yield x

            vaddr += 1 << 20
            continue

        raise RuntimeError("Unreachable")
def get_executable_pages(self, start=0, end=2**64):
    """Enumerate all available ranges for executable pages.

    Yields Run objects for all available ranges in the virtual address
    space.
    """
    for pdpte_vaddr, pdpte_value, _ in self._get_available_PDPTEs(
            start, end):
        # BUG FIX: the valid bit must be tested on the PDPTE *value*, not
        # on the virtual address (pdpte_vaddr) as the original code did.
        if pdpte_value & self.proc_as.valid_mask and \
                pdpte_value & self.proc_as.page_size_mask:
            # huge page (1 GB)
            if not pdpte_value & self.nx_mask:
                yield addrspace.Run(
                    start=pdpte_vaddr,
                    end=pdpte_vaddr + self.HUGE_PAGE_SIZE,
                    file_offset=((pdpte_value & 0xfffffc0000000) |
                                 (pdpte_vaddr & 0x3fffffff)),
                    address_space=self.proc_as.base,
                    data={'pte_value': pdpte_value,
                          'is_proto': False})
            continue

        for pde_vaddr, pde_value, _ in self._get_available_PDEs(
                pdpte_vaddr, pdpte_value, start, end):
            if pde_value & self.proc_as.valid_mask and \
                    pde_value & self.proc_as.page_size_mask:
                # large page
                if not pde_value & self.nx_mask:
                    yield addrspace.Run(
                        start=pde_vaddr,
                        end=pde_vaddr + self.LARGE_PAGE_SIZE,
                        file_offset=(pde_value & 0xfffffffe00000) |
                        (pde_vaddr & 0x1fffff),
                        address_space=self.proc_as.base,
                        data={'pte_value': pde_value,
                              'is_proto': False})
                continue

            for vaddr, pte_value, _ in self._get_available_PTEs(
                    pde_value, pde_vaddr, start, end):
                run = self.is_page_executable(vaddr, pte_value)
                if run:
                    yield run
def _get_available_PTEs(self, pte_table, vaddr, start=0):
    """Scan the PTE table and yield address ranges which are valid.

    Args:
      pte_table: a sequence of raw PTE values covering one PDE range.
      vaddr: the virtual address the first PTE maps.
      start: entries whose range ends at or below this address are
        skipped.
    """
    tmp = vaddr
    for i in range(0, len(pte_table)):
        pfn = i << 12
        pte_value = pte_table[i]

        vaddr = tmp | pfn
        next_vaddr = tmp | ((i + 1) << 12)
        if start >= next_vaddr:
            continue

        # A PTE value of 0 means to consult the vad, but the vad shows no
        # mapping at this virtual address, so we can just skip this PTE in
        # the iteration.
        if self.vad:
            # BUG FIX: the original unpacked into "start, end, run",
            # shadowing the start parameter and breaking the
            # start >= next_vaddr filter for the rest of the loop.
            vad_start, _vad_end, _vad_run = \
                self.vad.get_containing_range(vaddr)
            if pte_value == 0 and vad_start is None:
                continue

        elif pte_value == 0:
            continue

        phys_addr = self._get_phys_addr_from_pte(vaddr, pte_value)

        # Only yield valid physical addresses. This will skip DemandZero
        # pages and File mappings into the filesystem.
        if phys_addr is not None:
            yield addrspace.Run(start=vaddr,
                                end=vaddr + 0x1000,
                                file_offset=phys_addr,
                                address_space=self.base)
def _get_available_PTEs(self, pte_table, vaddr, start=0):
    """Returns PFNs for each PTE entry.

    PTE values in a Xen guest hold machine frame numbers (MFNs); each is
    translated back to a physical frame via m2p() before use.
    """
    tmp3 = vaddr
    for i, pte_value in enumerate(pte_table):
        # Each of the PTE values has to be translated back to a PFN, since
        # they are MFNs.
        pte_value = self.m2p(pte_value)

        # When no translation was found, we skip the PTE, since we don't
        # know where it's pointing to.  (Idiom fix: "is None" instead of
        # "== None".)
        if pte_value is None:
            continue

        if not pte_value & self.valid_mask:
            continue

        vaddr = tmp3 | i << 12
        next_vaddr = tmp3 | ((i + 1) << 12)
        if start >= next_vaddr:
            continue

        yield addrspace.Run(
            start=vaddr,
            end=vaddr + 0x1000,
            file_offset=(pte_value & 0xffffffffff000) | (vaddr & 0xfff),
            address_space=self.base)
def vtop_run(self, addr):
    """Translate a single virtual address into a Run.

    Returns an addrspace.Run anchored at addr whose file_offset is the
    translated physical address, or None when the address does not
    translate.
    """
    physical = self.vtop(addr)
    if physical is None:
        return None

    return addrspace.Run(start=addr,
                         end=addr,
                         file_offset=physical,
                         address_space=self.base)
def get_mappings(self, start=0, end=2**64):
    """Yield one Run per resident page, in ascending page order.

    Note: a fresh local is used for the page address so the start
    parameter is not shadowed (the original rebound it; the value was
    unused afterwards, so behavior is unchanged).
    """
    page_size = self.PAGE_SIZE
    for run_pfn in sorted(self.runs):
        page_start = run_pfn * page_size
        yield addrspace.Run(start=page_start,
                            end=page_start + page_size,
                            file_offset=self.vtop(page_start),
                            address_space=self.base)
def get_mappings(self, start=0, end=2**64): """Enumerate all available ranges. Yields Run objects for all available ranges in the virtual address space. """ # Pages that hold PDEs and PTEs are 0x1000 bytes each. # Each PDE and PTE is eight bytes. Thus there are 0x1000 / 8 = 0x200 # PDEs and PTEs we must test. for pml4e_index in range(0, 0x200): vaddr = pml4e_index << 39 if vaddr > end: return next_vaddr = (pml4e_index + 1) << 39 if start >= next_vaddr: continue pml4e_addr = ((self.get_pml4() & 0xffffffffff000) | ((vaddr & 0xff8000000000) >> 36)) pml4e_value = self.read_pte(pml4e_addr) if not pml4e_value & self.valid_mask: continue tmp1 = vaddr for pdpte_index in range(0, 0x200): vaddr = tmp1 + (pdpte_index << 30) if vaddr > end: return next_vaddr = tmp1 + ((pdpte_index + 1) << 30) if start >= next_vaddr: continue # Bits 51:12 are from the PML4E # Bits 11:3 are bits 38:30 of the linear address pdpte_addr = ((pml4e_value & 0xffffffffff000) | ((vaddr & 0x7FC0000000) >> 27)) pdpte_value = self.read_pte(pdpte_addr) if not pdpte_value & self.valid_mask: continue # 1 gig page. if pdpte_value & self.page_size_mask: yield addrspace.Run( start=vaddr, end=vaddr + 0x40000000, file_offset=((pdpte_value & 0xfffffc0000000) | (vaddr & 0x3fffffff)), address_space=self.base) continue for x in self._get_available_PDEs(vaddr, pdpte_value, start, end): yield x
def ParseIOMap(string):
    """Parse the text of an iomap (e.g. /proc/iomem style) listing.

    Each line has the form "start-end : name". Returns a dict mapping
    name -> list of addrspace.Run covering [start, end].

    Raises:
      IOError: if any line does not match the expected format.
    """
    result = {}
    # Raw string avoids the invalid "\s" escape in a plain literal.
    line_re = re.compile(r"([0-9a-f]+)-([0-9a-f]+)\s*:\s*(.+)")
    for line in string.splitlines():
        m = line_re.search(line)
        if not m:
            raise IOError("Unable to parse iomap")

        # int(x, 16) accepts the bare hex digits; no "0x" prefix needed.
        result.setdefault(m.group(3), []).append(
            addrspace.Run(start=int(m.group(1), 16),
                          end=int(m.group(2), 16)))

    return result
def ParseIOMap(string):
    """Parse the text of an iomap (e.g. /proc/iomem style) listing.

    Each line has the form "start-end : name". Returns a dict mapping
    name -> list of addrspace.Run covering [start, end].

    Raises:
      IOError: if any line does not match the expected format.
    """
    result = {}
    # Raw string avoids the invalid "\s" escape in a plain literal.
    line_re = re.compile(r"([0-9a-f]+)-([0-9a-f]+)\s*:\s*(.+)")
    for line in string.splitlines():
        m = line_re.search(line)
        if not m:
            # BUG FIX: this branch previously dropped into the pdb
            # debugger (leftover debugging code). Raise like the sibling
            # ParseIOMap implementation instead.
            raise IOError("Unable to parse iomap")

        result.setdefault(m.group(3), []).append(
            addrspace.Run(start=int("0x" + m.group(1), 16),
                          end=int("0x" + m.group(2), 16)))

    return result
def _get_available_PDEs(self, vaddr, pdpte_value, start, end):
    """Yield Runs for all pages reachable under one PDPTE.

    Handles 2MB large pages directly and delegates 4KB pages to
    _get_available_PTEs. PDE values are translated through m2p(), so
    this presumably runs against a Xen guest — values are MFNs.
    """
    # This reads the entire PDE table at once - On
    # windows where IO is extremely expensive, its
    # about 10 times more efficient than reading it
    # one value at the time - and this loop is HOT!
    pde_table_addr = self._get_pde_addr(pdpte_value, vaddr)
    if pde_table_addr is None:
        return

    data = self.base.read(pde_table_addr, 8 * 0x200)
    pde_table = struct.unpack("<" + "Q" * 0x200, data)

    tmp2 = vaddr
    for pde_index in range(0, 0x200):
        vaddr = tmp2 | (pde_index << 21)  # Each PDE covers 2MB.
        if vaddr > end:
            return

        next_vaddr = tmp2 | ((pde_index + 1) << 21)
        if start >= next_vaddr:
            continue

        pde_value = self.m2p(pde_table[pde_index])
        if pde_value & self.valid_mask and pde_value & self.page_size_mask:
            yield addrspace.Run(
                start=vaddr,
                end=vaddr + 0x200000,
                file_offset=(pde_value & 0xfffffffe00000) |
                (vaddr & 0x1fffff),
                address_space=self.base)
            continue

        # This reads the entire PTE table at once - On
        # windows where IO is extremely expensive, its
        # about 10 times more efficient than reading it
        # one value at the time - and this loop is HOT!
        pte_table_addr = self._get_pte_addr(vaddr, pde_value)

        # Invalid PTEs.
        if pte_table_addr is None:
            continue

        data = self.base.read(pte_table_addr, 8 * 0x200)
        pte_table = struct.unpack("<" + "Q" * 0x200, data)

        # NOTE(review): this passes an "end" kwarg to
        # _get_available_PTEs; verify the companion in this class accepts
        # it (the variants elsewhere in this file take only "start").
        for x in self._get_available_PTEs(pte_table, vaddr,
                                          start=start, end=end):
            yield x
def _get_available_PTEs(self, pte_table, vaddr, start=0):
    """Yield a Run for every valid PTE in pte_table.

    vaddr is the virtual address mapped by the first entry; entries whose
    4KB range ends at or below start are skipped.
    """
    base_vaddr = vaddr
    for index, entry in enumerate(pte_table):
        # Skip entries without the hardware valid bit.
        if not entry & self.valid_mask:
            continue

        vaddr = base_vaddr | index << 12
        if start >= (base_vaddr | ((index + 1) << 12)):
            continue

        yield addrspace.Run(
            start=vaddr,
            end=vaddr + 0x1000,
            file_offset=(entry & 0xffffffffff000) | (vaddr & 0xfff),
            address_space=self.base)
def get_mappings(self, start=0):
    """Yield the parent's runs clamped to this address space's end.

    Compressed runs are expanded to a full compression unit before
    clamping.
    """
    for run in super(RunListAddressSpace, self).get_mappings(start=start):
        if start > run.end:
            continue

        length = run.length
        # When the run is compressed it really contains an entire
        # compression unit.
        if run.data.get("compression"):
            length = self.compression_unit_size

        # BUG FIX: clamp the (possibly expanded) length, not run.length -
        # the original re-read run.length here, which made the
        # compression-unit expansion above dead code.
        length = min(length, self.end() - run.start)

        if length > 0:
            yield addrspace.Run(start=run.start,
                                end=run.start + length,
                                address_space=run.address_space,
                                file_offset=run.file_offset)
def get_executable_pages_x86(self, start=0, end=2**64):
    """Enumerate executable pages of a 32 bit process.

    Yields Run objects for large pages that are not marked NX, and for
    4KB pages that is_page_executable() reports as executable.
    """
    for pde_vaddr, pde_value, _ in self.get_available_PDEs_x86(start, end):
        if pde_value & self.proc_as.valid_mask and \
                pde_value & self.proc_as.page_size_mask:
            if not pde_value & self.nx_mask:
                # BUG FIX: the large-page offset previously used "vaddr",
                # which is undefined at this point (NameError); the
                # correct base is pde_vaddr, as in get_all_pages_x86.
                yield addrspace.Run(
                    start=pde_vaddr,
                    end=pde_vaddr + self.LARGE_PAGE_SIZE,
                    file_offset=(pde_value & 0xfffffffe00000) |
                    (pde_vaddr & 0x1fffff),
                    address_space=self.proc_as.base)
            continue

        for vaddr, pte_value, _ in self.get_available_PTEs_x86(
                pde_vaddr, pde_value, start, end):
            run = self.is_page_executable(vaddr, pte_value)
            if run:
                yield run
def collect(self):
    """Scan the files given in plugin_args.paths and emit yara hits.

    Each file is wrapped in an FDAddressSpace and scanned; at most
    plugin_args.hits results are produced in total (the limit is shared
    across all files).
    """
    count = 0
    for path in self.plugin_args.paths:
        file_info = common.FileFactory(path, session=self.session)
        run = addrspace.Run(start=0,
                            end=file_info.st_size,
                            file_offset=0,
                            address_space=standard.FDAddressSpace(
                                session=self.session,
                                fhandle=file_info.open()))

        for rule, address, _, _ in self.generate_hits(run):
            count += 1
            # NOTE: break happens before the yield, so the hit that
            # reaches the limit is itself not reported.
            if count >= self.plugin_args.hits:
                break

            yield (file_info, rule, address,
                   utils.HexDumpedString(
                       run.address_space.read(
                           address - self.plugin_args.pre_context,
                           self.plugin_args.context +
                           self.plugin_args.pre_context)),
                   None)
def get_mappings(self, start=0):
    """Expose the whole EWF image as a single contiguous mapping."""
    image_size = self.ewf_file.size
    yield addrspace.Run(start=0,
                        end=image_size,
                        file_offset=0,
                        address_space=self)
def get_mappings(self):
    """The file maps one-to-one: a single run over the base space."""
    total = self.fsize
    yield addrspace.Run(start=0,
                        end=total,
                        file_offset=0,
                        address_space=self.base)
def get_mappings(self, start=0, end=2**64): """A generator of address, length tuple for all valid memory regions.""" # Pages that hold PDEs and PTEs are 0x1000 bytes each. # Each PDE and PTE is eight bytes. Thus there are 0x1000 / 8 = 0x200 # PDEs and PTEs we must test. for pdpte_index in range(0, 4): vaddr = pdpte_index << 30 if vaddr > end: return next_vaddr = (pdpte_index + 1) << 30 if start >= next_vaddr: continue # Bits 31:5 come from CR3 # Bits 4:3 come from bits 31:30 of the original linear address pdpte_addr = (self.dtb & 0xffffffe0) | ((vaddr & 0xc0000000) >> 27) pdpte_value = self.read_pte(pdpte_addr) if not pdpte_value & self.valid_mask: continue tmp1 = vaddr for pde_index in range(0, 0x200): vaddr = tmp1 | (pde_index << 21) if vaddr > end: return next_vaddr = tmp1 | ((pde_index + 1) << 21) if start >= next_vaddr: continue # Bits 51:12 are from the PDPTE # Bits 11:3 are bits 29:21 of the linear address pde_addr = ((pdpte_value & 0xffffffffff000) | ((vaddr & 0x3fe00000) >> 18)) pde_value = self.read_pte(pde_addr) if not pde_value & self.valid_mask: continue if pde_value & self.page_size_mask: yield addrspace.Run( start=vaddr, end=vaddr + 0x200000, file_offset=(pde_value & 0xfffffffe00000) | (vaddr & 0x1fffff), address_space=self.base) continue # This reads the entire PTE table at once - On # windows where IO is extremely expensive, its # about 10 times more efficient than reading it # one value at the time - and this loop is HOT! pte_table_addr = ((pde_value & 0xffffffffff000) | ((vaddr & 0x1ff000) >> 9)) data = self.base.read(pte_table_addr, 8 * 0x200) pte_table = struct.unpack("<" + "Q" * 0x200, data) tmp2 = vaddr for i, pte_value in enumerate(pte_table): if pte_value & self.valid_mask: vaddr = tmp2 | i << 12 if vaddr > end: return next_vaddr = tmp2 | (i + 1) << 12 if start >= next_vaddr: continue yield addrspace.Run( start=vaddr, end=vaddr + 0x1000, file_offset=((pte_value & 0xffffffffff000) | (vaddr & 0xfff)), address_space=self.base)
def is_page_executable(self, vaddr, pte_value):
    """This function returns a Run object for pages that are executable.

    It will, however, skip pages that have not yet been accessed, even if
    they would be executable once accessed.

    The PTE is classified as one of: valid (hardware), proto-pointer,
    transition, or pagefile; any other non-demand-zero state is logged
    as unknown and yields None.
    """
    executable = False
    phys_addr = None

    if self._is_demand_zero_pte(pte_value):
        return None

    # active page
    if pte_value & self.valid_mask:
        if not pte_value & self.nx_mask:
            pfn = ((pte_value & self.hard_pfn_mask) >>
                   self.hard_pfn_start)
            phys_addr = (pfn << self.hard_pfn_start | (vaddr & 0xfff))
            executable = True
        else:
            return None

    # proto-pointer
    elif pte_value & self.prototype_mask:
        proto_address = ((self.proto_protoaddress_mask & pte_value) >>
                         self.proto_protoaddress_start)

        if (proto_address == self.proto_pointer_identifier):
            protection_value = ((pte_value & self.soft_protection_mask) >>
                                self.soft_protection_start)

            # We observed this state for mapped data files
            # with no COPY-ON-WRITE.
            # As it is unusual to have a data file mapped with
            # executable rights, we report these.
            if protection_value in self._executable_choices:
                # Gathering the physical address this way at this point
                # is inefficient, as it traverses the page tables again,
                # but since this state is reached very seldom, we use this
                # lazy approach for now.
                phys_addr = \
                    self.task_as._get_phys_addr_from_pte(
                        vaddr, pte_value)
                executable = True
            else:
                return None

        # in this case, we have to analyze the prototype PTE
        else:
            protection_value = ((pte_value & self.proto_protection_mask) >>
                                self.proto_protection_start)
            if protection_value in self._executable_choices:
                phys_addr = \
                    self.task_as._get_phys_addr_from_pte(vaddr, pte_value)
                executable = True
            else:
                # Fetch the prototype PTE itself and classify it.
                proto_value = self.task_as.read(proto_address, 8)
                proto_value = struct.unpack('<Q', proto_value)[0]
                return self.is_page_executable_proto(vaddr, proto_value)

    # in transition
    elif pte_value & self.transition_mask:
        if ((pte_value & self.soft_protection_mask) >>
                self.soft_protection_start) in \
                self._executable_choices:
            pfn = ((pte_value & self.trans_pfn_mask) >>
                   self.trans_pfn_start)
            phys_addr = (pfn << self.trans_pfn_start | vaddr & 0xfff)
            executable = True
        else:
            return None

    # pagefile PTE
    elif pte_value & self.soft_pagefilehigh_mask:
        if ((pte_value & self.soft_protection_mask) >>
                self.soft_protection_start) in \
                self._executable_choices:
            pagefile_address = \
                (((pte_value & self.soft_pagefilehigh_mask) >>
                  self.soft_pagefilehigh_start) * 0x1000 +
                 (vaddr & 0xFFF))
            pagefile_num = \
                ((pte_value & self.soft_pagefilelow_mask) >>
                 self.soft_pagefilelow_start)

            # TODO Verify the phys_addr part. Rekall's pagefile support
            # seems to be broken at the moment.
            # If the pagefile support doesn't work, the actual content
            # can't be read but the plugin will still report the page.
            phys_addr = \
                self.task_as._get_pagefile_mapped_address(
                    pagefile_num, pagefile_address)
            executable = True
        else:
            return None

    if executable:
        return addrspace.Run(start=vaddr,
                             end=vaddr + self.PAGE_SIZE,
                             file_offset=phys_addr,
                             address_space=self.task_as.base,
                             data={'pte_value': pte_value,
                                   'proto': False})

    # unknown state
    self.session.logging.warning(
        "Unknown PTE value: 0x{:x}".format(pte_value))
    return None
def generate_memory_ranges(self):
    """Generate Runs to scan, each clamped to plugin_args.limit.

    Extends the base scanner ranges with Windows specific subregions of
    kernel space (session pools, nonpaged/paged pool, kernel modules)
    when the whole kernel space was not requested.
    """
    for run in super(WinScanner, self).generate_memory_ranges():
        run.length = min(run.length, self.plugin_args.limit)
        yield run

    # If the user did not just ask to scan the entire kernel space, support
    # dividing the kernel space into subregions.
    if not self.plugin_args.scan_kernel:
        regions = list(self.session.plugins.virt_map())

        # Scan session pools in each process.
        if self.plugin_args.scan_kernel_session_pools:
            pools_plugin = self.session.plugins.pools()

            for desc in pools_plugin.find_session_pool_descriptors():
                comment = desc.Comment
                self.session.logging.info(
                    "Scanning in: %s. [%#x-%#x]" % (
                        comment, desc.PoolStart, desc.PoolEnd))

                run = addrspace.Run(start=desc.PoolStart,
                                    end=desc.PoolEnd,
                                    address_space=desc.obj_vm,
                                    data=dict(type=comment))
                run.length = min(run.length, self.plugin_args.limit)
                yield run

        # Non paged pool selection.
        if self.plugin_args.scan_kernel_nonpaged_pool:
            for region in regions:
                # Renamed from "type" to avoid shadowing the builtin.
                region_type = utils.SmartUnicode(region["type"])
                if "NonPagedPool" not in region_type:
                    continue

                comment = "Pool %s" % region_type
                self.session.logging.info(
                    "Scanning in: %s. [%#x-%#x]" % (
                        comment, region["virt_start"],
                        region["virt_end"]))

                run = addrspace.Run(
                    start=region["virt_start"],
                    end=region["virt_end"],
                    address_space=self.session.kernel_address_space,
                    data=dict(type=comment))
                run.length = min(run.length, self.plugin_args.limit)
                yield run

        if self.plugin_args.scan_kernel_paged_pool:
            for region in regions:
                if region["type"] != "PagedPool":
                    continue

                comment = "Pool %s" % region["type"]
                self.session.logging.info(
                    "Scanning in: %s [%#x-%#x]" % (
                        comment, region["virt_start"],
                        region["virt_end"]))

                run = addrspace.Run(
                    start=region["virt_start"],
                    end=region["virt_end"],
                    address_space=self.session.kernel_address_space,
                    data=dict(type=comment))
                run.length = min(run.length, self.plugin_args.limit)
                yield run

        if self.plugin_args.scan_kernel_code:
            cc = self.session.plugins.cc()
            with cc:
                cc.SwitchProcessContext(None)
                for module in \
                        self.session.address_resolver.GetAllModules():
                    comment = "Module %s" % module.name

                    self.session.logging.info(
                        "Scanning in: %s [%#x-%#x]" % (
                            comment, module.start, module.end))

                    run = addrspace.Run(
                        start=module.start,
                        end=module.end,
                        address_space=self.session.kernel_address_space,
                        data=dict(type=comment, module=module))
                    run.length = min(run.length, self.plugin_args.limit)
                    yield run
                    # BUG FIX: the original repeated the clamp-and-yield
                    # statements once more after this loop, yielding the
                    # last module run twice. The duplicate was removed.
def is_page_executable_proto(self, vaddr, pte_value):
    """This function returns a Run object for pages that are executable.

    It will, however, skip pages that have not yet been accessed, even if
    they would be executable once accessed.

    This variant classifies a *prototype* PTE value: valid (hardware),
    subsection, transition, or pagefile; any other non-demand-zero state
    is logged as unknown and yields None.
    """
    executable = False
    phys_addr = None

    if self._is_demand_zero_pte(pte_value):
        return None

    # active page
    if pte_value & self.valid_mask:
        if not pte_value & self.nx_mask:
            pfn = ((pte_value & self.hard_pfn_mask) >>
                   self.hard_pfn_start)
            phys_addr = (pfn << self.hard_pfn_start | (vaddr & 0xfff))
            executable = True
        else:
            return None

    # subsection
    elif pte_value & self.prototype_mask:
        if ((pte_value & self.soft_protection_mask) >>
                self.soft_protection_start) in \
                self._executable_choices:
            phys_addr = \
                self._get_subsection_mapped_address(pte_value,
                                                    isAddress=False)
            executable = True
        else:
            return None

    # in transition
    elif pte_value & self.transition_mask:
        if ((pte_value & self.soft_protection_mask) >>
                self.soft_protection_start) in \
                self._executable_choices:
            pfn = ((pte_value & self.trans_pfn_mask) >>
                   self.trans_pfn_start)
            phys_addr = (pfn << self.trans_pfn_start | vaddr & 0xfff)
            executable = True
        else:
            return None

    # pagefile PTE
    elif pte_value & self.soft_pagefilehigh_mask:
        if ((pte_value & self.soft_protection_mask) >>
                self.soft_protection_start) in \
                self._executable_choices:
            pagefile_address = \
                (((pte_value & self.soft_pagefilehigh_mask) >>
                  self.soft_pagefilehigh_start) * 0x1000 +
                 (vaddr & 0xFFF))
            pagefile_num = \
                ((pte_value & self.soft_pagefilelow_mask) >>
                 self.soft_pagefilelow_start)

            # TODO Verify the phys_addr part. Rekall's pagefile support
            # seems to be broken at the moment.
            # If the pagefile support doesn't work, the actual content
            # can't be read but the plugin will still report the page.
            phys_addr = \
                self.task_as._get_pagefile_mapped_address(
                    pagefile_num, pagefile_address)
            executable = True
        else:
            return None

    if executable:
        return addrspace.Run(start=vaddr,
                             end=vaddr + self.PAGE_SIZE,
                             file_offset=phys_addr,
                             address_space=self.task_as.base,
                             data={'pte_value': pte_value,
                                   'proto': True})

    # unknown state
    self.session.logging.warning(
        "Unknown PTE value: 0x{:x}".format(pte_value))
    return None
def get_mappings(self, start=0, end=2**64):
    """Describe this space as one run spanning min_addr to max_addr."""
    lo = self.min_addr
    hi = self.max_addr
    yield addrspace.Run(start=lo,
                        end=hi,
                        file_offset=0,
                        address_space=self)
def get_mappings(self, start=0, end=2 ** 64):
    """The VMI-backed guest memory is exposed as a single run."""
    memory_size = self.vmi.get_memsize()
    yield addrspace.Run(start=0,
                        end=memory_size,
                        file_offset=0,
                        address_space=self)
def generate_memory_ranges(self):
    """Generate Runs to scan for the Windows scanner.

    Extends the base scanner ranges with Windows specific subregions of
    kernel space (session pools, nonpaged/paged pool, kernel modules)
    when the whole kernel space was not requested. Pool descriptors are
    de-duplicated by their PoolStart address.
    """
    for run in super(WinScanner, self).generate_memory_ranges():
        yield run

    # Tracks PoolStart addresses already emitted, so the same pool is
    # never scanned twice.
    pools_seen = set()

    # If the user did not just ask to scan the entire kernel space, support
    # dividing the kernel space into subregions.
    if not self.plugin_args.scan_kernel:
        pool_plugin = self.session.plugins.pools()

        # Scan session pools in each process.
        if self.plugin_args.scan_kernel_session_pools:
            for pool in pool_plugin.find_session_pool_descriptors():
                if pool.PoolStart in pools_seen:
                    continue
                pools_seen.add(pool.PoolStart)

                comment = "%s" % pool.PoolType
                if pool.Comment:
                    comment += " (%s)" % pool.Comment

                self.session.logging.info(
                    "Scanning in: %s. [%#x-%#x]" % (
                        comment, pool.PoolStart, pool.PoolEnd))

                yield addrspace.Run(start=pool.PoolStart,
                                    end=pool.PoolEnd,
                                    address_space=pool.obj_vm,
                                    data=dict(type=comment, pool=pool))

        # Non paged pool selection.
        if self.plugin_args.scan_kernel_nonpaged_pool:
            for pool in pool_plugin.find_non_paged_pool():
                if pool.PoolStart in pools_seen:
                    continue
                pools_seen.add(pool.PoolStart)

                comment = "Pool %s" % pool.PoolType
                if pool.Comment:
                    comment += " (%s)" % pool.Comment

                self.session.logging.info(
                    "Scanning in: %s. [%#x-%#x]" % (
                        comment, pool.PoolStart, pool.PoolEnd))

                yield addrspace.Run(start=pool.PoolStart,
                                    end=pool.PoolEnd,
                                    address_space=pool.obj_vm,
                                    data=dict(type=comment, pool=pool))

        if self.plugin_args.scan_kernel_paged_pool:
            for pool in pool_plugin.find_paged_pool():
                if pool.PoolStart in pools_seen:
                    continue
                pools_seen.add(pool.PoolStart)

                comment = "Pool %s" % pool.PoolType
                if pool.Comment:
                    comment += " (%s)" % pool.Comment

                self.session.logging.info(
                    "Scanning in: %s [%#x-%#x]" % (
                        comment, pool.PoolStart, pool.PoolEnd))

                yield addrspace.Run(start=pool.PoolStart,
                                    end=pool.PoolEnd,
                                    address_space=pool.obj_vm,
                                    data=dict(type=comment, pool=pool))

        if self.plugin_args.scan_kernel_code:
            cc = self.session.plugins.cc()
            with cc:
                # Scan modules from the kernel context.
                cc.SwitchProcessContext(None)
                for module in \
                        self.session.address_resolver.GetAllModules():
                    comment = "Module %s" % module.name

                    self.session.logging.info(
                        "Scanning in: %s [%#x-%#x]" % (
                            comment, module.start, module.end))

                    yield addrspace.Run(
                        start=module.start,
                        end=module.end,
                        address_space=self.session.kernel_address_space,
                        data=dict(type=comment, module=module))
def get_mappings(self, start=0, end=2**64):
    """The whole file is one mapping; start/end filters are ignored."""
    _ = end  # unused: the single run always covers the full file
    yield addrspace.Run(start=0,
                        end=self.fsize,
                        file_offset=0,
                        address_space=self)
def get_mappings(self, start=0, end=2**64):
    """Guest physical memory is exposed as a single contiguous run."""
    _ = end  # unused: the single run always covers all guest memory
    yield addrspace.Run(start=0,
                        end=panda.memory_size(),
                        file_offset=0,
                        address_space=self)