def __init__(self, **kwargs):
    """Initialize a _HEAP_ENTRY, transparently decoding encoded entries.

    If the heap uses entry encoding (the "Encoding" key is present in the
    object context), the raw entry bytes are XORed with the encoding key
    and this object is re-based onto a buffer address space containing the
    decoded bytes, so subsequent field reads see plaintext values.
    """
    super(_HEAP_ENTRY, self).__init__(**kwargs)
    encoding = self.obj_context.get("Encoding")
    if encoding:
        # The heap's own address space holds the encoded bytes.
        heap_as = self.obj_context["HeapAS"]
        # Replace our backing address space with a decoded view of just
        # this entry, keeping the same base offset so obj_offset is valid.
        self.obj_vm = addrspace.BufferAddressSpace(
            session=self.obj_session,
            base_offset=self.obj_offset,
            data=utils.XOR(
                heap_as.read(self.obj_offset, self.obj_size), encoding))
def _msv_rpce_credentials(self, data):
    """Parse an _RPCE_CREDENTIAL_KEYCREDENTIAL blob from raw bytes.

    Args:
      data: Raw bytes containing the credential structure.

    Yields:
      (key_id, hex_encoded_key_data) tuples for each key entry.
    """
    vm = addrspace.BufferAddressSpace(data=data, session=self.session)
    cred_obj = self.Object('_RPCE_CREDENTIAL_KEYCREDENTIAL',
                           profile=self, vm=vm)
    # This seems to be corrupt sometimes. An implausibly large key count
    # means the structure can not be trusted, so bail out entirely.
    if cred_obj.unk0 > 10:
        return
    for i in range(0, cred_obj.unk0):
        # NOTE(review): str.encode('hex') is Python 2 only; a Python 3
        # port would need binascii.hexlify - confirm target interpreter.
        yield (cred_obj.key[i].unkId,
               cred_obj.key_data[i].data.v().encode('hex'))
def __init__(self, session, address_space, start, end,
             buffer_size=constants.SCAN_BLOCKSIZE, overlap_length=0):
    """Set up buffered iteration over [start, end) of address_space.

    Args:
      session: The session used to build the internal buffer AS.
      address_space: The address space whose base ranges will be walked.
      start: Absolute offset at which reading begins.
      end: Absolute offset at which reading stops.
      buffer_size: Size of each buffered chunk (default SCAN_BLOCKSIZE).
      overlap_length: Number of bytes carried over between chunks.
    """
    self.start = start
    self.end = end
    # Lazy generator over the merged contiguous ranges in [start, end).
    self._generator = address_space.merge_base_ranges(start=start, end=end)
    # Reusable buffer AS that successive chunks are assigned into.
    self.buffer_as = addrspace.BufferAddressSpace(session=session)
    self.buffer_size = buffer_size
    # Current absolute read position; advances as chunks are consumed.
    self.readptr = start
    self.overlap_length = overlap_length
    # Bytes carried over from the previous chunk (starts empty).
    self.overlap = ""
    self.current_run = None
    self.finished = False
def calculate(self):
    """Read the kernel's ObpInfoMaskToOffset table as a list of ints.

    Returns:
      A list of 256 integers, one per possible InfoMask value.
    """
    table_offset = self.session.profile.get_constant(
        "ObpInfoMaskToOffset", True)

    # Snapshot the whole table into a local buffer first so that each
    # byte access below does not become a separate image read.
    raw_table = self.session.kernel_address_space.read(table_offset, 0x100)
    cached_vm = addrspace.BufferAddressSpace(
        data=raw_table, session=self.session)

    table = self.session.profile.Array(
        target="byte", vm=cached_vm, count=0x100)
    return [int(entry) for entry in table]
def testArray(self):
    """Exercise Array construction and element access over a buffer AS."""
    # Create an address space from a buffer for testing
    address_space = addrspace.BufferAddressSpace(
        config=None, data="abcdefghijklmnopqrstuvwxyz")

    profile = obj.Profile.classes['Profile32Bits']()
    test = profile.Object("Array", vm=address_space, offset=0,
                          target="int", count=0)

    # Little-endian ints over "abcd" / "efgh".
    self.assertEqual(test[0], 0x64636261)
    self.assertEqual(test[1], 0x68676665)

    # Can read past the end of the array but this returns a None object.
    self.assertEqual(test[100], None)
def data(self):
    """Returns an address space representing the data of this attribute.

    Resident attributes are materialized into a buffer AS read directly
    from this attribute's content; non-resident attributes are exposed
    through a defragmented run-list address space over physical memory.
    """
    if self.resident:
        return addrspace.BufferAddressSpace(
            data=self.obj_vm.read(
                self.obj_offset + self.content_offset, self.content_size),
            session=self.obj_session)
    else:
        # Create a defragmented address space.
        address_space = RunListAddressSpace(
            run_list=list(self.RunList()),
            base=self.obj_session.physical_address_space,
            session=self.obj_session)
        return address_space
def pmem_parse_mmap(fd):
    """Retrieve and parse the physical memory map from the Pmem driver.

    Args:
      fd: File descriptor/handle passed through to pmem_get_mmap().

    Yields:
      tuples of (start, number of pages, type)
    """
    mmap, size, desc_size = pmem_get_mmap(fd)
    # NOTE(review): desc_size is unpacked but never used here - Array is
    # driven purely by the total size. Confirm descriptors are contiguous.
    session = rekall_session.Session()
    buffer_as = addrspace.BufferAddressSpace(data=mmap.raw, session=session)
    session.SetCache("default_address_space", buffer_as)
    profile = EFIProfile(session=session)
    for descriptor in profile.Array(offset=0, target="EFI_MEMORY_DESCRIPTOR",
                                    size=size):
        yield (descriptor.PhysicalStart, descriptor.NumberOfPages,
               descriptor.Type)
def __init__(self, type_name=None, expected_values=None, *args, **kwargs):
    """Scanner that matches struct members against expected values.

    Args:
      type_name: Name of the profile type being scanned for.
      expected_values: Iterable of member->value mappings; one set of
        checks is built per array index.
    """
    super(FastStructScanner, self).__init__(*args, **kwargs)
    self.type_name = type_name
    self.expected_values = expected_values
    # A prototype instance backed by a zero-filled buffer of exactly this
    # type's size - used as a template without touching the image.
    self.prototype = self.profile.Object(
        type_name=type_name,
        vm=addrspace.BufferAddressSpace(
            session=self.session,
            data=b"\x00" * self.profile.get_obj_size(type_name)))
    if not self.checks:
        self.checks = []
    elif isinstance(self.checks, tuple):
        # We need the checks array to be mutable.
        self.checks = list(self.checks)
    for array_idx, struct_members in enumerate(self.expected_values):
        self.checks.extend(self.build_checks(array_idx, struct_members))
def __init__(self, profile=None, address_space=None, window_size=8,
             session=None):
    """The base scanner.

    Args:
      profile: The kernel profile to use for this scan.
      address_space: The address space we use for scanning.
      window_size: The size of the overlap window between each buffer read.
      session: The session; defaults to the address space's session.
    """
    self.session = session or address_space.session
    self.address_space = address_space
    self.window_size = window_size
    # Constraints are built lazily so callers can add more after init.
    self.constraints = None
    self.profile = profile or self.session.profile
    self.max_length = None
    self.base_offset = None
    self.scan_buffer_offset = None
    # Reusable buffer AS that scan chunks are assigned into.
    self.buffer_as = addrspace.BufferAddressSpace(session=self.session)
def __init__(self, *args, **kwargs):
    """Initialize the Dump plugin from plugin_args and session defaults."""
    address_map = kwargs.pop("address_map", None)
    super(Dump, self).__init__(*args, **kwargs)
    self.offset = self.plugin_args.offset

    # default width can be set in the session.
    self.width = (self.plugin_args.width or
                  self.session.GetParameter("hexdump_width", 16))
    self.rows = (self.plugin_args.rows or
                 self.session.GetParameter("paging_limit", 30))
    self.address_map = address_map or AddressMap()

    # If literal data was supplied, dump from a buffer AS instead of a
    # real address space, and default length to the full data.
    if self.plugin_args.data:
        self.plugin_args.address_space = addrspace.BufferAddressSpace(
            data=self.plugin_args.data, session=self.session)
        if self.plugin_args.length is None:
            self.plugin_args.length = len(self.plugin_args.data)
def IterObject(self, type=None):
    """Gets the _OBJECT_HEADER considering optional headers.

    Args:
      type: Optional object type name; if given, only headers of that
        type are yielded.

    Yields:
      Valid _OBJECT_HEADER instances located after this pool header.
    """
    pool_align = self.obj_profile.get_constant("PoolAlignment")
    allocation_size = self.BlockSize * pool_align

    # Operate on a cached version of the next page.
    # We use a temporary buffer for the object to save reads of the image.
    cached_data = self.obj_vm.read(self.obj_offset + self.obj_size,
                                   allocation_size)
    cached_vm = addrspace.BufferAddressSpace(data=cached_data,
                                             session=self.obj_session)

    # We search for the _OBJECT_HEADER.InfoMask in close proximity to our
    # object. We build a lookup table between the values in the InfoMask and
    # the minimum distance there is between the start of _OBJECT_HEADER and
    # the end of _POOL_HEADER. This way we can quickly skip unreasonable
    # values.
    for i in range(0, allocation_size, pool_align):
        # Create a test object header from the cached vm to test for
        # validity.
        test_object = self.obj_profile._OBJECT_HEADER(offset=i, vm=cached_vm)

        # The largest optional-header offset must fit before position i.
        optional_preamble = max(test_object.NameInfoOffset,
                                test_object.HandleInfoOffset,
                                test_object.QuotaInfoOffset)

        # Obviously wrong because we need more space than we have.
        if optional_preamble > i:
            continue

        if test_object.is_valid():
            if type is not None and test_object.get_object_type() != type:
                continue
            # Re-create the header against the real image AS at the
            # absolute offset.
            yield self.obj_profile._OBJECT_HEADER(
                offset=i + self.obj_offset + self.obj_size,
                vm=self.obj_vm, parent=self)
def __init__(self, profile=None, address_space=None, window_size=8,
             session=None, checks=None):
    """The base scanner.

    Args:
      profile: The profile to use for this scan.
      address_space: The address space we use for scanning.
      window_size: The size of the overlap window between each buffer read.
      session: The session; defaults to the address space's session.
      checks: Optional explicit check list overriding the class default.
    """
    self.session = session or address_space.session
    self.address_space = address_space or self.session.default_address_space
    self.window_size = window_size
    # Constraints are built lazily so callers can add more after init.
    self.constraints = None
    if profile is None and self.session.HasParameter("profile"):
        profile = self.session.profile
    self.profile = profile
    self.max_length = None
    self.base_offset = None
    self.scan_buffer_offset = None
    # Reusable buffer AS that scan chunks are assigned into.
    self.buffer_as = addrspace.BufferAddressSpace(session=self.session)
    if checks is not None:
        self.checks = checks
def __init__(self, offset=0, address_space=None, data=None, length=None,
             width=None, rows=None, suppress_headers=False,
             address_map=None, **kwargs):
    """Hexdump an object or memory location.

    You can use this plugin repeateadely to keep dumping more data using the
    "p _" (print last result) operation:

    In [2]: dump 0x814b13b0, address_space="K"
    ------> dump(0x814b13b0, address_space="K")
    Offset                         Hex                              Data
    ---------- ------------------------------------------------ ----------------
    0x814b13b0 03 00 1b 00 00 00 00 00 b8 13 4b 81 b8 13 4b 81  ..........K...K.

    Out[3]: <rekall.plugins.core.Dump at 0x2967510>

    In [4]: p _
    ------> p(_)
    Offset                         Hex                              Data
    ---------- ------------------------------------------------ ----------------
    0x814b1440 70 39 00 00 54 1b 01 00 18 0a 00 00 32 59 00 00  p9..T.......2Y..
    0x814b1450 6c 3c 01 00 81 0a 00 00 18 0a 00 00 00 b0 0f 06  l<..............
    0x814b1460 00 10 3f 05 64 77 ed 81 d4 80 21 82 00 00 00 00  ..?.dw....!.....

    Args:
      offset: The offset to start dumping from.

      address_space: The address_space to dump from. If omitted we use the
        default address space.

      data: If provided we dump the string provided in data rather than use
        an address_space.

      length: If provided we stop dumping at the specified length.

      width: How many Hex character per line.

      rows: How many rows to dump.

      suppress_headers: If set we do not write the headers.
    """
    super(Dump, self).__init__(**kwargs)

    # Allow offset to be symbol name.
    if isinstance(offset, basestring):
        self.offset = self.session.address_resolver.get_address_by_name(
            offset)
    elif isinstance(offset, obj.BaseObject):
        # Dumping a BaseObject: dump its backing bytes from its own AS.
        self.offset = offset.obj_offset
        address_space = offset.obj_vm
        length = offset.obj_size
    else:
        self.offset = obj.Pointer.integer_to_address(offset)

    self.length = length

    # default width can be set in the session.
    if width is None:
        width = self.session.GetParameter("hexdump_width", 16)
    self.width = int(width)

    if rows is None:
        rows = self.session.GetParameter("paging_limit") or 30
    self.rows = int(rows)

    self.suppress_headers = suppress_headers
    self.address_map = address_map or AddressMap()

    # Literal data takes precedence: wrap it in a buffer AS and default
    # the dump length to the whole buffer.
    if data is not None:
        address_space = addrspace.BufferAddressSpace(
            data=data, session=self.session)
        if self.length is None:
            self.length = len(data)

    # Resolve the correct address space. This allows the address space to be
    # specified from the command line (e.g. a short name like "K" or "P").
    load_as = self.session.plugins.load_as()
    self.address_space = load_as.ResolveAddressSpace(address_space)
def testPointer(self):
    """Exercise Pointer semantics: deref, arithmetic, 32/64-bit widths."""
    # Create an address space from a buffer for testing
    address_space = addrspace.BufferAddressSpace(
        data="\x08\x00\x00\x00\x00\x00\x00\x00"
             "\x66\x55\x44\x33\x00\x00\x00\x00"
             "\x99\x88\x77\x66\x55\x44\x33\x22",
        session=self.session)

    vtype = {
        'Test': [
            0x10, {
                # Check simple type dereferencing
                'ptr32': [0x00, ['Pointer', dict(target='unsigned long')]],
                'ptr64': [0x00, ['Pointer', dict(target='long long')]],

                # Check struct dereferencing
                '_next': [0x00, ['Pointer', dict(target='Test')]],

                # A pointer to an invalid location
                'invalid': [0x08, ['Pointer', dict(target='long')]],

                # A void pointer
                'void': [0x00, ['Pointer', dict(target='Void')]],
            }
        ]
    }

    # We build a simple profile with just the native types here.
    profile = obj.Profile.classes['Profile32Bits'](session=self.session)
    profile.add_types(vtype)

    test = profile.Object("Test", offset=0, vm=address_space)

    ptr = test.ptr32

    # Can we check the offset of members?
    self.assertEqual(profile.get_obj_offset("Test", "invalid"), 8)

    # 32 bit pointers.
    self.assertEqual(ptr.obj_size, 4)

    # The pointer itself is at location 0.
    self.assertEqual(ptr.obj_offset, 0)

    # But is pointing to location 8.
    self.assertEqual(ptr.v(), 8)
    self.assertEqual(int(ptr), 8)
    self.assertEqual(ptr, 8)

    # The long is the next 8 bytes.
    self.assertEqual(ptr.dereference(), 0x33445566)

    # Pointer comparison
    self.assertEqual(test.ptr32, test.ptr64)

    # We could do pointer arithmetic.
    ptr2 = ptr + 2

    # The new pointer is at location 8 (its 32 bits).
    self.assertEqual(ptr2.obj_offset, 8)

    # The pointer to long long is moving twice as fast
    self.assertEqual(test.ptr64 + 1, 0x33445566)
    self.assertEqual(test.ptr32 + 1, 0)

    # And its pointing to.
    self.assertEqual(ptr2.v(), 0x33445566)

    # The above makes the pointer invalid, so dereferencing it returns a 0.
    # (This is because there is no good way to validate pages except at
    # system runtime.)
    self.assertEqual(ptr2.dereference(), 0)

    # This is also invalid and will return a zero.
    self.assertEqual(test.invalid.dereference(), 0)

    # Test nonzero.
    self.assert_(test.ptr32)

    # Now dereference a struct.
    ptr3 = test._next

    # This struct starts at offset 8.
    self.assertEqual(test._next.v(), 8)

    next = ptr3.dereference()

    # We get another struct from this.
    self.assertEqual(next.obj_type, "Test")

    # This new struct's ptr32 is pointing at this address now.
    self.assertEqual(next.ptr32, 0x33445566)

    # Now test 64 bit pointers.
    profile = obj.Profile.classes['ProfileLLP64'](session=self.session)
    profile.add_types(vtype)

    test = profile.Object("Test", offset=0, vm=address_space)

    ptr = test.ptr32

    # 64 bit pointers.
    self.assertEqual(ptr.obj_size, 8)

    # The pointer itself is at location 0.
    self.assertEqual(ptr.obj_offset, 0)

    # But is pointing to location 8.
    self.assertEqual(ptr.v(), 8)

    # The long is the next 8 bytes.
    self.assertEqual(ptr.dereference(), 0x33445566)

    # We could do pointer arithmetic.
    ptr2 = ptr + 2

    # This will advance the pointer by 8 bytes (still pointer to long).
    self.assertEqual(ptr2.obj_offset, 8)
    self.assertEqual(ptr2, 0x33445566)

    # NOTE: We assume that long is 32 bits wide in both 64 bits and 32 bits
    # mode - which is the way windows does it. This is not the same as linux
    # which has long being the bit size in both cases.

    # Test the void pointer
    self.assertEqual(test.void, 8)

    # A Void object can not be compared to anything!
    self.assertNotEqual(test.void.dereference(), 0x33445566)
def setUp(self):
    """Build a fresh session backed by an in-memory buffer address space."""
    self.session = session.Session()

    # A repeating ASCII pattern gives us 1100 bytes of known test data.
    sample = "hello world" * 100
    self.address_space = addrspace.BufferAddressSpace(
        data=sample, session=self.session)
def IterObject(self, type=None, freed=True):
    """Generates possible _OBJECT_HEADER accounting for optional headers.

    Note that not all pool allocations have an _OBJECT_HEADER - only ones
    allocated from the the object manager. This means calling this method
    depends on which pool allocation you are after.

    On windows 8, pool allocations are done from preset sizes. This means
    that the allocation is never exactly the same size and we can not use
    the bottom up method like before.

    We therefore, have to build the headers forward by checking the preamble
    size and validity of each object. This is a little slower than with
    earlier versions of windows.

    Args:
      type: The object type name. If not specified we return all objects.
      freed: If True, also yield headers whose TypeIndex marks them freed.
    """
    pool_align = self.obj_profile.get_constant("PoolAlignment")
    allocation_size = self.BlockSize * pool_align

    # Operate on a cached version of the next page.
    # We use a temporary buffer for the object to save reads of the image.
    start = self.obj_end
    cached_data = self.obj_vm.read(start, allocation_size)
    cached_vm = addrspace.BufferAddressSpace(
        base_offset=start,
        data=cached_data,
        session=self.obj_session,
        metadata=dict(image=self.obj_vm.metadata("image")))

    # We search for the _OBJECT_HEADER.InfoMask in close proximity to our
    # object. We build a lookup table between the values in the InfoMask and
    # the minimum distance there is between the start of _OBJECT_HEADER and
    # the end of _POOL_HEADER. This way we can quickly skip unreasonable
    # values.

    # This is the offset within _OBJECT_HEADER of InfoMask.
    info_mask_offset = self.obj_profile.get_obj_offset(
        "_OBJECT_HEADER", "InfoMask")

    # Build the cache if needed.
    if not self.lookup:
        self._BuildLookupTable()

    # Walk over all positions in the address space and try to fit an object
    # header there.
    for i in utils.xrange(start,
                          start + allocation_size - info_mask_offset,
                          pool_align):
        possible_info_mask = cached_data[i - start + info_mask_offset]
        #if possible_info_mask > '\x7f':
        #    continue

        # The minimum amount of space needed before the object header to
        # hold all the optional headers.
        minimum_offset = self.lookup[possible_info_mask]

        # Obviously wrong because we need more space than we have.
        if minimum_offset > i - start:
            continue

        # Create a test object header from the cached vm to test for
        # validity.
        test_object = self.obj_profile._OBJECT_HEADER(offset=i, vm=cached_vm)
        if test_object.is_valid():
            if (type is None or
                    test_object.get_object_type() == type or
                    # Freed objects point to index 2
                    # (which is also 0xbad0b0b0).
                    (freed and test_object.TypeIndex == 2)):
                yield test_object
def __init__(self, runs=None, data=None, **kwargs):
    """An address space with explicitly supplied runs over a data buffer.

    Args:
      runs: Optional iterable of run entries to insert into self.runs.
        May be None (the default), in which case no runs are added.
      data: Raw bytes backing the base BufferAddressSpace.
    """
    super(CustomRunsAddressSpace, self).__init__(**kwargs)
    self.base = addrspace.BufferAddressSpace(data=data,
                                             session=self.session)
    # Bug fix: the declared default runs=None previously raised a
    # TypeError when iterated; treat it as "no runs".
    for run in runs or []:
        self.runs.insert(run)
def IterObject(self, type=None, freed=True):
    """Generates possible _OBJECT_HEADER accounting for optional headers.

    Note that not all pool allocations have an _OBJECT_HEADER - only ones
    allocated from the the object manager. This means calling this method
    depends on which pool allocation you are after.

    On windows 8, pool allocations are done from preset sizes. This means
    that the allocation is never exactly the same size and we can not use
    the bottom up method like before.

    We therefore, have to build the headers forward by checking the preamble
    size and validity of each object. This is a little slower than with
    earlier versions of windows.

    Args:
      type: The object type name. If not specified we return all objects.
      freed: If True, also yield headers whose TypeIndex marks them freed.
    """
    alignment = self.obj_profile.get_constant("PoolAlignment")

    # Operate on a cached version of the next page.
    # We use a temporary buffer for the object to save reads of the image.
    # self.obj_end points at the offset right after the _POOL_HEADER.
    start_offset = self.obj_end
    assert self.obj_size == 16
    allocation_size = self.BlockSize * alignment
    cached_data = self.obj_vm.read(start_offset, allocation_size)

    # for debug
    # if allocation_size > 0:
    #     pool_data = self.obj_vm.read(start - 16, allocation_size)
    #     with open(f'c:\\Temp\\psscan\\{start}_{allocation_size}.dmp', 'wb') as fp:
    #         fp.write((pool_data))

    cached_vm = addrspace.BufferAddressSpace(base_offset=start_offset,
                                             data=cached_data,
                                             session=self.obj_session)

    # We search for the _OBJECT_HEADER.InfoMask in close proximity to our
    # object. We build a lookup table between the values in the InfoMask and
    # the minimum distance there is between the start of _OBJECT_HEADER and
    # the end of _POOL_HEADER. This way we can quickly skip unreasonable
    # values.

    # This is the offset within _OBJECT_HEADER of InfoMask.
    info_mask_offset = self.obj_profile.get_obj_offset(
        "_OBJECT_HEADER", "InfoMask")
    pointer_count_offset = self.obj_profile.get_obj_offset(
        "_OBJECT_HEADER", "PointerCount")
    pointer_count_size = self.obj_profile.Object(
        '_OBJECT_HEADER').PointerCount.obj_size

    optional_headers, lengths_of_optional_headers = \
        self._CalculateOptionalHeaderLength(self.obj_profile)
    # Index of PADDING_INFO among the optional headers, if present.
    padding_available = None if 'PADDING_INFO' not in optional_headers \
        else optional_headers.index('PADDING_INFO')
    max_optional_headers_length = sum(lengths_of_optional_headers)
    addr_limit = min(max_optional_headers_length,
                     self.BlockSize * alignment)
    info_mask_data = self.obj_vm.read(start_offset,
                                      addr_limit + info_mask_offset)

    for addr in range(0, addr_limit, alignment):
        infomask_value = info_mask_data[addr + info_mask_offset]
        # A plausible header has a small non-negative PointerCount.
        pointercount_value = int.from_bytes(
            info_mask_data[addr + pointer_count_offset:
                           addr + pointer_count_offset + pointer_count_size],
            byteorder='little', signed=True)
        if not 0x1000000 > pointercount_value >= 0:
            continue

        # Sum the sizes of every optional header enabled by the InfoMask.
        padding_present = False
        optional_headers_length = 0
        for i in range(len(lengths_of_optional_headers)):
            if infomask_value & (1 << i):
                optional_headers_length += lengths_of_optional_headers[i]
                if i == padding_available:
                    padding_present = True

        padding_length = 0
        if padding_present:
            # Read the four bytes from just before the next
            # optional_headers_length minus the padding_info size
            #
            #  ---------------
            #  POOL_HEADER
            #  ---------------
            #
            #  start of PADDING_INFO
            #  ---------------
            #  End of other optional headers
            #  ---------------
            #  OBJECT_HEADER
            #  ---------------
            if addr - optional_headers_length < 0:
                continue
            padding_length = struct.unpack(
                '<I',
                info_mask_data[addr - optional_headers_length:
                               addr - optional_headers_length + 4])[0]
            padding_length -= lengths_of_optional_headers[
                padding_available or 0]
            # NOTE(review): this condition can never be true when
            # optional_headers_length >= 0 (x >= p > addr with
            # x = addr - optional_headers_length <= addr is impossible),
            # so it filters nothing - verify against the upstream
            # implementation this was ported from.
            if addr - optional_headers_length >= padding_length > addr:
                continue

        test_object = self.obj_profile._OBJECT_HEADER(
            offset=start_offset + addr, vm=cached_vm)
        #if test_object.is_valid():
        if (type is None or
                test_object.get_object_type() == type or
                # Freed objects point to index 1
                # (which is also 0xbad0b0b0).
                (freed and test_object.TypeIndex <= 2)):
            yield test_object
def setUp(self):
    """Prepare an in-memory buffer address space for each test."""
    # A repeating ASCII pattern gives us 1100 bytes of known test data.
    sample = "hello world" * 100
    self.address_space = addrspace.BufferAddressSpace(
        config=None, data=sample)
def scan(self, offset=0, maxlen=None):
    """Scan the region from offset for maxlen.

    Args:
      offset: The starting offset in our current address space to scan.

      maxlen: The maximum length to scan. If not provided we just scan
        until there is no data.

    Yields:
      offsets where all the constrainst are satisfied.
    """
    maxlen = maxlen or 2**64
    end = offset + maxlen
    overlap = ""

    # Record the last reported hit to prevent multiple reporting of the same
    # hits when using an overlap.
    last_reported_hit = -1

    # Delay building the constraints so they can be added after scanner
    # construction.
    if self.constraints is None:
        self.build_constraints()

    # We try to optimize the scanning by first merging contiguous ranges
    # and then passing up to constants.SCAN_BLOCKSIZE bytes to the checkers
    # and skippers.
    #
    # If range has less data than the block size, then the full range is
    # scanned at once.
    #
    # If a range is larger than the block size, it's split in chunks until
    # it's fully consumed. Overlap is applied only in this case, starting
    # from the second chunk.
    chunk_end = 0
    for run in self.address_space.merge_base_ranges(start=offset, end=end):
        # Store where this chunk will start. Absolute offset.
        chunk_offset = run.start

        buffer_as = addrspace.BufferAddressSpace(session=self.session)

        # Keep scanning this range as long as the current chunk isn't
        # past the end of the range or the end of the scanner.
        while chunk_offset < run.end:
            if self.session:
                self.session.report_progress(self.progress_message % dict(
                    offset=chunk_offset, name=self.__class__.__name__))

            # This chunk does not begin where the last chunk ended - this
            # means there is a gap in the virtual address space and
            # therefore we should not use any overlap.
            if chunk_offset != chunk_end:
                overlap = ""

            # Our chunk is SCAN_BLOCKSIZE long or as much data there's
            # left in the range.
            chunk_size = min(constants.SCAN_BLOCKSIZE,
                             run.end - chunk_offset)
            chunk_end = chunk_offset + chunk_size

            # Consume the next block in this range. We read directly from
            # the physical address space to save an extra translation by
            # the virtual address space's read() method.
            phys_chunk_offset = run.file_offset + (chunk_offset - run.start)
            buffer_as.assign_buffer(
                overlap + run.address_space.read(phys_chunk_offset,
                                                 chunk_size),
                base_offset=chunk_offset - len(overlap))

            # NOTE(review): self.overlap is used here as an integer byte
            # count (the overlap window size), distinct from the local
            # string `overlap` above.
            if self.overlap > 0:
                overlap = buffer_as.data[-self.overlap:]

            scan_offset = buffer_as.base_offset
            while scan_offset < buffer_as.end():
                # Check the current offset for a match.
                res = self.check_addr(scan_offset, buffer_as=buffer_as)

                # Remove multiple matches in the overlap region which we
                # have previously reported.
                if res is not None and scan_offset > last_reported_hit:
                    last_reported_hit = scan_offset
                    yield res

                # Skip as much data as the skippers tell us to, up to the
                # end of the buffer.
                scan_offset += min(len(buffer_as),
                                   self.skip(buffer_as, scan_offset))

            chunk_offset = scan_offset
def IterObject(self, type=None):
    """Generates possible _OBJECT_HEADER accounting for optional headers.

    Note that not all pool allocations have an _OBJECT_HEADER - only ones
    allocated from the the object manager. This means calling this method
    depends on which pool allocation you are after.

    On windows 8, pool allocations are done from preset sizes. This means
    that the allocation is never exactly the same size and we can not use
    the bottom up method like before.

    We therefore, have to build the headers forward by checking the preamble
    size and validity of each object. This is a little slower than with
    earlier versions of windows.

    Args:
      type: The object type name. If not specified we return all objects.
    """
    pool_align = self.obj_profile.get_constant("PoolAlignment")
    allocation_size = self.BlockSize * pool_align

    # Operate on a cached version of the next page.
    # We use a temporary buffer for the object to save reads of the image.
    cached_data = self.obj_vm.read(self.obj_offset + self.size(),
                                   allocation_size)
    cached_vm = addrspace.BufferAddressSpace(data=cached_data,
                                             session=self.obj_session)

    # We search for the _OBJECT_HEADER.InfoMask in close proximity to our
    # object. We build a lookup table between the values in the InfoMask and
    # the minimum distance there is between the start of _OBJECT_HEADER and
    # the end of _POOL_HEADER. This way we can quickly skip unreasonable
    # values.

    # This is the offset within _OBJECT_HEADER of InfoMask.
    info_mask_offset = self.obj_profile.get_obj_offset(
        "_OBJECT_HEADER", "InfoMask")

    # Build the cache if needed.
    if not self.lookup:
        self._BuildLookupTable()

    for i in xrange(0, allocation_size - info_mask_offset, pool_align):
        # Do not run off the cached buffer.
        if i + info_mask_offset > len(cached_data):
            break

        # NOTE: Python 2 semantics - cached_data[i] is a one-char str and
        # is compared against '\x7f' lexicographically.
        possible_info_mask = cached_data[i + info_mask_offset]
        if possible_info_mask > '\x7f':
            continue

        # The minimum space needed before the header for its optional
        # headers, from the precomputed lookup table.
        minimum_offset = self.lookup[possible_info_mask]

        # Obviously wrong because we need more space than we have.
        if minimum_offset > i:
            continue

        # Create a test object header from the cached vm to test for
        # validity.
        test_object = self.obj_profile._OBJECT_HEADER(offset=i, vm=cached_vm)
        if test_object.is_valid():
            if type is not None and test_object.get_object_type() != type:
                continue
            # Re-create the header against the real image AS at the
            # absolute offset.
            yield self.obj_profile._OBJECT_HEADER(
                offset=i + self.obj_offset + self.size(),
                vm=self.obj_vm, parent=self)
def testPointer(self):
    """Exercise old-style pointer vtypes: deref, arithmetic, 32/64-bit."""
    # Create an address space from a buffer for testing
    address_space = addrspace.BufferAddressSpace(
        config=None,
        data="\x08\x00\x00\x00\x00\x00\x00\x00"
             "\x66\x55\x44\x33\x00\x00\x00\x00"
             "\x99\x88\x77\x66\x55\x44\x33\x22")

    vtype = {'Test': [
        0x10, {
            # Check simple type dereferencing
            'ptr32': [0x00, ['pointer', ['unsigned long']]],
            'ptr64': [0x00, ['pointer', ['long long']]],

            # Check struct dereferencing
            'next': [0x00, ['pointer', ['Test']]],

            # A pointer to an invalid location
            'invalid': [0x08, ['pointer', ['long']]],

            # A void pointer
            'void': [0x00, ['pointer', ['void']]],
        }]}

    # We build a simple profile with just the native types here.
    profile = obj.Profile.classes['Profile32Bits']()
    profile.add_types(vtype)

    test = profile.Object("Test", offset=0, vm=address_space)

    ptr = test.ptr32

    # Can we check the offset of members?
    self.assertEqual(profile.get_obj_offset("Test", "invalid"), 8)

    # 32 bit pointers.
    self.assertEqual(ptr.size(), 4)

    # The pointer itself is at location 0.
    self.assertEqual(ptr.obj_offset, 0)

    # But is pointing to location 8.
    self.assertEqual(ptr.v(), 8)
    self.assertEqual(int(ptr), 8)
    self.assertEqual(ptr, 8)

    # The long is the next 8 bytes.
    self.assertEqual(ptr.dereference(), 0x33445566)

    # Pointer comparison
    self.assertEqual(test.ptr32, test.ptr64)

    # We could do pointer arithmetic.
    ptr2 = ptr + 2

    # The new pointer is at location 8 (its 32 bits).
    self.assertEqual(ptr2.obj_offset, 8)

    # The pointer to long long is moving twice as fast
    self.assertEqual(test.ptr64 + 1, 0x33445566)
    self.assertEqual(test.ptr32 + 1, 0)

    # And its pointing to.
    self.assertEqual(ptr2.v(), 0x33445566)

    # Alas it cant be dereferenced.
    self.assertEqual(type(ptr2.dereference()), obj.NoneObject)
    self.assert_("invalid" in ptr2.dereference().reason)

    # This is also invalid.
    self.assertEqual(type(test.invalid.dereference()), obj.NoneObject)

    # Test nonzero.
    self.assert_(test.ptr32)

    # Note this pointer is actually zero, but it is actually valid in
    # this AS.
    self.assert_(test.ptr32 + 1)

    # Now dereference a struct.
    ptr3 = test.next

    # This struct starts at offset 8.
    self.assertEqual(test.next.v(), 8)

    next = ptr3.dereference()

    # We get another struct from this.
    self.assertEqual(next.obj_type, "Test")

    # This new struct's ptr32 is pointing at this address now.
    self.assertEqual(next.ptr32, 0x33445566)

    # Now test 64 bit pointers.
    profile = obj.Profile.classes['Profile64Bits']()
    profile.add_types(vtype)

    test = profile.Object("Test", offset=0, vm=address_space)

    ptr = test.ptr32

    # 64 bit pointers.
    self.assertEqual(ptr.size(), 8)

    # The pointer itself is at location 0.
    self.assertEqual(ptr.obj_offset, 0)

    # But is pointing to location 8.
    self.assertEqual(ptr.v(), 8)

    # The long is the next 8 bytes.
    self.assertEqual(ptr.dereference(), 0x33445566)

    # We could do pointer arithmetic.
    ptr2 = ptr + 2

    # This will advance the pointer by 8 bytes (still pointer to long).
    self.assertEqual(ptr2.obj_offset, 8)
    self.assertEqual(ptr2, 0x33445566)

    # NOTE: We assume that long is 32 bits wide in both 64 bits and 32 bits
    # mode - which is the way windows does it. This is not the same as linux
    # which has long being the bit size in both cases.

    # Test the void pointer
    self.assertEqual(test.void, 8)
    self.assertEqual(test.void.dereference(), 0x33445566)
def scan(self, offset=0, maxlen=None):
    """Scan the region from offset for maxlen.

    Args:
      offset: The starting offset in our current address space to scan.

      maxlen: The maximum length to scan. If no provided we just scan until
        there is no data.

    Yields:
      offsets where all the constrainst are satisfied.
    """
    maxlen = maxlen or 2**64
    end = offset + maxlen

    # Delay building the constraints so they can be added after scanner
    # construction.
    if self.constraints is None:
        self.build_constraints()

    # We try to optimize the scanning by first merging contiguous ranges
    # and then passing up to constants.SCAN_BLOCKSIZE bytes to the checkers
    # and skippers.
    #
    # If range has less data than the block size, then the full range is
    # scanned at once.
    #
    # If a range is larger than the block size, it's split in chunks until
    # it's fully consumed. Overlap is applied only in this case, starting
    # from the second chunk.
    for (range_start, phys_start,
         length) in self.address_space.get_address_ranges(offset, end):
        # Find a new range if offset is past this range.
        range_end = range_start + length
        if range_end < offset:
            continue

        # Stop searching for ranges if this is past end.
        if range_start > end:
            break

        # Calculate where in the range we'll be reading data from.
        # Covers the case where offset falls within a range.
        start = max(range_start, offset)

        # Store where this chunk will start. Absolute offset.
        chunk_offset = start

        # Keep scanning this range as long as the current chunk isn't
        # past the end of the range or the end of the scanner.
        while chunk_offset < end and chunk_offset < range_end:
            if self.session:
                self.session.report_progress(
                    "Scanning 0x%08X with %s" %
                    (chunk_offset, self.__class__.__name__))

            chunk_offset = max(start, chunk_offset)

            # Our chunk is SCAN_BLOCKSIZE long or as much data there's
            # left in the range.
            chunk_size = min(constants.SCAN_BLOCKSIZE,
                             range_end - chunk_offset)

            # Adjust chunk_size if the chunk we're gonna read goes past
            # the end or we could end up scanning more data than requested.
            chunk_size = min(chunk_size, end - chunk_offset)

            phys_chunk_offset = phys_start + (chunk_offset - range_start)

            # Consume the next block in this range. The overlap window is
            # read past the chunk so hits straddling chunk ends are found.
            buffer_as = addrspace.BufferAddressSpace(
                session=self.session,
                data=self.address_space.base.read(
                    phys_chunk_offset, chunk_size + self.overlap),
                base_offset=chunk_offset)

            scan_offset = chunk_offset
            while scan_offset < chunk_offset + chunk_size:
                # Check the current offset for a match.
                res = self.check_addr(scan_offset, buffer_as=buffer_as)
                if res is not None:
                    yield res

                # Skip as much data as the skippers tell us to.
                scan_offset += min(chunk_size,
                                   self.skip(buffer_as, scan_offset))

            chunk_offset = scan_offset