def read_variable_address(self, variable=None):
    """See debugger.py interface

    Resolve the address of a named variable in gdb's selected frame.
    Returns the address on success and None when the variable cannot
    be found.
    """
    printed_var = self.get_printed_variable(variable)
    log.debug(f"pygdbpython.read_variable_address({printed_var})")

    if variable is None:
        pu.print_error("Please specify a variable to read")
        return None

    try:
        sym = gdb.selected_frame().read_var(variable)
        log.debug(f"variable.address = {sym.address}")
        return sym.address
    except RuntimeError as e:
        log.debug(f"exception 1: {e}")
        # Retrying the exact same call sometimes succeeds when no frame
        # was selected yet — reason unknown, kept from original behavior
        try:
            sym = gdb.selected_frame().read_var(variable)
            return sym.address
        except RuntimeError as e2:
            # variable was not found
            log.debug(f"exception 2: {e2}")
            return None
    except (ValueError, AttributeError) as e:
        log.debug(f"exception 3: {e}")
        # Fallback: let gdb evaluate "&<variable>" and parse the address
        # out of the "x/x" output
        res = gdb.execute("x/x &{}".format(variable), to_string=True)
        return int(res.strip().split()[0], 16)
def parse_address(self, addresses):
    """See debugger.py interface

    It should be able to handle gdb variables starting with $ or if we
    ommit it too

    :param addresses: a single expression or a list of expressions
                      (integers, variable names, registers, arithmetic)
    :return: list of resolved integer addresses (unparsable items skipped)
    """
    log.debug("pygdbpython.parse_address()")
    resolved = []
    # Accept a single item for caller convenience
    if not isinstance(addresses, list):
        addresses = [addresses]
    for item in addresses:
        addr = None
        # Fix: the original used bare "except:" which also swallows
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        try:
            # This should parse most cases like integers,
            # variables (exact name), registers (if we specify $ in front), as well
            # as arithmetic with integers, variables and registers.
            # i.e. as long as "p ABC" or "x /x ABC" works, it should work within here too
            addr = self.parse_variable(item)
            log.info("parsed address (default) = 0x%x" % addr)
        except Exception:
            # XXX - Not sure what this is for?
            try:
                addr = self.parse_variable("&" + item)
                log.info("parsed address (unknown) = 0x%x" % addr)
            except Exception:
                # Parse registers if we don't specify the register, e.g. "rdi" instead of "$rdi"
                try:
                    addr = self.parse_variable("$" + item)
                    log.info("parsed address (register) = 0x%x" % addr)
                except Exception:
                    pu.print_error(f"ERROR: Unable to parse {item}")
                    continue
        if addr is not None:
            resolved.append(addr)
    return resolved
def list_arenas(self):
    """List the arena addresses only

    Prints the main arena address from the cached malloc_state, then
    follows the arenas' "next" pointers (a circular singly-linked list
    ending back at the main arena) and prints each arena's address.
    """
    mstate = self.cache.mstate
    # next == 0 means the cached malloc_state was never parsed correctly
    if mstate.next == 0:
        print("No arenas could be correctly guessed. Wrong glibc version configured?")
        print("Nothing was found at {0:#x}".format(mstate.address))
        return
    print("Arena(s) found:", end="\n")
    print(" arena @ ", end="")
    pu.print_header("{:#x}".format(int(mstate.address)), end="\n")
    if mstate.address != mstate.next:
        # we have more than one arena
        curr_arena = ms.malloc_state(
            self.ptm, mstate.next, debugger=self.dbg, version=self.version
        )
        # Walk until the list wraps around to the main arena again
        while mstate.address != curr_arena.address:
            print(" arena @ ", end="")
            pu.print_header("{:#x}".format(int(curr_arena.address)), end="\n")
            curr_arena = ms.malloc_state(
                self.ptm, curr_arena.next, debugger=self.dbg, version=self.version
            )
            # A parse failure yields address 0; bail out instead of looping forever
            if curr_arena.address == 0:
                pu.print_error("No arenas could be correctly found.")
                break  # breaking infinite loop
def read_variable(self, variable=None):
    """See debugger.py interface

    Read the value of a named variable in gdb's selected frame.
    Returns the gdb.Value on success and None when the variable
    cannot be resolved.
    """
    printed_var = self.get_printed_variable(variable)
    log.debug(f"pygdbpython.read_variable({printed_var})")

    if variable is None:
        pu.print_error("Please specify a variable to read")
        return None

    try:
        val = gdb.selected_frame().read_var(variable)
        log.debug(f"variable = {str(val)}")
        return val
    except RuntimeError as e:
        log.debug(f"exception 1: {e}")
        # A straight retry sometimes succeeds when no frame was selected
        # yet — reason unknown, kept from original behavior
        try:
            return gdb.selected_frame().read_var(variable)
        except RuntimeError as e2:
            # variable was not found
            log.debug(f"exception 2: {e2}")
            return None
    except ValueError as e:
        # variable was not found
        log.debug(f"exception 3: {e}")
        return None
def __init__(self, ptm, name):
    """Register a new gdb command backed by a ptmalloc object.

    :param ptm: ptmalloc object; must already hold a debugger in .dbg
    :param name: name under which the gdb command is registered
    :raises Exception: when no debugger is configured on ptm
    """
    self.ptm = ptm
    if ptm.dbg is None:
        pu.print_error("Please specify a debugger")
        raise Exception("sys.exit()")

    self.name = name
    self.old_level = None
    self.parser = None       # ArgumentParser
    self.description = None  # Only use if not in the parser

    super(ptcmd, self).__init__(name, gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
    """Inherited from gdb.Command
    See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html

    Dispatches the "ptmeta" sub-actions (list/add/del/config) as well as
    the --save/--load database options, based on pre-parsed self.args.
    """
    log.debug("ptmeta.invoke()")
    # An action or one of --save/--load is mandatory
    if self.args.action is None and not self.args.save and not self.args.load:
        pu.print_error("WARNING: requires an action")
        self.parser.print_help()
        return
    # "list", "add" and "del" accept an optional chunk address; resolve it
    # once here so the per-action branches below can use it
    if self.args.action == "list" \
            or self.args.action == "add" \
            or self.args.action == "del":
        address = None
        if self.args.address != None:
            addresses = self.dbg.parse_address(self.args.address)
            if len(addresses) == 0:
                pu.print_error("WARNING: No valid address supplied")
                self.parser.print_help()
                return
            # only the first resolved address is used
            address = addresses[0]
    if self.args.action == "list":
        self.list_metadata(address)
        return
    if self.args.action == "del":
        self.delete_metadata(address)
        return
    if self.args.action == "config":
        self.configure_metadata(self.args.feature, self.args.key, self.args.values)
        return
    if self.args.action == "add":
        self.add_metadata(address, self.args.key, self.args.value)
        return
    if self.args.save:
        if self.args.verbose >= 0:
            # always print since debugging feature
            print("Saving metadata database to file...")
        save_metadata_to_file(METADATA_DB)
        return
    if self.args.load:
        if self.args.verbose >= 0:
            # always print since debugging feature
            print("Loading metadata database from file...")
        load_metadata_from_file(METADATA_DB)
        return
def parse_variable(self, variable=None):
    """See debugger.py interface

    Evaluate an expression with gdb and return it as an unsigned integer
    truncated to the target's word size.

    :param variable: expression to evaluate (name, register, arithmetic)
    :return: unsigned integer value, or None on error
    """
    log.debug("pygdbpython.parse_variable()")
    if variable is None:
        pu.print_error("Please specify a variable to read")
        return None
    evaluated = int(gdb.parse_and_eval(variable))
    log.info("pygdbpython.parse_variable(): evaluated variable = 0x%x" % evaluated)
    # Fix: the original left "p" unbound (UnboundLocalError) when the word
    # size could not be determined; fail explicitly instead.
    size_sz = self.get_size_sz()
    if size_sz == 4:
        p = self.tohex(evaluated, 32)
    elif size_sz == 8:
        p = self.tohex(evaluated, 64)
    else:
        pu.print_error("Could not determine the target word size")
        return None
    return int(p, 16)
def write_memory(self, address, buf, length=None):
    """See debugger.py interface

    Write buf into the inferior's memory at the given address.

    :param address: target address
    :param buf: bytes to write
    :param length: optional explicit length, forwarded to gdb when given
    """
    log.debug("pygdbpython.write_memory()")
    # Lazily resolve and cache the inferior on first use
    if self.inferior is None:
        self.inferior = self.get_inferior()
    # Forward length only when the caller supplied one
    call_args = (address, buf) if length is None else (address, buf, length)
    try:
        self.inferior.write_memory(*call_args)
    except MemoryError:
        pu.print_error("GDB inferior write_memory error")
def get_heap_address(self, par=None):
    """See debugger.py interface

    Read heap address from glibc's mp_ structure if available, otherwise
    fall back to /proc/self/maps or "info proc mappings" command
    which are unreliable.

    :param par: optional malloc_par object; when given, its sbrk_base is
                used as the heap start
    :return: (start, end) tuple; either element may be None if not found
    """
    log.debug("pygdbpython.get_heap_address()")
    start, end = None, None
    if par is not None:
        if isinstance(par, mp.malloc_par):
            start = par.sbrk_base
        else:
            pu.print_error("Please specify a valid malloc_par variable")
        # XXX: add end from arena(s).system_mem ?
    else:
        # ```
        # # cat /proc/self/maps
        # 55555575d000-55555577e000 rw-p 00000000 00:00 0 [heap]
        # ```
        # XXX - Reading a local file won't work if remote debugging
        #pid, task_id, thread_id = gdb.selected_thread().ptid
        #maps_file = "/proc/%d/task/%d/maps"
        #maps_data = open(maps_file % (pid, task_id)).readlines()
        # for line in maps_data:
        #     if any(x.strip() == "[heap]" for x in line.split(" ")):
        #         heap_range = line.split(" ")[0]
        #         start, end = [int(h, 16) for h in heap_range.split("-")]
        #         break

        # ```
        # (gdb) info proc mappings
        # 0x555555864000     0x55555586e000     0xa000     0x0 [heap]
        # ```
        maps_data = self.execute("info proc mappings").split("\n")
        for line in maps_data:
            if any(x.strip() == "[heap]" for x in line.split(" ")):
                # Fix: use a raw string — "\s" in a regular string is an
                # invalid escape sequence (SyntaxWarning on recent Python)
                m = re.match(r"[\s]*([0-9a-fx]*)[\s]*([0-9a-fx]*).*", line)
                if m:
                    start = int(m.group(1), 16)
                    end = int(m.group(2), 16)
                    log.debug(f"pygdbpython.get_heap_address() -> {start:#x}, {end:#x}")
                break
    return start, end
def initialize_sizes_and_offsets(self):
    """Initialize malloc_par's specific sizes based on the glibc version
    and architecture

    Sets self.size to sizeof(struct malloc_par) for the configured
    glibc version and word size (4 or 8 bytes).

    :raises Exception: when the glibc version is older than 2.15
    """
    self.size_sz = self.dbg.get_size_sz()
    if self.version < 2.15:
        # XXX - seems 2.14 has same fields as 2.15 so likely we can support
        # older easily...
        pu.print_error("Unsupported version for malloc_par")
        raise Exception('sys.exit()')
    if self.version >= 2.15 and self.version <= 2.23:
        if self.size_sz == 4:
            # sizeof(malloc_par) = 20 + 16 + 16
            self.size = 0x34
        elif self.size_sz == 8:
            # sizeof(malloc_par) = 40 + 16 + 32
            self.size = 0x58
    elif self.version >= 2.24 and self.version <= 2.25:
        # max_total_mem removed in 2.24
        if self.size_sz == 4:
            self.size = 0x30
        elif self.size_sz == 8:
            self.size = 0x50
    elif self.version >= 2.26:
        # tcache_* added in 2.26
        if self.ptm.is_tcache_enabled():
            # USE_TCACHE is set
            if self.size_sz == 4:
                self.size = 0x40
            elif self.size_sz == 8:
                self.size = 0x70
        else:
            # revert to same sizes as [2.24, 2.25] if USE_TCACHE not set
            if self.size_sz == 4:
                self.size = 0x30
            elif self.size_sz == 8:
                self.size = 0x50
    # NOTE(review): this photon adjustment is applied regardless of the
    # version branch taken above, even though the comment says it is for
    # 2.28 — confirm this is intended for photon 3.0 targets only
    if self.ptm.distribution == "photon" and self.ptm.release == "3.0":
        # arena_stickiness added for all 2.28 versions
        self.size += self.size_sz
    log.debug(f"malloc_par.size = {self.size:#x}")
def unpack_memory(self):
    """Actually parse all the tcache_perthread_struct's fields from the
    memory bytes (previously retrieved)

    Fills self.counts (64 one-byte counters) and self.entries
    (64 architecture-sized pointers) from self.mem.

    :raises Exception: when no memory bytes were retrieved beforehand
    """
    if self.mem is None:
        pu.print_error("No memory found")
        raise Exception("sys.exit()")

    # The structure starts with 64 single-byte bin counters...
    self.counts = struct.unpack_from("64B", self.mem, 0)
    offset = 64

    # ...followed by 64 pointers whose width depends on the architecture
    if self.size_sz == 4:
        entries_fmt = "<64I"
    elif self.size_sz == 8:
        entries_fmt = "<64Q"
    self.entries = struct.unpack_from(entries_fmt, self.mem, offset)
    offset += 64 * self.size_sz
def get_inferior(self):
    """See debugger.py interface

    Return the gdb inferior, caching it on first lookup. Returns -1 when
    no inferior exists.

    :raises Exception: when this gdb's python support lacks inferiors()
    """
    log.debug("pygdbpython.get_inferior()")
    try:
        # Return the cached inferior when we already resolved it
        if self.inferior is not None:
            return self.inferior
        inferiors = gdb.inferiors()
        if len(inferiors) == 0:
            pu.print_error("No gdb inferior could be found.")
            return -1
        self.inferior = inferiors[0]
        return self.inferior
    except AttributeError:
        pu.print_error("This gdb's python support is too old.")
        raise Exception("sys.exit()")
def initialize_sizes_and_offsets(self):
    """Initialize malloc_state's specific sizes based on the glibc version
    and architecture

    Sets self.size (sizeof(struct malloc_state)), self.fastbins_offset
    and self.bins_offset for the configured glibc version and word size.

    :raises Exception: when the glibc version is older than 2.15
    """
    self.size_sz = self.dbg.get_size_sz()
    if self.version < 2.15:
        # XXX - seems 2.14 has same fields as 2.15 so likely we can support
        # older easily...
        pu.print_error("Unsupported version for malloc_state")
        raise Exception('sys.exit()')
    if self.version >= 2.15 and self.version < 2.23:
        if self.size_sz == 4:
            # sizeof(malloc_state) = 4+4+40+4+4+(254*4)+16+4+4+4+4
            self.size = 0x450
        elif self.size_sz == 8:
            # sizeof(malloc_state) = 4+4+80+8+8+(254*8)+16+8+8+8+8
            self.size = 0x888
        self.fastbins_offset = 8
        self.bins_offset = self.fastbins_offset + 12 * self.size_sz
    elif self.version >= 2.23 and self.version <= 2.26:
        # attached_threads added in 2.23.
        # Bug fix: the upper bound used to be 2.25, so glibc 2.26 matched no
        # branch at all, leaving size/offsets unset. 2.26 shares the
        # 2.23-2.25 layout since have_fastchunks only appeared in 2.27.
        if self.size_sz == 4:
            self.size = 0x454
        elif self.size_sz == 8:
            self.size = 0x890
        self.fastbins_offset = 8
        self.bins_offset = self.fastbins_offset + 12 * self.size_sz
    elif self.version >= 2.27:
        # have_fastchunks added in 2.27
        if self.size_sz == 4:
            # hax, empiric: +4 for padding added after fastbinsY[]
            self.size = 0x458+4
            self.fastbins_offset = 0xC
        elif self.size_sz == 8:
            self.size = 0x898
            self.fastbins_offset = 0x10
        self.bins_offset = self.fastbins_offset + 12 * self.size_sz
def configure_metadata(self, feature, key, values):
    """Save given metadata (key, values) for a given feature (e.g. "backtrace")

    :param feature: name of the feature (e.g. "ignore")
    :param key: name of the metadata (e.g. "backtrace")
    :param values: list of values to associate to the key
    """
    if self.args.verbose >= 1:
        print("Configuring metadata database...")
    # Guard clauses: only the ("backtrace", "ignore") pair is supported
    if key != "backtrace":
        pu.print_error("WARNING: Unsupported key")
        return
    if feature != "ignore":
        pu.print_error("WARNING: Unsupported feature")
        return
    backtrace_ignore.update(values)
def add_metadata(self, address, key, value):
    """Save given metadata (key, value) for a given chunk's address

    E.g. key = "tag" and value is an associated user-defined tag

    :param address: chunk address used as the database key
    :param key: metadata name ("backtrace", "color", or free-form)
    :param value: metadata value; ignored for "backtrace" which snapshots
                  the current debugger backtrace instead
    """
    if self.args.verbose >= 1:
        print("Adding to metadata database...")

    if key == "backtrace":
        # store the current backtrace rather than the provided value
        result = self.dbg.get_backtrace()
    elif key == "color":
        if value not in colorize_table:
            pu.print_error(f"ERROR: Unsupported color. Need one of: {', '.join(colorize_table.keys())}")
            return
        result = value
    else:
        result = value

    meta_cache.setdefault(address, {})[key] = result
def get_size_sz(self):
    """See debugger.py interface

    Determine the target word size (4 or 8 bytes) from the binary's
    architecture string, caching the result in self.SIZE_SZ.
    Returns 0 when it cannot be determined.
    """
    #log.debug("pygdbpython.get_size_sz()")
    # Return the cached value if already determined
    if self.SIZE_SZ != 0:
        return self.SIZE_SZ

    try:
        _machine = self.get_arch()[0]
    except (IndexError, TypeError):
        # IndexError: empty architecture list
        # TypeError: gdb is not running
        _machine = ""
        self.SIZE_SZ = 0
        pu.print_error("Retrieving self.SIZE_SZ failed.")

    if "elf64" in _machine:
        self.SIZE_SZ = 8
    elif "elf32" in _machine:
        self.SIZE_SZ = 4
    else:
        self.SIZE_SZ = 0
        pu.print_error("Retrieving self.SIZE_SZ failed.")
    return self.SIZE_SZ
def get_backtrace(self):
    """See debugger.py interface

    Run gdb's "backtrace" and return a dict with the raw output under
    "raw" and the parsed function names (innermost first) under "funcs".
    """
    log.debug("pygdbpython.get_backtrace()")
    d = {}
    output = self.execute("backtrace")
    d["raw"] = output
    funcs = []
    lines = output.split("\n")
    # Strip leading symbol-loading noise before the actual frames
    for i in range(len(lines)):
        # This is shown when "set verbose on" was executed so skip those
        if "Reading in symbols" in lines[i]:
            continue
        else:
            lines = lines[i:]
            break
    # Only parse when the output actually starts with frame #0
    if lines[0].startswith("#0"):
        for line in lines:
            if not line:
                continue
            log.debug(f"Handling: '{line}'")
            elts = line.split()
            if len(elts) < 3:
                pu.print_error("Skipping too small line in backtrace")
                continue
            if not elts[0].startswith("#"):
                pu.print_error("Skipping non-valid line in backtrace")
                continue
            if elts[2] == "in":
                # Something like:
                # #1  0x00007f834a8c8190 in _nl_make_l10nflist (l10nfile_list=...) at ../intl/l10nflist.c:237
                funcs.append(elts[3])
            else:
                # Something like:
                # #0  __GI___libc_free (mem=...) at malloc.c:3096
                funcs.append(elts[1])
    # "funcs" is always present, possibly empty when parsing failed
    d["funcs"] = funcs
    return d
def invoke(self, arg, from_tty):
    """Inherited from gdb.Command
    See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html

    Entry point of the "pttcache" command: refreshes the cached tcache,
    arena and malloc_par, then either lists tcaches, prints the whole
    tcache at the requested verbosity, or shows a single tcache bin.
    """
    log.debug("pttcache.invoke()")
    if not self.ptm.is_tcache_enabled():
        print("tcache is currently disabled. Check glibc version or manually overide the tcache settings")
        return
    if not self.ptm.tcache_available:
        print("tcache is not currently available. Your target binary does not use threads to leverage tcache?")
        return
    self.cache.update_tcache(self.args.address, show_status=self.args.debug, use_cache=self.args.use_cache)
    # This is required by ptchunk.parse_many()
    self.cache.update_arena(show_status=self.args.debug, use_cache=self.args.use_cache)
    self.cache.update_param(show_status=self.args.debug, use_cache=self.args.use_cache)
    # This is required by show_one_bin(), see description
    self.args.real_count = self.args.count
    if self.args.list:
        self.list_tcaches()
        return
    # -i and -s are mutually exclusive ways to pick a single bin
    if self.args.index != None and self.args.size != None:
        pu.print_error("Only one of -i and -s can be provided")
        return
    log.debug("tcache_address = 0x%x" % self.cache.tcache.address)
    if self.args.index == None and self.args.size == None:
        # No bin selected: print the whole tcache at increasing verbosity
        if self.args.verbose == 0:
            print(self.cache.tcache.to_summary_string())
        elif self.args.verbose == 1:
            print(self.cache.tcache)
        elif self.args.verbose == 2:
            print(self.cache.tcache.to_string(verbose=True))
    else:
        ptfree.ptfree.show_one_bin(self, "tcache", index=self.args.index, size=self.args.size, use_cache=self.args.use_cache)
def invoke(self, arg, from_tty):
    """Inherited from gdb.Command
    See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html

    Entry point of the "ptfast" command: refreshes the cached arena and
    malloc_par, then shows either one fast bin (-i/-s) or all of them.
    """
    log.debug("ptfast.invoke()")

    self.cache.update_arena(self.args.address, show_status=self.args.debug, use_cache=self.args.use_cache)
    mstate = self.cache.mstate
    # This is required by ptchunk.parse_many()
    self.cache.update_param(show_status=self.args.debug, use_cache=self.args.use_cache)
    # This is required by show_one_bin(), see description
    self.args.real_count = self.args.count

    has_index = self.args.index is not None
    has_size = self.args.size is not None
    # -i and -s are mutually exclusive
    if has_index and has_size:
        pu.print_error("Only one of -i and -s can be provided")
        return
    if has_index or has_size:
        ptfree.ptfree.show_one_bin(self, "fast", index=self.args.index, size=self.args.size, use_cache=self.args.use_cache)
    else:
        self.show_fastbins(mstate, use_cache=self.args.use_cache)
def print_hexdump(self, address, size, unit=8):
    """See debugger.py interface

    Print a hexdump of memory at the given address.

    :param address: start address
    :param size: number of bytes to dump
    :param unit: grouping in bytes (1, 2, 4, 8) or "dps" for the
                 dps-like external command
    """
    # See https://visualgdb.com/gdbreference/commands/x
    if unit == 1:
        #cmd = "x/%dbx 0x%x\n" % (size, address)
        try:
            mem = self.read_memory(address, size)
        except TypeError:
            pu.print_error("Invalid address specified")
            return
        except RuntimeError:
            # Fix: was formatting undefined name "addr" (NameError)
            pu.print_error("Could not read address {0:#x}".format(address))
            return
        i = 0
        for line in hexdump.hexdump(bytes(mem), result='generator'):
            elts = line.split(":")
            txt = ":".join(elts[1:])
            print("0x%x: %s" % (address+i*0x10, txt))
            i += 1
        return
    elif unit == 2:
        # use integer division so gdb receives a clean count
        cmd = "x/%dhx 0x%x\n" % (size//2, address)
    elif unit == 4:
        cmd = "x/%dwx 0x%x\n" % (size//4, address)
    elif unit == 8:
        cmd = "x/%dgx 0x%x\n" % (size//8, address)
    elif unit == "dps":
        # XXX - call into dps_like_for_gdb.py command for now
        # but we want to just add it to libptmalloc
        cmd = "dps 0x%x %d\n" % (address, size//self.get_size_sz())
    else:
        print("[!] Invalid unit specified")
        return
    print(self.execute(cmd, to_string=True))
    return
def __str__(self):
    """Pretty printer for the malloc_chunk

    Renders the chunk header fields according to its type (inuse, tcache,
    fast, small or large free chunk). The size field is printed with the
    flag bits masked out, followed by the decoded flag names; the "\b)"
    backspace trick erases the trailing "|" separator.
    """
    # An all-zero header is not worth printing
    if self.prev_size == 0 and self.size == 0:
        return ""
    # XXX - since they all share the same prev_size/size and 2 chunk types
    # also share the fd/bk, we could refactor code here?
    elif self.type == pt.chunk_type.INUSE:
        # inuse chunk: only prev_size/size plus the flag bits
        title = "struct malloc_chunk @ 0x%x {" % self.address
        ret = pu.color_title(title)
        ret += "\n{:11} = ".format("prev_size")
        ret += pu.color_value("{:#x}".format(self.prev_size))
        ret += "\n{:11} = ".format("size")
        ret += pu.color_value("{:#x}".format(self.size & ~self.ptm.SIZE_BITS))
        if (
            self.ptm.prev_inuse(self)
            or self.ptm.chunk_is_mmapped(self)
            or self.ptm.chunk_non_main_arena(self)
        ):
            ret += " ("
            if self.ptm.prev_inuse(self):
                ret += "PREV_INUSE|"
            if self.ptm.chunk_is_mmapped(self):
                ret += "MMAPPED|"
            if self.ptm.chunk_non_main_arena(self):
                ret += "NON_MAIN_ARENA|"
            ret += "\b)"
        return ret
    elif self.type == pt.chunk_type.FREE_TCACHE:
        # tcache free chunk: header plus the overlaid tcache_entry
        title = "struct malloc_chunk @ 0x%x {" % self.address
        ret = pu.color_title(title)
        ret += "\n{:11} = ".format("prev_size")
        ret += pu.color_value("{:#x}".format(self.prev_size))
        ret += "\n{:11} = ".format("size")
        ret += pu.color_value("{:#x}".format(self.size & ~self.ptm.SIZE_BITS))
        flag_str = ""
        if self.ptm.prev_inuse(self):
            flag_str += "PREV_INUSE|"
        if self.ptm.chunk_is_mmapped(self):
            flag_str += "MMAPPED|"
        if self.ptm.chunk_non_main_arena(self):
            flag_str += "NON_MAIN_ARENA|"
        if len(flag_str) != 0:
            ret += " ("
            ret += flag_str
            ret += "\b)"
        # tcache_entry lives right after the inuse header
        title = "\nstruct tcache_entry @ 0x%x {" % (self.address + self.ptm.INUSE_HDR_SZ)
        ret += pu.color_title(title)
        ret += "\n{:11} = ".format("next")
        ret += pu.color_value("{:#x}".format(self.next))
        ret += "\n{:11} = ".format("key")
        ret += pu.color_value("{:#x}".format(self.key))
        return ret
    elif self.type == pt.chunk_type.FREE_FAST:
        # fast bin free chunk: header plus single forward pointer
        title = "struct malloc_chunk @ 0x%x {" % self.address
        ret = pu.color_title(title)
        ret += "\n{:11} = ".format("prev_size")
        ret += pu.color_value("{:#x}".format(self.prev_size))
        ret += "\n{:11} = ".format("size")
        ret += pu.color_value("{:#x}".format(self.size & ~self.ptm.SIZE_BITS))
        flag_str = ""
        if self.ptm.prev_inuse(self):
            flag_str += "PREV_INUSE|"
        if self.ptm.chunk_is_mmapped(self):
            flag_str += "MMAPPED|"
        if self.ptm.chunk_non_main_arena(self):
            flag_str += "NON_MAIN_ARENA|"
        if len(flag_str) != 0:
            ret += " ("
            ret += flag_str
            ret += "\b)"
        ret += "\n{:11} = ".format("fd")
        ret += pu.color_value("{:#x}".format(self.fd))
        return ret
    elif self.type == pt.chunk_type.FREE_SMALL:
        # small bin free chunk: fd and bk pointers
        # NOTE(review): unlike the other branches, title/prev_size/size are
        # not colored here and the flag list is printed unconditionally —
        # possibly unintentional inconsistency
        ret = "struct malloc_chunk @ "
        ret += "{:#x} ".format(self.address)
        ret += "{"
        ret += "\n{:11} = ".format("prev_size")
        ret += "{:#x}".format(self.prev_size)
        ret += "\n{:11} = ".format("size")
        ret += "{:#x}".format(self.size & ~self.ptm.SIZE_BITS)
        ret += " ("
        if self.ptm.prev_inuse(self):
            ret += "PREV_INUSE|"
        if self.ptm.chunk_is_mmapped(self):
            ret += "MMAPPED|"
        if self.ptm.chunk_non_main_arena(self):
            ret += "NON_MAIN_ARENA|"
        ret += "\b)"
        ret += "\n{:11} = ".format("fd")
        ret += pu.color_value("{:#x}".format(self.fd))
        ret += "\n{:11} = ".format("bk")
        ret += pu.color_value("{:#x}".format(self.bk))
        return ret
    elif self.type == pt.chunk_type.FREE_LARGE:
        # large bin free chunk: fd/bk plus the nextsize skip-list pointers
        title = "struct malloc_chunk @ 0x%x {" % self.address
        ret = pu.color_title(title)
        ret += "\n{:11} = ".format("prev_size")
        ret += pu.color_value("{:#x}".format(self.prev_size))
        ret += "\n{:11} = ".format("size")
        ret += pu.color_value("{:#x}".format(self.size & ~self.ptm.SIZE_BITS))
        ret += " ("
        if self.ptm.prev_inuse(self):
            ret += "PREV_INUSE|"
        if self.ptm.chunk_is_mmapped(self):
            ret += "MMAPPED|"
        if self.ptm.chunk_non_main_arena(self):
            ret += "NON_MAIN_ARENA|"
        ret += "\b)"
        ret += "\n{:11} = ".format("fd")
        ret += pu.color_value("{:#x}".format(self.fd))
        ret += "\n{:11} = ".format("bk")
        ret += pu.color_value("{:#x}".format(self.bk))
        ret += "\n{:11} = ".format("fd_nextsize")
        ret += pu.color_value("{:#x}".format(self.fd_nextsize))
        ret += "\n{:11} = ".format("bk_nextsize")
        ret += pu.color_value("{:#x}".format(self.bk_nextsize))
        return ret
    else:
        pu.print_error("Error: unknown hdr_size. Should not happen")
        return ""
def write(self, inferior=None):
    """See debugger.py interface

    Writing the tcache_perthread_struct back into target memory is not
    supported yet; this stub only reports that fact.

    :param inferior: unused, kept for interface compatibility
    """
    pu.print_error("tcache_perthread write() not yet implemented.")
def __init__(self, ptm, addr=None, mem=None, debugger=None, version=None):
    """
    Parse tcache_perthread_struct's data and initialize the tcache_perthread object

    :param ptm: ptmalloc object
    :param addr: address for a tcache_perthread_struct where to read the structure's
                 content from the debugger
    :param mem: alternatively to "addr", provides the memory bytes of that
                tcache_perthread_struct's content
    :param debugger: the pydbg object
    :param version: the glibc version

    On recoverable errors self.initOK is set to False and the constructor
    returns early; configuration errors raise instead.
    """
    super(tcache_perthread, self).__init__(ptm, debugger=debugger)
    self.size = 0  # sizeof(struct tcache_perthread_struct)
    self.counts = []   # 64 per-bin chunk counters
    self.entries = []  # 64 per-bin list head pointers
    # Exactly one of addr/mem must be provided
    if addr is None:
        if mem is None:
            pu.print_error("Please specify a struct tcache_perthread address")
            self.initOK = False
            return
        self.address = None
    else:
        self.address = addr
    if debugger is not None:
        self.dbg = debugger
    else:
        pu.print_error("Please specify a debugger")
        raise Exception("sys.exit()")
    if version is None:
        pu.print_error("Please specify a glibc version for tcache_perthread")
        raise Exception("sys.exit()")
    else:
        self.version = version
        # tcache only exists from glibc 2.26 onwards
        if version <= 2.25:
            pu.print_error("tcache was added in glibc 2.26. Wrong version configured?")
            raise Exception("sys.exit()")
    if not self.ptm.is_tcache_enabled():
        pu.print_error("tcache is configured as disabled. Wrong configuration?")
        raise Exception("sys.exit()")
    # determines self.size before reading/validating the memory below
    self.initialize_sizes_and_offsets()
    if mem is None:
        # a string of raw memory was not provided, let's read it from the debugger
        try:
            self.mem = self.dbg.read_memory(addr, self.size)
        except TypeError:
            pu.print_error("Invalid address specified")
            self.initOK = False
            return
        except RuntimeError:
            pu.print_error("Could not read address {0:#x}".format(addr))
            self.initOK = False
            return
    else:
        if len(mem) < self.size:
            pu.print_error("Provided memory size is too small for a tcache_perthread")
            self.initOK = False
            return
        self.mem = mem[:self.size]
    self.unpack_memory()
def __init__(self, ptm, addr=None, mem=None, debugger=None, version=None):
    """
    Parse malloc_state's data and initialize the malloc_state object

    :param ptm: ptmalloc object
    :param addr: address for a malloc_state where to read the structure's
                 content from the debugger
    :param mem: alternatively to "addr", provides the memory bytes of that
                malloc_state's content
    :param debugger: the pydbg object
    :param version: the glibc version

    On recoverable errors self.initOK is set to False and the constructor
    returns early; configuration errors raise instead.
    """
    super(malloc_state, self).__init__(ptm, debugger=debugger)
    self.size = 0  # sizeof(struct malloc_state)

    # malloc_state structure's fields, in this order for easy lookup
    # Note: commented ones have been added at some point in glibc
    # so are not present in older glibc versions
    self.mutex = 0
    self.flags = 0
    # self.have_fastchunks = 0 # added in 2.27
    self.fastbinsY = 0
    self.top = 0
    self.last_remainder = 0
    self.bins = 0
    self.binmap = 0
    self.next = 0
    self.next_free = 0
    # self.attached_threads = 0 # added in 2.23
    self.system_mem = 0
    self.max_system_mem = 0

    # helpers: byte offsets into the structure, filled by
    # initialize_sizes_and_offsets()
    self.fastbins_offset = 0
    self.bins_offset = 0

    # Exactly one of addr/mem must be provided
    if addr is None:
        if mem is None:
            pu.print_error("Please specify a struct malloc_state address")
            self.initOK = False
            return
        self.address = None
    else:
        self.address = addr
    if debugger is not None:
        self.dbg = debugger
    else:
        pu.print_error("Please specify a debugger")
        raise Exception('sys.exit()')
    if version is None:
        pu.print_error("Please specify a glibc version for malloc_state")
        raise Exception('sys.exit()')
    else:
        self.version = version
    # determines self.size before reading/validating the memory below
    self.initialize_sizes_and_offsets()
    if mem is None:
        # a string of raw memory was not provided, let's read it from the debugger
        try:
            self.mem = self.dbg.read_memory(addr, self.size)
        except TypeError:
            pu.print_error("Invalid address specified")
            self.initOK = False
            return
        except RuntimeError:
            pu.print_error("Could not read address {0:#x}".format(addr))
            self.initOK = False
            return
    else:
        if len(mem) < self.size:
            pu.print_error("Provided memory size is too small for a malloc_state")
            self.initOK = False
            return
        self.mem = mem[:self.size]
    self.unpack_memory()
def prepare_args_if_negative_count(self):
    """This is a little bit of a hack. The idea is to handle cases where the user
    wants to print N chunks going backwards.
    We are going to list all the chunks in the arena until we find all the
    addresses requested and then craft new arguments as if the user requested
    to print from new addresses N chunks before the requested addresses
    before calling parse_many2()
    """
    self.args.reverse = False
    # Nothing to do if the count is positive or unlimited
    if self.args.count == None or self.args.count >= 0:
        return
    # We are making the count positive
    self.args.count = self.args.count*-1
    # And we print N chunks before the requested chunk + the actual chunk
    self.args.count += 1
    addresses = self.dbg.parse_address(self.args.addresses)
    if len(addresses) == 0:
        pu.print_error("WARNING: No valid address supplied")
        self.parser.print_help()
        return []
    # We will fill it with new addresses later below
    self.args.addresses = []
    # Let's get all the chunks' addresses in the arena
    mstate = self.cache.mstate
    par = self.cache.par
    if mstate.address == self.cache.main_arena_address:
        # main arena: heap start comes from malloc_par (mp_.sbrk_base)
        addr, _ = self.dbg.get_heap_address(par)
    else:
        # non-main arena: the heap starts right after the aligned malloc_state
        print("Using manual arena calculation for heap start")
        addr = (mstate.address + mstate.size + self.ptm.MALLOC_ALIGN_MASK) & ~self.ptm.MALLOC_ALIGN_MASK
    chunks_addresses = []
    # NOTE(review): the first chunk address is appended here and again on the
    # first loop iteration below, so it appears twice — confirm intended
    chunks_addresses.append(addr)
    # Walk the whole arena linearly up to the top chunk
    while True:
        p = mc.malloc_chunk(
            self.ptm, addr, read_data=False, debugger=self.dbg, use_cache=True
        )
        if not p.initOK:
            pu.print_error("WARNING: Stopping due to invalid chunk parsed in arena")
            break
        chunks_addresses.append(addr)
        if p.address == self.ptm.top(self.cache.mstate):
            break
        addr = self.ptm.next_chunk(p)
    # Prepare arguments for "ptchunk" format
    # i.e. for every address, get the new address N chunks before
    for addr in addresses:
        try:
            index = chunks_addresses.index(addr)
        except ValueError:
            pu.print_error(f"WARNING: Could not find {addr:#x} in arena, skipping")
            continue
        index -= self.args.count
        if index < 0:
            pu.print_error(f"WARNING: Reaching beginning of arena with {addr:#x}")
            index = 0
        # addresses are stored back as hex strings, as parse_address() expects
        self.args.addresses.append(f"{chunks_addresses[index]:#x}")
def parse_many2(self, inuse=None, tcache=None, fast=None, allow_invalid=False,
                separate_addresses_non_verbose=True, header_once=None,
                count_handle=None, count_printed=None,
                ):
    """Most arguments are shared by "ptchunk" and "ptlist" commands.
    This function allows for "ptlist" to call into "ptchunk"

    :param inuse: True if we know it is an inuse chunk (i.e. not in any bin) (not required)
    :param tcache: True if we know all the chunks are in the tcache bins,
                   False if we know they are NOT in the tcache bins.
                   None otherwise.
                   Useful to specify when parsing a tcache bin
    :param fast: Same as "tcache" but for fast bins
    :param allow_invalid: sometimes these structures will be used
                          for that isn't actually a complete chunk, like a freebin, in these cases we
                          still wanted to be able to parse so that we can access the forward and
                          backward pointers, so shouldn't complain about their being invalid size
    :param separate_addresses_non_verbose: False to avoid a separation when printing
                                           one-line chunks, like in freebins
    :param header_once: string to print before printing the first chunk, or None if not needed
    :param count_handle: maximum number of chunks to handle per address, even if not printed,
                         or None if unlimited
    :param count_printed: maximum number of chunks to print in total for all addresses,
                          or None if unlimited. Only useful if handling a freebin.
    :return: the list of malloc_chunk() found

    Note that it is a static method but it has self as a first argument
    to make it easier to read its implementation
    """
    # Resolve the user-supplied addresses first; everything else is
    # argument validation before delegating to ptchunk.parse_many()
    addresses = []
    if not self.args.addresses:
        print("WARNING: No address supplied?")
        self.parser.print_help()
        return []
    else:
        addresses = self.dbg.parse_address(self.args.addresses)
        if len(addresses) == 0:
            pu.print_error("WARNING: No valid address supplied")
            self.parser.print_help()
            return []

    if self.args.hexdump_unit not in h.hexdump_units:
        pu.print_error("Wrong hexdump unit specified")
        self.parser.print_help()
        return []
    hexdump_unit = self.args.hexdump_unit
    count = self.args.count
    search_depth = self.args.search_depth
    skip_header = self.args.skip_header
    print_offset = self.args.print_offset
    metadata = self.args.metadata
    verbose = self.args.verbose
    no_newline = self.args.no_newline
    debug = self.args.debug
    hexdump = self.args.hexdump
    maxbytes = self.args.maxbytes
    commands = self.args.commands
    address_offset = self.args.address_offset

    if self.args.search_type not in ptchunk.search_types:
        pu.print_error(f"Wrong search type specified {self.args.search_type}")
        self.parser.print_help()
        return []
    # non-string searches must be given as hex values
    if self.args.search_type != "string" and not self.args.search_value.startswith("0x"):
        pu.print_error("Wrong search value for specified type")
        self.parser.print_help()
        return []
    search_value = self.args.search_value
    search_type = self.args.search_type
    match_only = self.args.match_only

    highlight_only = self.args.highlight_only
    highlight_addresses = []
    if self.args.highlight_addresses:
        # comma-separated list of addresses/expressions to highlight
        list_highlight_addresses = [e.strip() for e in self.args.highlight_addresses.split(",")]
        highlight_addresses = self.dbg.parse_address(list_highlight_addresses)
        if len(highlight_addresses) == 0:
            pu.print_error("WARNING: No valid address to highlight supplied")
            self.parser.print_help()
            return []
    highlight_metadata = []
    if self.args.highlight_metadata:
        highlight_metadata = [e.strip() for e in self.args.highlight_metadata.split(",")]

    # some commands inheriting ptchunk arguments don't support highlighting types
    try:
        highlight_types = self.args.highlight_types
    except AttributeError:
        highlight_types = None
    if highlight_types:
        highlight_types = [e.strip() for e in highlight_types.split(",")]
        for e in highlight_types:
            # M=inuse, F=free, f=fast, t=tcache
            if e not in ["M", "F", "f", "t"]:
                pu.print_error("WARNING: Invalid type to highlight supplied")
                self.parser.print_help()
                return []
    else:
        highlight_types = []

    all_chunks = []
    chunks = None
    for address in addresses:
        # visual separator between the chunk listings of each address
        if chunks is not None and len(chunks) > 0 and \
                (separate_addresses_non_verbose or verbose > 0):
            print("-" * 60)

        # per-address print budget is the remaining global budget capped
        # by the per-address count
        if count_printed == None:
            count_linear = count
        elif count == None:
            count_linear = count_printed
        else:
            count_linear = min(count_printed, count)

        chunks = ptchunk.parse_many(
            address, self.ptm, self.dbg, count_linear, count_handle, search_depth,
            skip_header, hexdump_unit, search_value,
            search_type, match_only, print_offset, verbose, no_newline,
            debug, hexdump, maxbytes, metadata,
            highlight_types=highlight_types,
            highlight_addresses=highlight_addresses,
            highlight_metadata=highlight_metadata,
            highlight_only=highlight_only,
            inuse=inuse, tcache=tcache, fast=fast,
            allow_invalid=allow_invalid, header_once=header_once,
            commands=commands,
            use_cache=True,  # we enforced updating the cache once above so no need to do it for every chunk
            address_offset=address_offset
        )
        if chunks is not None and len(chunks) > 0:
            all_chunks.extend(chunks)
            if count_printed != None:
                count_printed -= len(chunks)
            # the one-time header was consumed by the first batch
            header_once = None
        if count_printed == 0:
            break

    return all_chunks
def unpack_memory(self):
    """Decode every malloc_state field from the raw bytes held in self.mem.

    Walks the buffer with a running cursor and assigns each decoded field
    to an attribute. The exact layout depends on the glibc version
    (self.version) and on the word size (self.size_sz).

    :raises Exception: if no memory was previously read into self.mem
    """
    if self.mem is None:
        pu.print_error("No memory found")
        raise Exception('sys.exit()')

    # Pick the struct format letter matching the arena's word size once;
    # every word-sized field below reuses it.
    if self.size_sz == 4:
        word = "I"
    elif self.size_sz == 8:
        word = "Q"

    self.mutex = self.unpack_variable("<I", 0)
    self.flags = self.unpack_variable("<I", 4)
    cursor = 8

    if self.version >= 2.27:
        # have_fastchunks added in 2.27; declared as an int but padded to
        # a full word on 64-bit
        self.have_fastchunks = self.unpack_variable("<" + word, cursor)
        cursor += self.size_sz

    self.fastbinsY = struct.unpack_from("<10" + word, self.mem, cursor)
    cursor += 10 * self.size_sz

    # hax, empiric: 4 bytes of padding appear after fastbinsY[] on 32-bit
    # for glibc >= 2.27
    if self.version >= 2.27 and self.size_sz == 4:
        cursor += 4

    self.top = self.unpack_variable("<" + word, cursor)
    cursor += self.size_sz
    self.last_remainder = self.unpack_variable("<" + word, cursor)
    cursor += self.size_sz

    self.bins = struct.unpack_from("<254" + word, self.mem, cursor)
    cursor += 254 * self.size_sz
    self.binmap = struct.unpack_from("<IIII", self.mem, cursor)
    cursor += 16

    self.next = self.unpack_variable("<" + word, cursor)
    cursor += self.size_sz
    self.next_free = self.unpack_variable("<" + word, cursor)
    cursor += self.size_sz

    if self.version >= 2.23:
        # attached_threads added in 2.23
        self.attached_threads = self.unpack_variable("<" + word, cursor)
        cursor += self.size_sz

    self.system_mem = self.unpack_variable("<" + word, cursor)
    cursor += self.size_sz
    self.max_system_mem = self.unpack_variable("<" + word, cursor)
def __init__(
    self,
    ptm,
    addr=None,
    mem=None,
    size=None,
    inuse=None,
    tcache=None,
    fast=None,
    read_data=True,  # XXX - actually use that argument
    debugger=None,
    allow_invalid=False,
    use_cache=False,
):
    """Parse a chunk's data and initialize the malloc_chunk object.

    :param ptm: ptmalloc object
    :param addr: chunk address where to read the chunk's content from the debugger
    :param mem: alternatively to "addr", provides the memory bytes of that chunk's content
    :param size: provide the chunk's size if you know it (not required)
    :param inuse: True if we know it is an inuse chunk (i.e. not in any bin) (not required)
    :param tcache: True if we know it is a chunk in the tcache bins, False if we know it
                   is NOT in the tcache bins. None otherwise. Whenever possible, specify
                   it, as otherwise it will try to search for it in the tcache array
                   which is slower
    :param fast: same as "tcache" but for fast bins
    :param read_data: XXX - currently only toggled off below, never used afterwards
    :param debugger: the pydbg object
    :param allow_invalid: sometimes these structures will be used for memory that isn't
                          actually a complete chunk, like a freebin. In those cases we
                          still want to be able to parse it so that we can access the
                          forward and backward pointers, so we shouldn't complain about
                          the size being invalid
    :param use_cache: True if we want to use the cached information from the cache
                      object. False if we want to fetch the data again
    """
    super(malloc_chunk, self).__init__(ptm, debugger=debugger)
    if not self.initOK:
        return

    # "fast" and "tcache" are mutually exclusive hints; normalize one
    # from the other when only one is provided
    if fast is True and tcache is True:
        raise Exception("Can't be fast and tcache at the same time")
    if fast is True:
        tcache = False
    if tcache is True:
        fast = False

    self.prev_size = 0
    self.size = 0
    self.data = None
    # tcache specific
    # Note: In glibc source, it is part of another structure (tcache_entry) but is simpler
    # to just have it tracked in the malloc_chunk object
    self.next = None
    self.key = None
    # free specific
    self.fd = None
    self.bk = None
    # large blocks specific + free specific
    self.fd_nextsize = None
    self.bk_nextsize = None
    # actual chunk flags
    self.cinuse_bit = 0

    # general indicator if we are inuse
    # XXX - is redundant with self.type so need to get rid of it
    self.inuse = inuse

    self.data_address = None
    self.hdr_size = 0
    self.mem = mem
    self.from_mem = False
    self.is_top = False
    self.type = None  # ptmalloc.chunk_type

    cache = self.ptm.cache

    if not self.validate_address(addr):
        return
    log.info("malloc_chunk(): self.address = 0x%x" % self.address)

    if self.dbg is None and mem is None:
        pu.print_error("no active debugger and no memory specified")
        return

    self.SIZE_SZ = self.ptm.SIZE_SZ

    if mem is None:
        # a string of raw memory was not provided: read the minimal
        # (prev_size, size) header from the debugger
        try:
            mem = self.dbg.read_memory(addr, self.ptm.INUSE_HDR_SZ)
        except TypeError:
            pu.print_error("Invalid address specified")
            self.initOK = False
            return
        except RuntimeError:
            pu.print_error("Could not read address {0:#x}".format(addr))
            self.initOK = False
            return
    else:
        self.from_mem = True
        # a string of raw memory was provided: make sure enough bytes were
        # given for the chunk kind we expect
        if self.inuse:
            if len(mem) < self.ptm.INUSE_HDR_SZ:
                pu.print_error("Insufficient mem provided for malloc_chunk.")
                self.initOK = False
                return
        # header only provided
        elif len(mem) == self.ptm.INUSE_HDR_SZ:
            read_data = False
        elif len(mem) < self.ptm.FREE_HDR_SZ:
            pu.print_error("Insufficient memory provided for a free chunk.")
            self.initOK = False
            return

    # decode the (prev_size, size) header words
    if self.SIZE_SZ == 4:
        (self.prev_size, self.size) = struct.unpack_from("<II", mem, 0x0)
    elif self.SIZE_SZ == 8:
        (self.prev_size, self.size) = struct.unpack_from("<QQ", mem, 0x0)

    if not allow_invalid and self.size == 0:
        pu.print_error("chunk with zero size detected at 0x%x" % self.address)
        self.initOK = False
        return

    # XXX - add support for seeing if mem has enough space
    # XXX - check if the computed address goes outside of the arena
    # boundary instead of just accepting some bad chunk
    if self.size != 0 and self.dbg:
        # read next chunk size field to determine if current chunk is inuse
        # (glibc stores the PREV_INUSE bit in the *next* chunk's size)
        if size is None:
            nextchunk_addr = self.address + (self.size & ~self.ptm.SIZE_BITS)
        else:
            nextchunk_addr = self.address + (size & ~self.ptm.SIZE_BITS)

        if cache.mstate and self.address == self.ptm.top(cache.mstate):
            # the top chunk has no next chunk; it is never inuse
            self.cinuse_bit = 0
            self.is_top = True
        else:
            nextchunk_error = False
            try:
                mem2 = self.dbg.read_memory(
                    nextchunk_addr + self.ptm.SIZE_SZ, self.ptm.SIZE_SZ
                )
            # except gdb.MemoryError:
            except Exception:
                if not allow_invalid:
                    #print("self.address: 0x%x" % self.address)
                    pu.print_error(
                        "Could not read nextchunk (@0x%x) size. Invalid chunk address?"
                        % nextchunk_addr
                    )
                    self.initOK = False
                    return
                nextchunk_error = True
            if not nextchunk_error:
                if self.ptm.SIZE_SZ == 4:
                    nextchunk_size = struct.unpack_from("<I", mem2, 0x0)[0]
                elif self.ptm.SIZE_SZ == 8:
                    nextchunk_size = struct.unpack_from("<Q", mem2, 0x0)[0]
                self.cinuse_bit = nextchunk_size & self.ptm.PREV_INUSE

    if fast is None:
        # XXX - if too slow, we could specify the bin size?
        fast = ptfast.ptfast.is_in_fastbin(self.address, self.ptm, dbg=self.dbg, use_cache=use_cache)
    else:
        # Trust the caller is right
        pass
    log.debug(f"fast = {str(fast)}")

    if tcache is None:
        # XXX - if too slow, we could specify the bin size?
        tcache = pttcache.pttcache.is_in_tcache(self.address, self.ptm, dbg=self.dbg, use_cache=use_cache)
    else:
        # Trust the caller is right
        pass
    log.debug(f"tcache = {str(tcache)}")

    # decide if chunk is actually inuse: the PREV_INUSE bit stays set for
    # fastbin/tcache chunks, so those must be excluded explicitly
    if inuse is None:
        if self.cinuse_bit and not fast and not tcache:
            self.inuse = True
        else:
            self.inuse = False
    else:
        # Trust the caller is right
        self.inuse = inuse

    # now that we know the size and if it is inuse/freed, we can determine
    # the chunk type and thus the chunk header size
    if self.inuse is True:
        self.type = pt.chunk_type.INUSE
    else:
        if size is None:
            if fast is True:
                self.type = pt.chunk_type.FREE_FAST
            elif tcache is True:
                self.type = pt.chunk_type.FREE_TCACHE
            elif self.ptm.in_smallbin_range(self.size):
                self.type = pt.chunk_type.FREE_SMALL
            else:
                self.type = pt.chunk_type.FREE_LARGE
        else:
            # Trust the caller size
            if self.ptm.in_smallbin_range(size):
                self.type = pt.chunk_type.FREE_SMALL
            else:
                self.type = pt.chunk_type.FREE_LARGE

    # NOTE(review): self.hdr_size is still 0 at this point; it is only
    # assigned in the type-specific branches below, so this log line
    # always prints 0x0
    log.debug(f"self.hdr_size = {self.hdr_size:#x}")

    # parse additional fields in chunk header depending on type
    if self.type == pt.chunk_type.INUSE:
        self.hdr_size = self.ptm.INUSE_HDR_SZ
    # fastbin freed follows
    elif self.type == pt.chunk_type.FREE_FAST:
        self.hdr_size = self.ptm.FREE_FASTCHUNK_HDR_SZ
        if self.address is not None:
            # a string of raw memory was not provided
            if self.dbg is not None:
                # NOTE(review): the two branches below are identical; kept
                # as-is to preserve behavior
                if self.ptm.SIZE_SZ == 4:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_FASTCHUNK_HDR_SZ
                    )
                elif self.ptm.SIZE_SZ == 8:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_FASTCHUNK_HDR_SZ
                    )
        if self.ptm.SIZE_SZ == 4:
            self.fd = struct.unpack_from("<I", mem, self.ptm.INUSE_HDR_SZ)[0]
        elif self.ptm.SIZE_SZ == 8:
            self.fd = struct.unpack_from("<Q", mem, self.ptm.INUSE_HDR_SZ)[0]
    # tcache follows
    elif self.type == pt.chunk_type.FREE_TCACHE:
        self.hdr_size = self.ptm.FREE_TCACHE_HDR_SZ
        if self.address is not None:
            # a string of raw memory was not provided
            if self.dbg is not None:
                if self.ptm.SIZE_SZ == 4:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_TCACHE_HDR_SZ
                    )
                elif self.ptm.SIZE_SZ == 8:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_TCACHE_HDR_SZ
                    )
        if self.ptm.SIZE_SZ == 4:
            (self.next, self.key) = struct.unpack_from(
                "<II", mem, self.ptm.INUSE_HDR_SZ
            )
        elif self.ptm.SIZE_SZ == 8:
            (self.next, self.key) = struct.unpack_from(
                "<QQ", mem, self.ptm.INUSE_HDR_SZ
            )
    # smallbin freed follows
    elif self.type == pt.chunk_type.FREE_SMALL:
        self.hdr_size = self.ptm.FREE_HDR_SZ
        if self.address is not None:
            # a string of raw memory was not provided
            if self.dbg is not None:
                if self.ptm.SIZE_SZ == 4:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_HDR_SZ
                    )
                elif self.ptm.SIZE_SZ == 8:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_HDR_SZ
                    )
        if self.ptm.SIZE_SZ == 4:
            (self.fd, self.bk) = struct.unpack_from(
                "<II", mem, self.ptm.INUSE_HDR_SZ
            )
        elif self.ptm.SIZE_SZ == 8:
            (self.fd, self.bk) = struct.unpack_from(
                "<QQ", mem, self.ptm.INUSE_HDR_SZ
            )
    # largebin freed follows
    elif self.type == pt.chunk_type.FREE_LARGE:
        self.hdr_size = self.ptm.FREE_LARGE_HDR_SZ
        if self.address is not None:
            # a string of raw memory was not provided
            if self.dbg is not None:
                if self.ptm.SIZE_SZ == 4:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_LARGE_HDR_SZ
                    )
                elif self.ptm.SIZE_SZ == 8:
                    mem = self.dbg.read_memory(
                        self.address, self.ptm.FREE_LARGE_HDR_SZ
                    )
        if self.ptm.SIZE_SZ == 4:
            (
                self.fd,
                self.bk,
                self.fd_nextsize,
                self.bk_nextsize,
            ) = struct.unpack_from("<IIII", mem, self.ptm.INUSE_HDR_SZ)
        elif self.ptm.SIZE_SZ == 8:
            (
                self.fd,
                self.bk,
                self.fd_nextsize,
                self.bk_nextsize,
            ) = struct.unpack_from("<QQQQ", mem, self.ptm.INUSE_HDR_SZ)

    # keep track where the data follows
    if self.address is not None:
        self.data_address = self.address + self.hdr_size
        log.debug(f"self.data_address = {self.data_address:#x}")
def parse_many(address, ptm, dbg=None, count=1, count_handle=None, search_depth=0,
    skip_header=False, hexdump_unit=1, search_value=None,
    search_type=None, match_only=False, print_offset=0, verbose=0,
    no_newline=False, debug=False, hexdump=False, maxbytes=0,
    metadata=None, highlight_types=[], highlight_addresses=[],
    highlight_metadata=[], highlight_only=False, inuse=None,
    tcache=None, fast=None, allow_invalid=False, header_once=None,
    commands=None, use_cache=False, address_offset=False
):
    """Parse many chunks starting from a given address and show them based
    on the passed arguments.

    NOTE(review): the list/[] default arguments are mutable defaults shared
    across calls; they are only read (copied into sets) below so this is
    harmless today, but None-defaults would be safer.

    :param address: chunk's address to start parsing from
    :param ptm: ptmalloc object (libptmalloc constants and helpers)
    :param dbg: pydbg object (debugger interface)
    :param count: see ptchunk's ArgumentParser definition
                  maximum number of chunks to print, or None if unlimited
    :param count_handle: maximum number of chunks to handle per address, even if
                         not printed, or None if unlimited
    :param search_depth: see ptchunk's ArgumentParser definition
    :param skip_header: see ptchunk's ArgumentParser definition
    :param hexdump_unit: see ptchunk's ArgumentParser definition
    :param search_value: see ptchunk's ArgumentParser definition
    :param search_type: see ptchunk's ArgumentParser definition
    :param match_only: see ptchunk's ArgumentParser definition
    :param print_offset: see ptchunk's ArgumentParser definition
    :param verbose: see ptchunk's ArgumentParser definition
    :param no_newline: see ptchunk's ArgumentParser definition
    :param debug: see ptchunk's ArgumentParser definition
    :param hexdump: see ptchunk's ArgumentParser definition
    :param maxbytes: see ptchunk's ArgumentParser definition
    :param metadata: see ptchunk's ArgumentParser definition
    :param highlight_types: list of types. highlight chunks with matching type
                            with a '*' e.g. to be used by 'ptlist'
    :param highlight_addresses: list of addresses. highlight chunks with matching
                                address with a '*' e.g. to be used by 'ptlist'
    :param highlight_metadata: list of metadata. highlight chunks with matching
                               metadata with a '*' e.g. to be used by 'ptlist'
    :param highlight_only: see ptchunk's ArgumentParser definition
    :param inuse: True if we know all the chunks are inuse (i.e. not in any bin)
                  False if we know they are NOT inuse. None otherwise.
                  Useful to specify when parsing a regular bin
    :param tcache: True if we know all the chunks are in the tcache bins,
                   False if we know they are NOT in the tcache bins. None otherwise.
                   Useful to specify when parsing a tcache bin
    :param fast: same as "tcache" but for fast bins
    :param allow_invalid: sometimes these structures will be used for memory that
                          isn't actually a complete chunk, like a freebin. In those
                          cases we still want to be able to parse it so that we can
                          access the forward and backward pointers, so we shouldn't
                          complain about the size being invalid
    :param header_once: string to print before printing the first chunk, or None
                        if not needed
    :param commands: see ptchunk's ArgumentParser definition
    :param use_cache: see ptchunk's ArgumentParser definition
    :param address_offset: see ptchunk's ArgumentParser definition

    :return: the list of malloc_chunk being parsed and already shown.
             NOTE(review): returns None (not []) when the first chunk fails to
             parse; callers in this file check for None explicitly
    """
    chunks = []

    # translate the one-letter type names into ptmalloc.chunk_type values
    highlight_types2 = []
    highlight_types = set(highlight_types)
    for t in highlight_types:
        if t == "M":
            highlight_types2.append(pt.chunk_type.INUSE)
        elif t == "F":
            # "F" covers both small and large free chunks
            highlight_types2.append(pt.chunk_type.FREE_SMALL)
            highlight_types2.append(pt.chunk_type.FREE_LARGE)
        elif t == "f":
            highlight_types2.append(pt.chunk_type.FREE_FAST)
        elif t == "t":
            highlight_types2.append(pt.chunk_type.FREE_TCACHE)
        else:
            print("ERROR: invalid chunk type provided, should not happen")
            return []

    # sets so membership tests are O(1) and found entries can be removed
    highlight_addresses = set(highlight_addresses)
    highlight_metadata = set(highlight_metadata)
    highlight_metadata_found = set([])

    p = mc.malloc_chunk(
        ptm,
        addr=address,
        debugger=dbg,
        use_cache=use_cache,
        tcache=tcache,
        fast=fast,
        allow_invalid=allow_invalid
    )
    if not p.initOK:
        return
    first_address = p.address
    dump_offset = 0
    while True:
        prefix = ""  # used for one-line output
        suffix = ""  # used for one-line output
        epilog = ""  # used for verbose output
        colorize_func = str  # do not colorize by default

        if metadata is not None:
            # NOTE(review): "opened" is assigned but never used in this function
            opened = False
            list_metadata = [e.strip() for e in metadata.split(",")]
            L, s, e, colorize_func = ptmeta.get_metadata(p.address, list_metadata=list_metadata)
            suffix += s
            epilog += e
            p.metadata = L  # save so we can easily export to json later

        # NOTE(review): found_match is only assigned in this branch; if
        # match_only is used without search_value it would be referenced
        # before assignment below — presumably callers always pair them
        if search_value is not None:
            if not dbg.search_chunk(
                ptm, p, search_value, search_type=search_type,
                depth=search_depth, skip=skip_header
            ):
                found_match = False
                suffix += " [NO MATCH]"
            else:
                suffix += pu.light_green(" [MATCH]")
                found_match = True

        # XXX - the current representation is not really generic as we print the first short
        # as an ID and the second 2 bytes as 2 characters. We may want to support passing the
        # format string as an argument but this is already useful
        if print_offset != 0:
            mem = dbg.read_memory(
                p.data_address + print_offset, 4
            )
            (id_, desc) = struct.unpack_from("<H2s", mem, 0x0)
            if h.is_ascii(desc):
                suffix += " 0x%04x %s" % (id_, str(desc, encoding="utf-8"))
            else:
                suffix += " 0x%04x hex(%s)" % (
                    id_,
                    str(binascii.hexlify(desc), encoding="utf-8"),
                )

        # tag well-known addresses in the one-line output
        if p.address == ptm.cache.par.sbrk_base:
            suffix += " (sbrk_base)"
        elif p.address == ptm.top(ptm.cache.mstate):
            suffix += " (top)"

        printed = False
        if verbose == 0:
            found_highlight = False
            # Only highlight chunks for non verbose
            if p.address in highlight_addresses:
                found_highlight = True
                # remove so we can warn about never-seen addresses at the end
                highlight_addresses.remove(p.address)
            if p.type in highlight_types2:
                found_highlight = True
            if len(highlight_metadata) > 0:
                # We retrieve all metadata since we want to highlight chunks containing any of the
                # metadata, even if we don't show some of the metadata
                _, s, _, _ = ptmeta.get_metadata(p.address, list_metadata="all")
                for m in highlight_metadata:
                    # we check in the one-line output as it should have less
                    # non-useful information
                    if m in s:
                        found_highlight = True
                        highlight_metadata_found.add(m)
            if found_highlight:
                prefix += "* "
            if (not highlight_only or found_highlight) \
                    and (not match_only or found_match):
                if header_once != None:
                    print(header_once)
                    header_once = None
                if no_newline:
                    print(prefix + ptm.chunk_info(p, colorize_func=colorize_func, first_address=first_address, address_offset=address_offset) + suffix, end="")
                else:
                    print(prefix + ptm.chunk_info(p, colorize_func=colorize_func, first_address=first_address, address_offset=address_offset) + suffix)
                printed = True
        elif verbose >= 1 and (not match_only or found_match):
            if header_once != None:
                print(header_once)
                header_once = None
            print(p)
            printed = True

        # XXX - this is old code used in Cisco ASA. Need removal or merge?
        if ptm.ptchunk_callback is not None:
            size = ptm.chunksize(p) - p.hdr_size
            if p.data_address is not None:
                # We can provide an excess of information and the
                # callback can choose what to use
                cbinfo = {}
                cbinfo["caller"] = "ptchunk"
                cbinfo["allocator"] = "ptmalloc"
                cbinfo["addr"] = p.data_address
                cbinfo["hdr_sz"] = p.hdr_size
                cbinfo["chunksz"] = ptm.chunksize(p)
                cbinfo["min_hdr_sz"] = ptm.INUSE_HDR_SZ
                cbinfo["data_size"] = size
                cbinfo["inuse"] = p.inuse
                cbinfo["size_sz"] = ptm.SIZE_SZ
                if debug:
                    cbinfo["debug"] = True
                    print(cbinfo)
                # We expect callback to tell us how much data it
                # 'consumed' in printing out info
                dump_offset = ptm.ptchunk_callback(cbinfo)
            # mem-based callbacks not yet supported

        if printed:
            if hexdump:
                dbg.print_hexdump_chunk(ptm, p, maxlen=maxbytes, off=dump_offset, unit=hexdump_unit, verbose=verbose)
            if verbose >= 1 and epilog:
                print(epilog, end="")
            if commands:
                # "@" in a user command is substituted with the chunk address
                for command in commands.split(";"):
                    formatted_command = command.replace("@", f"{p.address:#x}")
                    print(dbg.execute(formatted_command))
            chunks.append(p)

        # decrement both budgets; either hitting zero stops the walk
        if count != None:
            count -= 1
        if count_handle != None:
            count_handle -= 1
        if count != 0 and count_handle != 0:
            if printed and (verbose >= 1 or hexdump):
                print("--")
            if p.is_top:
                # reached the top chunk: print the arena end marker and stop
                if verbose == 0:
                    if ptm.cache.mstate.address == ptm.cache.main_arena_address:
                        start = ptm.cache.par.sbrk_base
                    else:
                        # XXX - seems mstate is at offset 0x20 so there is 0x10 unknown bytes and 0x10 bytes for the chunk
                        # header holding the mstate. So aligning to page works?
                        start = ((ptm.cache.mstate.address & ~0xfff) + ptm.MALLOC_ALIGN_MASK) & ~ptm.MALLOC_ALIGN_MASK
                    end = int(start + ptm.cache.mstate.max_system_mem)
                    if address_offset is True:
                        end -= first_address
                    if ptm.cache.mstate.address == ptm.cache.main_arena_address:
                        print("{:#x}".format(end), end="")
                        print(" (sbrk_end)")
                    else:
                        print("{:#x}".format(end))
                else:
                    print("Stopping due to end of heap")
                break
            # advance to the next chunk by the current chunk's size
            p = mc.malloc_chunk(
                ptm,
                addr=(p.address + ptm.chunksize(p)),
                debugger=dbg,
                use_cache=use_cache,
                tcache=tcache,
                fast=fast,
                allow_invalid=allow_invalid
            )
            if not p.initOK:
                break
        else:
            break

    # warn about highlight requests that never matched any chunk
    if len(highlight_addresses) != 0:
        pu.print_error("WARNING: Could not find these chunk addresses: %s" % (", ".join(["0x%x" % x for x in highlight_addresses])))
    if len(highlight_metadata - highlight_metadata_found) != 0:
        pu.print_error("WARNING: Could not find these metadata: %s" % (", ".join(list(highlight_metadata - highlight_metadata_found))))

    return chunks
def _gdb_is_running(*args, **kwargs):
    """Call the wrapped function only when gdb has a live inferior.

    Wrapper used by the enclosing decorator: if no thread is currently
    selected in gdb (i.e. nothing is being debugged), print an error
    and return None instead of invoking the wrapped function.
    """
    if gdb.selected_thread() is None:
        pu.print_error("GDB is not running.")
        return None
    return f(*args, **kwargs)