def thread_cache(self): tcache_addr = pwndbg.symbol.address('tcache') # The symbol.address returns ptr to ptr to tcache struct, as in: # pwndbg> p &tcache # $1 = (tcache_perthread_struct **) 0x7ffff7fd76f0 # so we need to dereference it if tcache_addr is not None: tcache_addr = pwndbg.memory.pvoid(tcache_addr) if tcache_addr is None: tcache_addr = self._fetch_tcache_addr() if tcache_addr is not None: try: self._thread_cache = pwndbg.memory.poi(self.tcache_perthread_struct, tcache_addr) _ = self._thread_cache['entries'].fetch_lazy() except Exception as e: print(message.error('Error fetching tcache. GDB cannot access ' 'thread-local variables unless you compile with -lpthread.')) return None else: if not self.has_tcache(): print(message.warn('Your libc does not use thread cache')) return None print(message.error('Symbol \'tcache\' not found. Try installing libc ' 'debugging symbols and try again.')) return self._thread_cache
def probeleak(address=None, count=0x40, max_distance=0x0):
    """Scan `count` bytes at `address` for values that look like pointers.

    A hit is any pointer-sized window whose value lands inside (or within
    `max_distance` of) a mapped page; each hit is printed with its page,
    permissions and nearest symbol.
    """
    address = int(address)
    address &= pwndbg.arch.ptrmask
    ptrsize = pwndbg.arch.ptrsize
    count = max(int(count), ptrsize)
    # Number of hex digits needed to print offsets up to `count`.
    off_zeros = int(math.ceil(math.log(count, 2) / 4))

    if count > address > 0x10000:  # in case someone puts in an end address and not a count (smh)
        # Fixed typo in user-facing message: "Substracting" -> "Subtracting".
        print(message.warn("Warning: you gave an end address, not a count. Subtracting 0x%x from the count." % (address)))
        count -= address

    try:
        data = pwndbg.memory.read(address, count, partial=True)
    except gdb.error as e:
        print(message.error(str(e)))
        return

    if not data:
        print(message.error("Couldn't read memory at 0x%x. See 'probeleak -h' for the usage." % (address,)))
        return

    found = False
    # Slide a pointer-sized window over the data one byte at a time.
    for i in range(0, len(data) - ptrsize + 1):
        p = pwndbg.arch.unpack(data[i:i + ptrsize])
        page = find_module(p, max_distance)
        if page:
            if not found:
                print(M.legend())
                found = True

            mod_name = page.objfile
            if not mod_name:
                mod_name = '[anon]'

            if p >= page.end:
                right_text = '(%s) %s + 0x%x + 0x%x (outside of the page)' % (page.permstr, mod_name, page.memsz, p - page.end)
            elif p < page.start:
                right_text = '(%s) %s - 0x%x (outside of the page)' % (page.permstr, mod_name, page.start - p)
            else:
                right_text = '(%s) %s + 0x%x' % (page.permstr, mod_name, p - page.start)

            offset_text = '0x%0*x' % (off_zeros, i)
            p_text = '0x%0*x' % (int(ptrsize * 2), p)
            text = '%s: %s = %s' % (offset_text, M.get(p, text=p_text), M.get(p, text=right_text))

            symbol = pwndbg.symbol.get(p)
            if symbol:
                text += ' (%s)' % symbol
            print(text)

    if not found:
        print(message.hint('No leaks found at 0x%x-0x%x :(' % (address, address + count)))
def print_symbols_in_section(section_name, filter_text=''):
    """Print `addr: symbol` for every symbol inside the named section.

    `filter_text` optionally restricts which symbols are listed.
    """
    start, end = get_section_bounds(section_name)
    if start is None:
        print(message.error('Could not find section'))
        return
    symbols = get_symbols_in_region(start, end, filter_text)
    if not symbols:
        print(message.error('No symbols found in section %s' % section_name))
        # Bail out early instead of silently iterating an empty list.
        return
    for symbol, addr in symbols:
        print(hex(int(addr)) + ': ' + symbol)
def main_arena(self):
    """Resolve glibc's `main_arena` symbol and cache it as a malloc_state.

    Prints an error (leaving any previously cached value untouched) when
    the symbol cannot be resolved, e.g. without libc debug symbols.
    """
    addr = pwndbg.symbol.address('main_arena')
    if addr is None:
        print(message.error('Symbol \'main_arena\' not found. Try installing libc '
                            'debugging symbols and try again.'))
    else:
        self._main_arena = pwndbg.memory.poi(self.malloc_state, addr)
    return self._main_arena
def arenas(self):
    """Walk glibc's circular arena list and return all arenas as a tuple.

    The main arena comes first (with a fake HeapInfo covering the sbrk
    heap); each non-main arena carries its list of mmapped heaps. The
    result is cached in self._arenas.
    """
    arena = self.main_arena
    arenas = []
    arena_cnt = 0
    main_arena_addr = int(arena.address)
    sbrk_page = self.get_heap_boundaries().vaddr
    # Create the main_arena with a fake HeapInfo
    main_arena = Arena(main_arena_addr, [HeapInfo(sbrk_page, sbrk_page)])
    arenas.append(main_arena)

    # Iterate over all the non-main arenas
    addr = int(arena['next'])
    while addr != main_arena_addr:
        heaps = []
        arena = self.get_arena(addr)
        arena_cnt += 1

        # Get the first and last element on the heap linked list of the arena
        last_heap_addr = heap_for_ptr(int(arena['top']))
        first_heap_addr = heap_for_ptr(addr)

        heap = self.get_heap(last_heap_addr)
        if not heap:
            print(message.error('Could not find the heap for arena %s' % hex(addr)))
            return

        # Iterate over the heaps of the arena, from last back to first.
        haddr = last_heap_addr
        while haddr != 0:
            if haddr == first_heap_addr:
                # The first heap has a heap_info and a malloc_state before the actual chunks
                chunks_offset = self.heap_info.sizeof + self.malloc_state.sizeof
            else:
                # The others just have a heap_info header.
                chunks_offset = self.heap_info.sizeof
            heaps.append(HeapInfo(haddr, haddr + chunks_offset))

            # Name the heap mapping, so that it can be colored properly. Note that due to the way malloc is
            # optimized, a vm mapping may contain two heaps, so the numbering will not be exact.
            page = self.get_region(haddr)
            page.objfile = '[heap %d:%d]' % (arena_cnt, len(heaps))
            heap = self.get_heap(haddr)
            # Follow heap_info.prev toward the arena's first heap.
            haddr = int(heap['prev'])

        # Add to the list of arenas and move on to the next one
        arenas.append(Arena(addr, tuple(reversed(heaps))))
        addr = int(arena['next'])

    arenas = tuple(arenas)
    self._arenas = arenas
    return arenas
def format_bin(bins, verbose=False, offset=None):
    """Format a parsed bins dict into a list of printable lines.

    `bins` must contain a 'type' key ('fastbins', 'tcachebins', or a
    normal bin type); the key is popped from the dict. `offset` overrides
    the chunk field offset used when walking the fd chain.
    """
    main_heap = pwndbg.heap.current
    if offset is None:
        offset = main_heap.chunk_key_offset('fd')

    result = []
    bins_type = bins.pop('type')

    for size in bins:
        b = bins[size]
        count, is_chain_corrupted = None, False

        # fastbins consists of only single linked list
        if bins_type == 'fastbins':
            chain_fd = b
        # tcachebins consists of single linked list and entries count
        elif bins_type == 'tcachebins':
            chain_fd, count = b
        # normal bins consists of double linked list and may be corrupted (we can detect corruption)
        else:  # normal bin
            chain_fd, chain_bk, is_chain_corrupted = b

        # Skip empty, uncorrupted bins unless verbose output is requested.
        if not verbose and (chain_fd == [0] and not count) and not is_chain_corrupted:
            continue

        formatted_chain = pwndbg.chain.format(chain_fd[0], offset=offset)

        if isinstance(size, int):
            size = hex(size)

        if is_chain_corrupted:
            # Corrupted bins get both directions printed for inspection.
            line = message.hint(size) + message.error(' [corrupted]') + '\n'
            line += message.hint('FD: ') + formatted_chain + '\n'
            line += message.hint('BK: ') + pwndbg.chain.format(chain_bk[0], offset=main_heap.chunk_key_offset('bk'))
        else:
            if count is not None:
                line = (message.hint(size) + message.hint(' [%3d]' % count) + ': ').ljust(13)
            else:
                line = (message.hint(size) + ': ').ljust(13)
            line += formatted_chain

        result.append(line)

    if not result:
        result.append(message.hint('empty'))

    return result
def thread_cache(self): tcache_addr = pwndbg.symbol.address('tcache') # The symbol.address returns ptr to ptr to tcache struct, as in: # pwndbg> p &tcache # $1 = (tcache_perthread_struct **) 0x7ffff7fd76f0 # so we need to dereference it if tcache_addr is not None: tcache_addr = pwndbg.memory.pvoid(tcache_addr) if tcache_addr is None: tcache_addr = self._fetch_tcache_addr() if tcache_addr is not None: try: self._thread_cache = pwndbg.memory.poi( self.tcache_perthread_struct, tcache_addr) _ = self._thread_cache['entries'].fetch_lazy() except Exception as e: print( message.error( 'Error fetching tcache. GDB cannot access ' 'thread-local variables unless you compile with -lpthread.' )) return None else: if not self.has_tcache(): print(message.warn('Your libc does not use thread cache')) return None print( message.error( 'Symbol \'tcache\' not found. Try installing libc ' 'debugging symbols and try again.')) return self._thread_cache
def addCustomOutput(function):
    """Register `function` as an extra custom context section.

    Any number of custom outputs may be registered; a duplicate function
    name is rejected with an error message.
    """
    global custom_output_functions

    name = function.__name__
    if name in custom_output_functions:
        print(
            message.error("there is already a custom output with the name %s" % name))
        return

    custom_output_functions[name] = function
    config_custom_context.value += " %s" % name
    print("added " + name)

    # Make sure the user_custom section is part of the context layout.
    if "user_custom" not in config_context_sections.value:
        config_context_sections.value += " user_custom"
def breakrva(offset=0, module=None):
    """Set a breakpoint at a module-relative virtual address.

    Defaults to the main executable when no module name is given.
    """
    offset = int(offset)
    if not module:
        # Note: we do not use `pwndbg.file.get_file(module)` here as it is not needed.
        # (as we do need the actual path that is in vmmap, not the file itself)
        module = get_exe_name()

    addr = translate_addr(offset, module)
    if addr is None:
        print(
            message.error(
                "Could not determine rebased breakpoint address on current target"
            ))
    else:
        gdb.Breakpoint("*%#x" % (addr))
def format_bin(bins, verbose=False, offset=None):
    """Render a parsed bins dict as a list of display lines, one per bin.

    The dict's 'type' entry selects how each bin value is unpacked; it is
    popped here. `offset` overrides the fd-chain walk offset.
    """
    main_heap = pwndbg.heap.current
    if offset is None:
        offset = main_heap.chunk_key_offset('fd')

    result = []
    bins_type = bins.pop('type')

    for size in bins:
        b = bins[size]
        count, is_chain_corrupted = None, False

        # fastbins consists of only single linked list
        if bins_type == 'fastbins':
            chain_fd = b
        # tcachebins consists of single linked list and entries count
        elif bins_type == 'tcachebins':
            chain_fd, count = b
        # normal bins consists of double linked list and may be corrupted (we can detect corruption)
        else:  # normal bin
            chain_fd, chain_bk, is_chain_corrupted = b

        # Hide empty uncorrupted bins unless verbose.
        if not verbose and (chain_fd == [0] and not count) and not is_chain_corrupted:
            continue

        formatted_chain = pwndbg.chain.format(chain_fd[0], offset=offset)

        if isinstance(size, int):
            size = hex(size)

        if is_chain_corrupted:
            # Show both fd and bk chains when corruption was detected.
            line = message.hint(size) + message.error(' [corrupted]') + '\n'
            line += message.hint('FD: ') + formatted_chain + '\n'
            line += message.hint('BK: ') + pwndbg.chain.format(chain_bk[0],
                                                               offset=main_heap.chunk_key_offset('bk'))
        else:
            if count is not None:
                line = (message.hint(size) + message.hint(' [%3d]' % count) + ': ').ljust(13)
            else:
                line = (message.hint(size) + ': ').ljust(13)
            line += formatted_chain

        result.append(line)

    if not result:
        result.append(message.hint('empty'))

    return result
def arenas():
    """ Prints out allocated arenas """
    heap = pwndbg.heap.current
    addr = None  # None selects the main arena on the first get_arena call
    arena = heap.get_arena(addr)
    main_arena_addr = int(arena.address)
    # Field width: two hex characters per pointer byte.
    fmt = '[%%%ds]' % (pwndbg.arch.ptrsize * 2)
    # The arena `next` list is circular; stop when we wrap back to main_arena.
    while addr != main_arena_addr:
        h = heap.get_region(addr)
        if not h:
            print(message.error('Could not find the heap'))
            return

        hdr = message.hint(fmt % (hex(addr) if addr else 'main'))
        print(hdr, M.heap(str(h)))
        addr = int(arena['next'])
        arena = heap.get_arena(addr)
def context_ghidra(target=sys.stdout, with_banner=True, width=None):
    """
    Print out the source of the current function decompiled by ghidra.

    The context-ghidra config parameter is used to configure whether to always,
    never or only show the context if no source is available.
    """
    if with_banner:
        banner = [pwndbg.ui.banner("ghidra decompile", target=target, width=width)]
    else:
        banner = []

    if config_context_ghidra == "never":
        return []

    if config_context_ghidra == "if-no-source":
        source_filename = pwndbg.symbol.selected_frame_source_absolute_filename()
        # Skip the decompile when real source is available on disk.
        if source_filename and os.path.exists(source_filename):
            return []

    try:
        decompiled = pwndbg.ghidra.decompile()
        return banner + decompiled.split('\n')
    except Exception as e:
        return banner + [message.error(e)]
def canary():
    """Display the global stack canary and every copy found on the stacks."""
    global_canary, at_random = canary_value()

    if global_canary is None or at_random is None:
        print(message.error("Couldn't find AT_RANDOM - can't display canary."))
        return

    print(message.notice("AT_RANDOM = %#x # points to (not masked) global canary value" % at_random))
    print(message.notice("Canary = 0x%x" % global_canary))

    # Search every known stack mapping for the packed canary value.
    packed = pwndbg.arch.pack(global_canary)
    hits = list(pwndbg.search.search(packed, mappings=pwndbg.stack.stacks.values()))

    if not hits:
        print(message.warn('No valid canaries found on the stacks.'))
        return

    print(message.success('Found valid canaries on the stacks:'))
    for hit in hits:
        pwndbg.commands.telescope.telescope(address=hit, count=1)
def break_on_program_code():
    """
    Breaks on next instruction that belongs to process' objfile code.

    :return: True for success, False when process ended or when pc is at the code.
    """
    page = pwndbg.proc.mem_page
    lo = page.start
    hi = page.end

    if lo <= pwndbg.regs.pc < hi:
        print(message.error("The pc is already at the binary objfile code. Not stepping."))
        return False

    # Single-step until pc lands inside the objfile mapping or the
    # inferior exits.
    while pwndbg.proc.alive:
        gdb.execute("si", from_tty=False, to_string=False)
        if lo <= pwndbg.regs.pc < hi:
            return True

    return False
def break_on_program_code():
    """
    Breaks on next instruction that belongs to process' objfile code.

    :return: True for success, False when process ended or when pc is at the code.
    """
    mp = pwndbg.proc.mem_page
    start = mp.start
    end = mp.end
    # Nothing to do if we are already executing inside the binary's mapping.
    if start <= pwndbg.regs.pc < end:
        print(message.error('The pc is already at the binary objfile code. Not stepping.'))
        return False
    # Single-step the inferior until pc enters [start, end).
    while pwndbg.proc.alive:
        gdb.execute('si', from_tty=False, to_string=False)
        addr = pwndbg.regs.pc
        if start <= addr < end:
            return True
    # The process exited before reaching the objfile code.
    return False
def canary():
    """Show the global stack canary value and search the stacks for copies."""
    global_canary, at_random = canary_value()

    if global_canary is None or at_random is None:
        print(message.error("Couldn't find AT_RANDOM - can't display canary."))
        return

    print(message.notice("AT_RANDOM = %#x # points to (not masked) global canary value" % at_random))
    print(message.notice("Canary = 0x%x (may be incorrect on != glibc)" % global_canary))

    # Scan every thread's stack mapping for the packed canary value.
    stack_canaries = list(
        pwndbg.search.search(pwndbg.arch.pack(global_canary), mappings=pwndbg.stack.stacks.values())
    )

    if not stack_canaries:
        print(message.warn('No valid canaries found on the stacks.'))
        return

    print(message.success('Found valid canaries on the stacks:'))
    for stack_canary in stack_canaries:
        pwndbg.commands.telescope.telescope(address=stack_canary, count=1)
def handle(name='Error'): """Displays an exception to the user, optionally displaying a full traceback and spawning an interactive post-moretem debugger. Notes: - ``set exception-verbose on`` enables stack traces. - ``set exception-debugger on`` enables the post-mortem debugger. """ # This is for unit tests so they fail on exceptions instead of displaying them. if getattr(sys, '_pwndbg_unittest_run', False) is True: E, V, T = sys.exc_info() e = E(V) e.__traceback__ = T raise e # Display the error if debug or verbose: exception_msg = traceback.format_exc() print(exception_msg) inform_report_issue(exception_msg) else: exc_type, exc_value, exc_traceback = sys.exc_info() print( message.error('Exception occured: {}: {} ({})'.format( name, exc_value, exc_type))) print( message.notice('For more info invoke `') + message.hint('set exception-verbose on') + message.notice( '` and rerun the command\nor debug it by yourself with `') + message.hint('set exception-debugger on') + message.notice('`')) # Break into the interactive debugger if debug: with pwndbg.stdio.stdio: pdb.post_mortem()
def got(name_filter=""):
    """Print GOT JUMP_SLOT entries, their RELRO protection, and what each
    slot currently points to, optionally filtered by symbol name."""
    relro_status = pwndbg.wrappers.checksec.relro_status()
    pie_status = pwndbg.wrappers.checksec.pie_status()
    jmpslots = list(pwndbg.wrappers.readelf.get_jmpslots())
    if not len(jmpslots):
        print(message.error("NO JUMP_SLOT entries available in the GOT"))
        return

    # With PIE, readelf reports offsets; they must be rebased on the ELF
    # load address (bin_base is only defined/used in the PIE case).
    if "PIE enabled" in pie_status:
        bin_base = pwndbg.elf.exe().address
    relro_color = message.off
    if "Partial" in relro_status:
        relro_color = message.warn
    elif "Full" in relro_status:
        relro_color = message.on
    print(
        "\nGOT protection: %s | GOT functions: %d\n "
        % (relro_color(relro_status), len(jmpslots))
    )

    for line in jmpslots:
        address, info, rtype, value, name = line.split()[:5]

        if name_filter not in name:
            continue

        address_val = int(address, 16)

        if (
            "PIE enabled" in pie_status
        ):  # if PIE, address is only the offset from the binary base address
            address_val = bin_base + address_val

        got_address = pwndbg.memory.pvoid(address_val)
        print(
            "[0x%x] %s -> %s"
            % (address_val, message.hint(name), pwndbg.chain.format(got_address))
        )
def handle(name='Error'): """Displays an exception to the user, optionally displaying a full traceback and spawning an interactive post-moretem debugger. Notes: - ``set exception-verbose on`` enables stack traces. - ``set exception-debugger on`` enables the post-mortem debugger. """ # This is for unit tests so they fail on exceptions instead of displaying them. if getattr(sys, '_pwndbg_unittest_run', False) is True: E, V, T = sys.exc_info() e = E(V) e.__traceback__ = T raise e # Display the error if debug or verbose: exception_msg = traceback.format_exc() print(exception_msg) inform_report_issue(exception_msg) else: exc_type, exc_value, exc_traceback = sys.exc_info() print(message.error('Exception occured: {}: {} ({})'.format(name, exc_value, exc_type))) print(message.notice('For more info invoke `') + message.hint('set exception-verbose on') + message.notice('` and rerun the command\nor debug it by yourself with `') + message.hint('set exception-debugger on') + message.notice('`')) # Break into the interactive debugger if debug: with pwndbg.stdio.stdio: pdb.post_mortem()
def heap(addr=None):
    """
    Prints out all chunks in the main_arena, or the arena specified by `addr`.
    """
    main_heap = pwndbg.heap.current
    main_arena = main_heap.get_arena(addr)
    if main_arena is None:
        return

    heap_region = main_heap.get_region(addr)
    if heap_region is None:
        print(message.error('Could not find the heap'))
        return

    top = main_arena['top']
    last_remainder = main_arena['last_remainder']

    print(message.hint('Top Chunk: ') + M.get(top))
    print(message.hint('Last Remainder: ') + M.get(last_remainder))
    print()

    # Print out all chunks on the heap
    # TODO: Add an option to print out only free or allocated chunks
    addr = heap_region.vaddr
    # Walk chunk-by-chunk from the heap start up to the top chunk.
    while addr <= top:
        chunk = malloc_chunk(addr)
        size = int(chunk['size'])

        # Clear the bottom 3 bits (the chunk-size flag bits)
        size &= ~7
        if size == 0:
            # A zero-sized chunk would loop forever; stop here.
            break
        addr += size
def top_chunk(addr=None):
    """
    Prints out the address of the top chunk of the main arena, or of the
    arena at the specified address.
    """
    main_heap = pwndbg.heap.current
    main_arena = main_heap.get_arena(addr)

    if main_arena is None:
        heap_region = main_heap.get_heap_boundaries()
        if not heap_region:
            print(message.error('Could not find the heap'))
            return

        heap_start = heap_region.vaddr
        heap_end = heap_start + heap_region.size

        # If we don't know where the main_arena struct is, just iterate
        # through all the heap objects until we hit the last one
        last_addr = None
        addr = heap_start
        while addr < heap_end:
            chunk = read_chunk(addr)
            size = int(chunk['size'])

            # Clear the bottom 3 bits (chunk-size flag bits)
            size &= ~7
            if size == 0:
                # Guard against a corrupt zero-sized chunk looping forever.
                break

            last_addr = addr
            # BUGFIX: `addr += size` was previously executed twice here,
            # double-advancing and skipping every other chunk; advance by
            # exactly one chunk per iteration.
            addr += size

        address = last_addr
    else:
        address = main_arena['top']

    return malloc_chunk(address)
def thread_cache(self):
    """Locate a thread's tcache struct. If it doesn't
    have one, use the main thread's tcache.
    """
    if self.has_tcache():
        # Default to the main thread's tcache, which sits near the start
        # of the sbrk heap.  NOTE(review): the fixed 0x10 offset looks
        # like the chunk-header size — confirm for non-64-bit targets.
        tcache = self.mp['sbrk_base'] + 0x10
        if self.multithreaded:
            tcache_addr = pwndbg.memory.pvoid(pwndbg.symbol.address('tcache'))
            if tcache_addr != 0:
                tcache = tcache_addr

        try:
            self._thread_cache = pwndbg.memory.poi(self.tcache_perthread_struct, tcache)
            # Force the read now so access errors surface here, not later.
            _ = self._thread_cache['entries'].fetch_lazy()
        except Exception as e:
            print(message.error('Error fetching tcache. GDB cannot access '
                                'thread-local variables unless you compile with -lpthread.'))
            return None

        return self._thread_cache

    else:
        print(message.warn('This version of GLIBC was not compiled with tcache support.'))
        return None
def probeleak(address=None, count=0x40, max_distance=0x0, point_to=None, max_ptrs=0, flags=None):
    """Scan `count` bytes at `address` for values that look like pointers.

    A hit is a pointer-sized window whose value lands inside (or within
    `max_distance` of) a mapped page.  Optional filters: `point_to`
    keeps only pages whose objfile name contains that string, `flags`
    keeps only pages satisfying the given permission flags, and
    `max_ptrs` (0 = unlimited) caps the number of reported hits.
    """
    address = int(address)
    address &= pwndbg.arch.ptrmask
    ptrsize = pwndbg.arch.ptrsize
    count = max(int(count), ptrsize)
    # Number of hex digits needed to print offsets up to `count`.
    off_zeros = int(math.ceil(math.log(count, 2) / 4))
    # Idiom fix: compare to None with `is not None` (was `!= None`).
    if flags is not None:
        require_flags = flags_str2int(flags)

    if count > address > 0x10000:  # in case someone puts in an end address and not a count (smh)
        # Fixed typo in user-facing message: "Substracting" -> "Subtracting".
        print(
            message.warn(
                "Warning: you gave an end address, not a count. Subtracting 0x%x from the count."
                % (address)))
        count -= address

    try:
        data = pwndbg.memory.read(address, count, partial=True)
    except gdb.error as e:
        print(message.error(str(e)))
        return

    if not data:
        print(
            message.error(
                "Couldn't read memory at 0x%x. See 'probeleak -h' for the usage."
                % (address, )))
        return

    found = False
    find_cnt = 0
    for i in range(0, len(data) - ptrsize + 1):
        p = pwndbg.arch.unpack(data[i:i + ptrsize])
        page = find_module(p, max_distance)
        if page:
            if point_to is not None and point_to not in page.objfile:
                continue
            if flags is not None and not satisfied_flags(require_flags, page.flags):
                continue
            if not found:
                print(M.legend())
                found = True

            mod_name = page.objfile
            if not mod_name:
                mod_name = '[anon]'
            if p >= page.end:
                right_text = '(%s) %s + 0x%x + 0x%x (outside of the page)' % (
                    page.permstr, mod_name, page.memsz, p - page.end)
            elif p < page.start:
                right_text = '(%s) %s - 0x%x (outside of the page)' % (
                    page.permstr, mod_name, page.start - p)
            else:
                right_text = '(%s) %s + 0x%x' % (page.permstr, mod_name, p - page.start)

            offset_text = '0x%0*x' % (off_zeros, i)
            p_text = '0x%0*x' % (int(ptrsize * 2), p)
            text = '%s: %s = %s' % (offset_text, M.get(p, text=p_text), M.get(p, text=right_text))

            symbol = pwndbg.symbol.get(p)
            if symbol:
                text += ' (%s)' % symbol
            print(text)

            find_cnt += 1
            if max_ptrs != 0 and find_cnt >= max_ptrs:
                break

    if not found:
        print(
            message.hint('No leaks found at 0x%x-0x%x :(' % (address, address + count)))
def search(type, hex, string, executable, writable, value, mapping_name, save, next):
    """Search mapped memory for `value`, packed according to `type`.

    NOTE(review): several parameters shadow builtins (`type`, `hex`,
    `next`); they appear to mirror CLI argument names, so they are kept.
    """
    # Adjust pointer sizes to the local architecture
    if type == 'pointer':
        type = {4: 'dword', 8: 'qword'}[pwndbg.arch.ptrsize]

    if save is None:
        save = bool(pwndbg.config.auto_save_search)

    if hex:
        try:
            value = codecs.decode(value, 'hex')
        except binascii.Error as e:
            print('invalid input for type hex: {}'.format(e))
            return

    # Convert to an integer if needed, and pack to bytes
    if type not in ('string', 'bytes'):
        value = pwndbg.commands.fix_int(value)
        value &= pwndbg.arch.ptrmask
        fmt = {
            'little': '<',
            'big': '>'
        }[pwndbg.arch.endian] + {
            'byte': 'B',
            'short': 'H',
            'word': 'H',
            'dword': 'L',
            'qword': 'Q'
        }[type]

        # Work around Python 2.7.6 struct.pack / unicode incompatibility
        # See https://github.com/pwndbg/pwndbg/pull/336 for more information.
        fmt = str(fmt)

        try:
            value = struct.pack(fmt, value)
        except struct.error as e:
            print('invalid input for type {}: {}'.format(type, e))
            return

    # Null-terminate strings
    elif type == 'string':
        value = value.encode()
        value += b'\x00'

    # Find the mappings that we're looking for
    mappings = pwndbg.vmmap.get()

    if mapping_name:
        mappings = [m for m in mappings if mapping_name in m.objfile]

    if not mappings:
        print(message.error("Could not find mapping %r" % mapping_name))
        return

    # Prep the saved set if necessary
    global saved
    if save:
        saved = set()

    # Perform the search
    for address in pwndbg.search.search(value,
                                        mappings=mappings,
                                        executable=executable,
                                        writable=writable):
        # `next` narrows results to addresses found by the previous search.
        if next and address not in saved:
            continue

        if save:
            saved.add(address)

        print_search_hit(address)
def nearpc(pc=None, lines=None, to_string=False, emulate=False):
    """
    Disassemble near a specified address.
    """
    # Repeating nearpc (pressing enter) makes it show next addresses
    # (writing nearpc explicitly again will reset its state)
    if nearpc.repeat:
        pc = nearpc.next_pc

    result = []

    # Fix the case where we only have one argument, and
    # it's a small value.
    if lines is None and (pc is None or int(pc) < 0x100):
        lines = pc
        pc = None

    if pc is None:
        pc = pwndbg.regs.pc

    if lines is None:
        lines = nearpc_lines // 2

    pc = int(pc)
    lines = int(lines)

    # Check whether we can even read this address
    if not pwndbg.memory.peek(pc):
        result.append(message.error('Invalid address %#x' % pc))

    # # Load source data if it's available
    # pc_to_linenos = collections.defaultdict(lambda: [])
    # lineno_to_src = {}
    # frame = gdb.selected_frame()
    # if frame:
    #     sal = frame.find_sal()
    #     if sal:
    #         symtab = sal.symtab
    #         objfile = symtab.objfile
    #         sourcefilename = symtab.filename
    #         with open(sourcefilename, 'r') as sourcefile:
    #             lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}
    #         for line in symtab.linetable():
    #             pc_to_linenos[line.pc].append(line.line)

    instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)

    if pwndbg.memory.peek(pc) and not instructions:
        result.append(message.error('Invalid instructions at %#x' % pc))

    # In case $pc is in a new map we don't know about,
    # this will trigger an exploratory search.
    pwndbg.vmmap.find(pc)

    # Gather all addresses and symbols for each instruction
    symbols = [pwndbg.symbol.get(i.address) for i in instructions]
    addresses = ['%#x' % i.address for i in instructions]

    # Remember where repeat-mode should continue from.
    nearpc.next_pc = instructions[-1].address + instructions[-1].size if instructions else 0

    # Format the symbol name for each instruction
    symbols = ['<%s> ' % sym if sym else '' for sym in symbols]

    # Pad out all of the symbols and addresses
    if pwndbg.config.left_pad_disasm and not nearpc.repeat:
        symbols = ljust_padding(symbols)
        addresses = ljust_padding(addresses)

    prev = None

    # Print out each instruction
    for address_str, symbol, instr in zip(addresses, symbols, instructions):
        asm = D.instruction(instr)
        prefix_sign = pwndbg.config.nearpc_prefix

        # Show prefix only on the specified address and don't show it while in repeat-mode
        show_prefix = instr.address == pc and not nearpc.repeat
        prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))
        prefix = N.prefix(prefix)

        pre = pwndbg.ida.Anterior(instr.address)
        if pre:
            result.append(N.ida_anterior(pre))

        # Colorize address and symbol if not highlighted
        # symbol is fetched from gdb and it can be e.g. '<main+8>'
        if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:
            address_str = N.address(address_str)
            symbol = N.symbol(symbol)
        elif pwndbg.config.highlight_pc:
            prefix = C.highlight(prefix)
            address_str = C.highlight(address_str)
            symbol = C.highlight(symbol)

        line = ' '.join((prefix, address_str, symbol, asm))

        # If there was a branch before this instruction which was not
        # contiguous, put in some ellipses.
        if prev and prev.address + prev.size != instr.address:
            result.append(N.branch_marker('%s' % nearpc_branch_marker))

        # Otherwise if it's a branch and it *is* contiguous, just put
        # and empty line.
        elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
            if len('%s' % nearpc_branch_marker_contiguous) > 0:
                result.append('%s' % nearpc_branch_marker_contiguous)

        # For syscall instructions, put the name on the side
        if instr.address == pc:
            syscall_name = pwndbg.arguments.get_syscall_name(instr)
            if syscall_name:
                line += ' <%s>' % N.syscall_name(syscall_name)

        result.append(line)

        # For call instructions, attempt to resolve the target and
        # determine the number of arguments.
        if show_args:
            result.extend(['%8s%s' % ('', arg) for arg in pwndbg.arguments.format_args(instruction=instr)])

        prev = instr

    if not to_string:
        print('\n'.join(result))

    return result
def ghidra(func):
    """Decompile `func` with Ghidra and print the result; failures are
    printed as errors instead of raised."""
    try:
        decompiled = pwndbg.ghidra.decompile(func)
    except Exception as e:
        print(message.error(e))
    else:
        print(decompiled)
def nearpc(pc=None, lines=None, to_string=False, emulate=False):
    """
    Disassemble near a specified address.
    """
    # Repeating nearpc (pressing enter) makes it show next addresses
    # (writing nearpc explicitly again will reset its state)
    if nearpc.repeat:
        pc = nearpc.next_pc

    result = []

    # Fix the case where we only have one argument, and
    # it's a small value.
    if lines is None and (pc is None or int(pc) < 0x100):
        lines = pc
        pc = None

    if pc is None:
        pc = pwndbg.regs.pc

    if lines is None:
        lines = nearpc_lines // 2

    pc = int(pc)
    lines = int(lines)

    # Check whether we can even read this address
    if not pwndbg.memory.peek(pc):
        result.append(message.error('Invalid address %#x' % pc))

    # # Load source data if it's available
    # pc_to_linenos = collections.defaultdict(lambda: [])
    # lineno_to_src = {}
    # frame = gdb.selected_frame()
    # if frame:
    #     sal = frame.find_sal()
    #     if sal:
    #         symtab = sal.symtab
    #         objfile = symtab.objfile
    #         sourcefilename = symtab.filename
    #         with open(sourcefilename, 'r') as sourcefile:
    #             lineno_to_src = {i:l for i,l in enumerate(sourcefile.readlines())}
    #         for line in symtab.linetable():
    #             pc_to_linenos[line.pc].append(line.line)

    instructions = pwndbg.disasm.near(pc, lines, emulate=emulate, show_prev_insns=not nearpc.repeat)

    if pwndbg.memory.peek(pc) and not instructions:
        result.append(message.error('Invalid instructions at %#x' % pc))

    # In case $pc is in a new map we don't know about,
    # this will trigger an exploratory search.
    pwndbg.vmmap.find(pc)

    # Gather all addresses and symbols for each instruction
    symbols = [pwndbg.symbol.get(i.address) for i in instructions]
    addresses = ['%#x' % i.address for i in instructions]

    # Remember where repeat-mode should continue from.
    nearpc.next_pc = instructions[-1].address + instructions[
        -1].size if instructions else 0

    # Format the symbol name for each instruction
    symbols = ['<%s> ' % sym if sym else '' for sym in symbols]

    # Pad out all of the symbols and addresses
    if pwndbg.config.left_pad_disasm and not nearpc.repeat:
        symbols = ljust_padding(symbols)
        addresses = ljust_padding(addresses)

    prev = None

    first_pc = True

    # Print out each instruction
    for address_str, symbol, instr in zip(addresses, symbols, instructions):
        asm = D.instruction(instr)
        prefix_sign = pwndbg.config.nearpc_prefix

        # Show prefix only on the specified address and don't show it while in repeat-mode
        # or when showing current instruction for the second time
        show_prefix = instr.address == pc and not nearpc.repeat and first_pc
        prefix = ' %s' % (prefix_sign if show_prefix else ' ' * len(prefix_sign))
        prefix = N.prefix(prefix)

        pre = pwndbg.ida.Anterior(instr.address)
        if pre:
            result.append(N.ida_anterior(pre))

        # Colorize address and symbol if not highlighted
        # symbol is fetched from gdb and it can be e.g. '<main+8>'
        if instr.address != pc or not pwndbg.config.highlight_pc or nearpc.repeat:
            address_str = N.address(address_str)
            symbol = N.symbol(symbol)
        elif pwndbg.config.highlight_pc and first_pc:
            prefix = C.highlight(prefix)
            address_str = C.highlight(address_str)
            symbol = C.highlight(symbol)
            first_pc = False

        line = ' '.join((prefix, address_str, symbol, asm))

        # If there was a branch before this instruction which was not
        # contiguous, put in some ellipses.
        if prev and prev.address + prev.size != instr.address:
            result.append(N.branch_marker('%s' % nearpc_branch_marker))

        # Otherwise if it's a branch and it *is* contiguous, just put
        # and empty line.
        elif prev and any(g in prev.groups for g in (CS_GRP_CALL, CS_GRP_JUMP, CS_GRP_RET)):
            if len('%s' % nearpc_branch_marker_contiguous) > 0:
                result.append('%s' % nearpc_branch_marker_contiguous)

        # For syscall instructions, put the name on the side
        if instr.address == pc:
            syscall_name = pwndbg.arguments.get_syscall_name(instr)
            if syscall_name:
                line += ' <%s>' % N.syscall_name(syscall_name)

        # For Comment Function
        # NOTE(review): the bare `except` silently skips instructions with
        # no stored user comment (KeyError) — any other failure is hidden too.
        try:
            line += " " * 10 + C.comment(
                pwndbg.commands.comments.file_lists[pwndbg.proc.exe][hex(
                    instr.address)])
        except:
            pass

        result.append(line)

        # For call instructions, attempt to resolve the target and
        # determine the number of arguments.
        if show_args:
            result.extend([
                '%8s%s' % ('', arg)
                for arg in pwndbg.arguments.format_args(instruction=instr)
            ])

        prev = instr

    if not to_string:
        print('\n'.join(result))

    return result
def init_ida_rpc_client():
    """(Re)connect the module-global IDA Pro xmlrpc client.

    Reconnection attempts are rate-limited, and connection failures are
    reported only once per distinct exception to avoid spamming the user.
    """
    global _ida, _ida_last_exception, _ida_last_connection_check

    if not ida_enabled:
        return

    now = time.time()
    # Rate-limit: skip when the last (failed) check happened recently.
    if _ida is None and (now - _ida_last_connection_check) < int(ida_timeout) + 5:
        return

    addr = 'http://{host}:{port}'.format(host=ida_rpc_host, port=ida_rpc_port)

    _ida = xmlrpclib.ServerProxy(addr)
    socket.setdefaulttimeout(int(ida_timeout))

    exception = None  # (type, value, traceback)
    try:
        # Cheap RPC call to verify the connection actually works.
        _ida.here()
        print(
            message.success(
                "Pwndbg successfully connected to Ida Pro xmlrpc: %s" % addr))
    except socket.error as e:
        if e.errno != errno.ECONNREFUSED:
            exception = sys.exc_info()
        _ida = None
    except socket.timeout:
        exception = sys.exc_info()
        _ida = None
    except xmlrpclib.ProtocolError:
        exception = sys.exc_info()
        _ida = None

    if exception:
        # Only report if this exception differs from the last one shown.
        if not isinstance(
                _ida_last_exception,
                exception[0]) or _ida_last_exception.args != exception[1].args:
            if hasattr(
                    pwndbg.config,
                    "exception_verbose") and pwndbg.config.exception_verbose:
                print(message.error("[!] Ida Pro xmlrpc error"))
                traceback.print_exception(*exception)
            else:
                exc_type, exc_value, _ = exception
                print(
                    message.error(
                        'Failed to connect to IDA Pro ({}: {})'.format(
                            exc_type.__qualname__, exc_value)))
                if exc_type is socket.timeout:
                    print(
                        message.notice(
                            'To increase the time to wait for IDA Pro use `') +
                        message.hint(
                            'set ida-timeout <new-timeout-in-seconds>') +
                        message.notice('`'))
                else:
                    print(
                        message.notice('For more info invoke `') +
                        message.hint('set exception-verbose on') +
                        message.notice('`'))
                print(
                    message.notice('To disable IDA Pro integration invoke `') +
                    message.hint('set ida-enabled off') + message.notice('`'))

    _ida_last_exception = exception and exception[1]
    _ida_last_connection_check = now
def format_bin(bins, verbose=False, offset=None):
    """Render one bin category (fastbins / tcachebins / normal bins) as text.

    ``bins`` maps chunk sizes to chain data and carries a ``type`` key that
    is popped here.  Returns a list of colorized strings; when nothing would
    be shown, returns ``[message.hint("empty")]``.
    """
    allocator = pwndbg.heap.current
    if offset is None:
        offset = allocator.chunk_key_offset("fd")

    bin_kind = bins.pop("type")
    rendered = []

    for size in bins:
        entry = bins[size]
        count = None
        corrupted = False
        protected = False
        chain_bk = None

        if bin_kind == "fastbins":
            # Fastbins are a bare singly linked list.
            fd_chain = entry
            protected = pwndbg.glibc.check_safe_linking()
        elif bin_kind == "tcachebins":
            # Tcache bins carry a singly linked list plus an entry counter.
            fd_chain, count = entry
            protected = pwndbg.glibc.check_safe_linking()
        else:
            # Normal bins are doubly linked; corruption may be detected.
            fd_chain, chain_bk, corrupted = entry

        # Hide empty, healthy bins unless verbose output was requested.
        if not (verbose or corrupted) and fd_chain == [0] and not count:
            continue

        if bin_kind == "tcachebins":
            # Show at most count+1 nodes, capped at 8.
            fd_text = pwndbg.chain.format(
                fd_chain[0],
                offset=offset,
                limit=min(count + 1, 8),
                safe_linking=protected)
        else:
            fd_text = pwndbg.chain.format(
                fd_chain[0], offset=offset, safe_linking=protected)

        label = hex(size) if isinstance(size, int) else size

        if corrupted:
            parts = [
                message.hint(label) + message.error(" [corrupted]"),
                message.hint("FD: ") + fd_text,
                message.hint("BK: ") + pwndbg.chain.format(
                    chain_bk[0], offset=allocator.chunk_key_offset("bk")),
            ]
            rendered.append("\n".join(parts))
        else:
            if count is not None:
                prefix = message.hint(label) + message.hint(" [%3d]" % count)
            else:
                prefix = message.hint(label)
            rendered.append((prefix + ": ").ljust(13) + fd_text)

    if not rendered:
        rendered.append(message.hint("empty"))

    return rendered
def try_free(addr):
    """Simulate glibc free() sanity checks for the chunk at user pointer ``addr``.

    Walks the same validation steps as glibc's ``__libc_free``/``_int_free``
    (hook dispatch, tcache path, fastbin path, normal-chunk path) and prints
    which checks would pass or would abort.  Purely diagnostic: the inferior
    is never modified.

    Fixes vs. previous revision:
    - the ``__free_hook`` / ``free(0)`` / munmap notices were built but never
      printed (``message.*`` return values were discarded);
    - the fastbin next-chunk read error formatted the gdb.Value ``chunk``
      instead of the address ``addr``;
    - ``errors_found`` is now initialized as the integer counter it is used as.
    """
    addr = int(addr)

    # If __free_hook is non-NULL, glibc's free() dispatches to it immediately.
    free_hook = pwndbg.symbol.address('__free_hook')
    if free_hook is not None:
        if pwndbg.memory.pvoid(free_hook) != 0:
            print(message.success('__libc_free: will execute __free_hook'))

    # free(0) has no effect
    if addr == 0:
        print(message.success('__libc_free: addr is 0, nothing to do'))
        return

    # Allocator constants for the current target.
    allocator = pwndbg.heap.current
    arena = allocator.get_arena()

    aligned_lsb = allocator.malloc_align_mask.bit_length()
    size_sz = allocator.size_sz
    malloc_alignment = allocator.malloc_alignment
    malloc_align_mask = allocator.malloc_align_mask
    chunk_minsize = allocator.minsize

    ptr_size = pwndbg.arch.ptrsize

    def unsigned_size(size):
        # read_chunk()['size'] is signed in pwndbg ;/
        # there may be better way to handle that
        if ptr_size < 8:
            return ctypes.c_uint32(size).value
        return ctypes.c_uint64(size).value

    def chunksize(chunk_size):
        # Strip PREV_INUSE / IS_MMAPPED / NON_MAIN_ARENA flag bits.
        # maybe move this to ptmalloc.py
        return chunk_size & (~7)

    def finalize(errors_found, returned_before_error):
        # Print the summary verdict for all checks performed so far.
        print('-' * 10)
        if returned_before_error:
            print(message.success('Free should succeed!'))
        elif errors_found > 0:
            print(message.error('Errors found!'))
        else:
            print(message.success('All checks passed!'))

    # mem2chunk: translate the user pointer to the chunk header address.
    addr -= 2 * size_sz

    # try to get the chunk
    try:
        chunk = read_chunk(addr)
    except gdb.MemoryError:
        print(
            message.error(
                'Can\'t read chunk at address 0x{:x}, memory error'.format(
                    addr)))
        return

    chunk_size = unsigned_size(chunk['size'])
    chunk_size_unmasked = chunksize(chunk_size)
    _, is_mmapped, _ = allocator.chunk_flags(chunk_size)

    if is_mmapped:
        print(message.notice('__libc_free: Doing munmap_chunk'))
        return

    errors_found = 0  # integer counter, incremented per failed check
    returned_before_error = False

    # chunk doesn't overlap memory
    print(message.notice('General checks'))
    max_mem = (1 << (ptr_size * 8)) - 1
    if addr + chunk_size >= max_mem:
        err = 'free(): invalid pointer -> &chunk + chunk->size > max memory\n'
        err += ' 0x{:x} + 0x{:x} > 0x{:x}'
        err = err.format(addr, chunk_size, max_mem)
        print(message.error(err))
        errors_found += 1

    # chunk address is aligned
    addr_tmp = addr
    if malloc_alignment != 2 * size_sz:
        # On such targets the *user* pointer, not the header, must be aligned.
        addr_tmp = addr + 2 * size_sz

    if addr_tmp & malloc_align_mask != 0:
        err = 'free(): invalid pointer -> misaligned chunk\n'
        err += ' LSB of 0x{:x} are 0b{}, should be 0b{}'
        if addr_tmp != addr:
            err += ' (0x{:x} was added to the address)'.format(2 * size_sz)
        err = err.format(addr_tmp, bin(addr_tmp)[-aligned_lsb:],
                         '0' * aligned_lsb)
        print(message.error(err))
        errors_found += 1

    # chunk's size is big enough
    if chunk_size_unmasked < chunk_minsize:
        err = 'free(): invalid size -> chunk\'s size smaller than MINSIZE\n'
        err += ' size is 0x{:x}, MINSIZE is 0x{:x}'
        err = err.format(chunk_size_unmasked, chunk_minsize)
        print(message.error(err))
        errors_found += 1

    # chunk's size is aligned
    if chunk_size_unmasked & malloc_align_mask != 0:
        err = 'free(): invalid size -> chunk\'s size is not aligned\n'
        err += ' LSB of size 0x{:x} are 0b{}, should be 0b{}'
        err = err.format(chunk_size_unmasked,
                         bin(chunk_size_unmasked)[-aligned_lsb:],
                         '0' * aligned_lsb)
        print(message.error(err))
        errors_found += 1

    # tcache path (only for libcs whose tcache_entry has the 'key' field)
    if allocator.has_tcache() and 'key' in allocator.tcache_entry.keys():
        tc_idx = (chunk_size_unmasked - chunk_minsize +
                  malloc_alignment - 1) // malloc_alignment
        if tc_idx < allocator.mp['tcache_bins']:
            print(message.notice('Tcache checks'))

            # Read the in-chunk 'key' field and compare with this thread's
            # tcache address -- glibc's double-free heuristic.
            e = addr + 2 * size_sz
            e += allocator.tcache_entry.keys().index('key') * ptr_size
            e = pwndbg.memory.pvoid(e)
            tcache_addr = int(allocator.thread_cache.address)
            if e == tcache_addr:
                # todo, actually do checks
                print(
                    message.error(
                        'Will do checks for tcache double-free (memory_tcache_double_free)'
                    ))
                errors_found += 1

            if int(allocator.get_tcache()['counts'][tc_idx]) < int(
                    allocator.mp['tcache_count']):
                print(message.success('Using tcache_put'))
                if errors_found == 0:
                    returned_before_error = True

    if errors_found > 0:
        finalize(errors_found, returned_before_error)
        return

    # fastbin path
    if chunk_size_unmasked <= allocator.global_max_fast:
        print(message.notice('Fastbin checks'))
        chunk_fastbin_idx = allocator.fastbin_index(chunk_size_unmasked)
        fastbin_list = allocator.fastbins(int(
            arena.address))[(chunk_fastbin_idx + 2) * (ptr_size * 2)]

        try:
            next_chunk = read_chunk(addr + chunk_size_unmasked)
        except gdb.MemoryError:
            # Bug fix: format the address, not the gdb.Value chunk struct.
            print(
                message.error(
                    'Can\'t read next chunk at address 0x{:x}, memory error'.
                    format(addr + chunk_size_unmasked)))
            finalize(errors_found, returned_before_error)
            return

        # next chunk's size is big enough and small enough
        next_chunk_size = unsigned_size(next_chunk['size'])
        if next_chunk_size <= 2 * size_sz or chunksize(next_chunk_size) >= int(
                arena['system_mem']):
            err = 'free(): invalid next size (fast) -> next chunk\'s size not in [2*size_sz; av->system_mem]\n'
            err += ' next chunk\'s size is 0x{:x}, 2*size_sz is 0x{:x}, system_mem is 0x{:x}'
            err = err.format(next_chunk_size, 2 * size_sz,
                             int(arena['system_mem']))
            print(message.error(err))
            errors_found += 1

        # chunk is not the same as the one on top of fastbin[idx]
        if int(fastbin_list[0]) == addr:
            err = 'double free or corruption (fasttop) -> chunk already is on top of fastbin list\n'
            err += ' fastbin idx == {}'
            err = err.format(chunk_fastbin_idx)
            print(message.error(err))
            errors_found += 1

        # chunk's size is ~same as top chunk's size
        fastbin_top_chunk = int(fastbin_list[0])
        if fastbin_top_chunk != 0:
            try:
                fastbin_top_chunk = read_chunk(fastbin_top_chunk)
            except gdb.MemoryError:
                print(
                    message.error(
                        'Can\'t read top fastbin chunk at address 0x{:x}, memory error'
                        .format(fastbin_top_chunk)))
                finalize(errors_found, returned_before_error)
                return

            fastbin_top_chunk_size = chunksize(
                unsigned_size(fastbin_top_chunk['size']))
            if chunk_fastbin_idx != allocator.fastbin_index(
                    fastbin_top_chunk_size):
                err = 'invalid fastbin entry (free) -> chunk\'s size is not near top chunk\'s size\n'
                err += ' chunk\'s size == {}, idx == {}\n'
                err += ' top chunk\'s size == {}, idx == {}'
                err += ' if `have_lock` is false then the error is invalid'
                err = err.format(
                    chunk['size'], chunk_fastbin_idx, fastbin_top_chunk_size,
                    allocator.fastbin_index(fastbin_top_chunk_size))
                print(message.error(err))
                errors_found += 1

    # normal (non-mmapped, non-fastbin) chunk path
    elif is_mmapped == 0:
        print(message.notice('Not mapped checks'))

        # chunks is not top chunk
        if addr == int(arena['top']):
            err = 'double free or corruption (top) -> chunk is top chunk'
            print(message.error(err))
            errors_found += 1

        # next chunk is not beyond the boundaries of the arena
        NONCONTIGUOUS_BIT = 2
        top_chunk_addr = (int(arena['top']))
        top_chunk = read_chunk(top_chunk_addr)
        next_chunk_addr = addr + chunk_size_unmasked

        # todo: in libc, addition may overflow
        if (arena['flags'] & NONCONTIGUOUS_BIT
                == 0) and next_chunk_addr >= top_chunk_addr + chunksize(
                    top_chunk['size']):
            err = 'double free or corruption (out) -> next chunk is beyond arena and arena is contiguous\n'
            err += 'next chunk at 0x{:x}, end of arena at 0x{:x}'
            err = err.format(
                next_chunk_addr,
                top_chunk_addr + chunksize(unsigned_size(top_chunk['size'])))
            print(message.error(err))
            errors_found += 1

        # now we need to dereference chunk
        try:
            next_chunk = read_chunk(next_chunk_addr)
            next_chunk_size = chunksize(unsigned_size(next_chunk['size']))
        except (OverflowError, gdb.MemoryError):
            print(
                message.error('Can\'t read next chunk at address 0x{:x}'.
                              format(next_chunk_addr)))
            finalize(errors_found, returned_before_error)
            return

        # next chunk's P bit is set
        prev_inuse, _, _ = allocator.chunk_flags(next_chunk['size'])
        if prev_inuse == 0:
            err = 'double free or corruption (!prev) -> next chunk\'s previous-in-use bit is 0\n'
            print(message.error(err))
            errors_found += 1

        # next chunk's size is big enough and small enough
        if next_chunk_size <= 2 * size_sz or next_chunk_size >= int(
                arena['system_mem']):
            err = 'free(): invalid next size (normal) -> next chunk\'s size not in [2*size_sz; system_mem]\n'
            err += 'next chunk\'s size is 0x{:x}, 2*size_sz is 0x{:x}, system_mem is 0x{:x}'
            err = err.format(next_chunk_size, 2 * size_sz,
                             int(arena['system_mem']))
            print(message.error(err))
            errors_found += 1

        # consolidate backward
        prev_inuse, _, _ = allocator.chunk_flags(chunk['size'])
        if prev_inuse == 0:
            print(message.notice('Backward consolidation'))
            prev_size = chunksize(unsigned_size(chunk['prev_size']))
            prev_chunk_addr = addr - prev_size

            try:
                prev_chunk = read_chunk(prev_chunk_addr)
                prev_chunk_size = chunksize(unsigned_size(prev_chunk['size']))
            except (OverflowError, gdb.MemoryError):
                print(
                    message.error('Can\'t read next chunk at address 0x{:x}'.
                                  format(prev_chunk_addr)))
                finalize(errors_found, returned_before_error)
                return

            if unsigned_size(prev_chunk['size']) != prev_size:
                err = 'corrupted size vs. prev_size while consolidating\n'
                err += 'prev_size field is 0x{:x}, prev chunk at 0x{:x}, prev chunk size is 0x{:x}'
                err = err.format(prev_size, prev_chunk_addr,
                                 unsigned_size(prev_chunk['size']))
                print(message.error(err))
                errors_found += 1
            else:
                # Merge with the previous chunk, as _int_free would.
                addr = prev_chunk_addr
                chunk_size += prev_size
                chunk_size_unmasked += prev_size
                try_unlink(addr)

        # consolidate forward
        if next_chunk_addr != top_chunk_addr:
            print(message.notice('Next chunk is not top chunk'))
            try:
                next_next_chunk_addr = next_chunk_addr + next_chunk_size
                next_next_chunk = read_chunk(next_next_chunk_addr)
            except (OverflowError, gdb.MemoryError):
                print(
                    message.error('Can\'t read next chunk at address 0x{:x}'.
                                  format(next_next_chunk_addr)))
                finalize(errors_found, returned_before_error)
                return

            prev_inuse, _, _ = allocator.chunk_flags(next_next_chunk['size'])
            if prev_inuse == 0:
                print(message.notice('Forward consolidation'))
                try_unlink(next_chunk_addr)
                chunk_size += next_chunk_size
                chunk_size_unmasked += next_chunk_size
            else:
                print(message.notice('Clearing next chunk\'s P bit'))

            # unsorted bin fd->bk should be unsorted bean
            unsorted_addr = int(arena['bins'][0])
            try:
                unsorted = read_chunk(unsorted_addr)
                try:
                    if read_chunk(unsorted['fd'])['bk'] != unsorted_addr:
                        err = 'free(): corrupted unsorted chunks -> unsorted_chunk->fd->bk != unsorted_chunk\n'
                        err += 'unsorted at 0x{:x}, unsorted->fd == 0x{:x}, unsorted->fd->bk == 0x{:x}'
                        err = err.format(unsorted_addr, unsorted['fd'],
                                         read_chunk(unsorted['fd'])['bk'])
                        print(message.error(err))
                        errors_found += 1
                except (OverflowError, gdb.MemoryError):
                    print(
                        message.error(
                            'Can\'t read chunk at 0x{:x}, it is unsorted bin fd'
                            .format(unsorted['fd'])))
                    errors_found += 1
            except (OverflowError, gdb.MemoryError):
                print(
                    message.error(
                        'Can\'t read unsorted bin chunk at 0x{:x}'.format(
                            unsorted_addr)))
                errors_found += 1
        else:
            print(message.notice('Next chunk is top chunk'))
            chunk_size += next_chunk_size
            chunk_size_unmasked += next_chunk_size

        # todo: this may vary strongly
        FASTBIN_CONSOLIDATION_THRESHOLD = 65536
        if chunk_size_unmasked >= FASTBIN_CONSOLIDATION_THRESHOLD:
            print(
                message.notice(
                    'Doing malloc_consolidate and systrim/heap_trim'))

    # is mapped (unreachable in practice: the mmapped case returned earlier)
    else:
        print(message.notice('Doing munmap_chunk'))

    finalize(errors_found, returned_before_error)
def search(type, hex, string, executable, writable, value, mapping_name, save, next, trunc_out): global saved if next and not saved: print( "WARNING: cannot filter previous search results as they were empty. Performing new search saving results." ) next = False save = True # Adjust pointer sizes to the local architecture if type == "pointer": type = {4: "dword", 8: "qword"}[pwndbg.arch.ptrsize] if save is None: save = bool(pwndbg.config.auto_save_search) if hex: try: value = codecs.decode(value, "hex") except binascii.Error as e: print("invalid input for type hex: {}".format(e)) return # Convert to an integer if needed, and pack to bytes if type not in ("string", "bytes"): value = pwndbg.commands.fix_int(value) value &= pwndbg.arch.ptrmask fmt = { "little": "<", "big": ">" }[pwndbg.arch.endian] + { "byte": "B", "short": "H", "word": "H", "dword": "L", "qword": "Q", }[type] try: value = struct.pack(fmt, value) except struct.error as e: print("invalid input for type {}: {}".format(type, e)) return # Null-terminate strings elif type == "string": value = value.encode() value += b"\x00" # Find the mappings that we're looking for mappings = pwndbg.vmmap.get() if mapping_name: mappings = [m for m in mappings if mapping_name in m.objfile] if not mappings: print(message.error("Could not find mapping %r" % mapping_name)) return # If next is passed, only perform a manual search over previously saved addresses print("Searching for value: " + repr(value)) if next: val_len = len(value) new_saved = set() i = 0 for addr in saved: try: val = pwndbg.memory.read(addr, val_len) except Exception: continue if val == value: new_saved.add(addr) if not trunc_out or i < 20: print_search_hit(addr) i += 1 print("Search found %d items" % i) saved = new_saved return # Prep the saved set if necessary if save: saved = set() # Perform the search i = 0 for address in pwndbg.search.search(value, mappings=mappings, executable=executable, writable=writable): if save: saved.add(address) if not 
trunc_out or i < 20: print_search_hit(address) i += 1