'''
Created on Oct 18, 2011

@author: Rob
'''
import morpher.pydbg.pydbg as pydbg
import morpher.pydbg.defines as defines
import struct

def sprintf_handler(dbg):
    # At function entry on x86 cdecl, the third stack argument (the first
    # vararg passed to sprintf) sits at ESP + 0xC
    addr = dbg.context.Esp + 0xC
    count = dbg.read_process_memory(addr, 4)
    count = int(struct.unpack("L", count)[0])
    print "Caught myself a sprintf with a counter of %d!" % count
    return defines.DBG_CONTINUE

if __name__ == '__main__':
    dbg = pydbg.pydbg()
    pid = int(raw_input("Enter PID of process: "))
    dbg.attach(pid)
    print "Running...."
    sprintf_address = dbg.func_resolve("msvcrt.dll", "sprintf")
    dbg.bp_set(sprintf_address, description="sprintf_address", handler=sprintf_handler)
    dbg.run()
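
# A possible extension of the handler above (not part of the original script):
# under the same x86 cdecl layout, the format-string pointer passed to
# sprintf(buffer, format, ...) sits at ESP + 0x8 at function entry. This sketch
# assumes that layout and uses only pydbg's read_process_memory; reading a fixed
# 64 bytes is a simplification and could fail near an unmapped page.
def sprintf_format_handler(dbg):
    # Read the second argument: a pointer to the format string
    ptr_raw = dbg.read_process_memory(dbg.context.Esp + 0x8, 4)
    fmt_ptr = struct.unpack("L", ptr_raw)[0]
    # Dereference the pointer and cut the string at the first NUL byte
    raw = dbg.read_process_memory(fmt_ptr, 64)
    fmt = raw.split("\x00")[0]
    print "sprintf called with format string: %r" % fmt
    return defines.DBG_CONTINUE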
def record(self, exe, arg):
    """
    Given an application that uses the target DLL, runs the program and
    captures a L{Trace} object that is capable of replaying all the function
    calls made by the application to the DLL.

    The L{Trace} is captured by launching the application in a second process
    and setting breakpoints at the beginning of each of the functions in the
    DLL. The application is allowed to run, and if any of the breakpoints are
    tripped, a L{FuncRecorder} is used along with the debugger to capture all
    relevant areas of the stack. Each L{Snapshot} is stored in the created
    L{Trace} in the order it was captured.

    @param exe: The path to the application to record
    @type exe: string

    @param arg: Command-line arguments for the program, as a single string
    @type arg: string

    @return: A L{Trace} containing the captured function calls
    @rtype: L{Trace} object
    """
    self.log.info("Running collection line: exe - %s arg - %s", exe, arg)
    # Clear the trace recording
    self.trace = []
    if not self.global_limit:
        self.copies = {}
    # Load the application in a debugger
    self.log.info("Loaded program, setting breakpoints")
    self.dbg = pydbg.pydbg()
    self.dbg.load(exe, command_line=arg, create_new_console=True, show_window=False)
    # Set breakpoints on functions
    self.dbg.set_callback(defines.LOAD_DLL_DEBUG_EVENT, self.loadHandler)
    self.dbg.set_callback(defines.USER_CALLBACK_DEBUG_EVENT, self.checkTimeout)
    # Set up the timeout mechanism
    self.timed_out = False
    t = threading.Timer(self.limit, self.timeoutHandler)
    self.log.info("Running the program")
    t.start()
    self.dbg.run()
    t.cancel()
    self.log.info("Program terminated, recording type information")
    # Record the type information and create the Trace
    if len(self.trace) != 0:
        usertypes = {}
        for usernode in self.model.getElementsByTagName("usertype"):
            userid = usernode.getAttribute("id")
            usertype = usernode.getAttribute("type")
            userparams = []
            for childnode in usernode.getElementsByTagName("param"):
                userparams.append(childnode.getAttribute("type"))
            usertypes[userid] = (usertype, userparams)
        newtrace = trace.Trace(self.trace, usertypes)
    else:
        newtrace = None
    # Record some collection stats
    possible = 0
    seen = 0
    for (func, copies) in self.copies.items():
        possible += 1
        if copies > 0:
            seen += 1
            self.collected.add(func)
    self.log.info("Collected %d unique function calls out of %d collectable functions", seen, possible)
    return newtrace
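
# The loadHandler, checkTimeout, and timeoutHandler callbacks referenced above are
# not shown in this listing. The sketch below illustrates, under assumptions, how
# such callbacks could be written with pydbg: loadHandler waits for the target DLL
# (a hypothetical self.dllname) to load and sets a breakpoint on each function of
# interest (a hypothetical self.funclist, handled by a hypothetical
# self.funcHandler); timeoutHandler flags the timeout when the threading.Timer
# fires; checkTimeout polls that flag and tears down a hung debuggee.
def loadHandler(self, dbg):
    # get_system_dll(-1) returns the DLL that triggered this LOAD_DLL event
    last_dll = dbg.get_system_dll(-1)
    if last_dll.name.lower() == self.dllname.lower():
        for funcname in self.funclist:
            address = dbg.func_resolve_debuggee(self.dllname, funcname)
            if address:
                dbg.bp_set(address, description=funcname, handler=self.funcHandler)
    return defines.DBG_CONTINUE

def timeoutHandler(self):
    # Called by the threading.Timer when the time limit expires
    self.timed_out = True

def checkTimeout(self, dbg):
    # USER_CALLBACK_DEBUG_EVENT fires periodically even when no debug event
    # occurs, giving us a chance to kill a debuggee that has hung
    if self.timed_out:
        dbg.terminate_process()
    return defines.DBG_CONTINUE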
def run(self, trace):
    '''
    Takes the L{Trace} and runs it in a L{Harness}, monitoring for crashes.

    This function spawns a new process using a L{Harness} object connected to
    this process by a pair of pipes. A debugger is attached to the L{Harness}
    process and handlers are attached to monitor for crashes and hangs
    (defined as the harness not completing within a certain time limit). The
    given L{Trace} is then sent over the pipe to the L{Harness} for replay,
    and the L{Harness} is watched for completion. If a crash or hang occurs,
    the relevant information is collected and dumped to a file for inspection
    and possible reproduction.

    Each L{Trace} is identified as a certain run (the iteration) of a certain
    batch (the trace number), and this identification is reflected in the
    file name if a dump occurs. The scheme is based on the common fuzzing
    pattern of taking one "base" trace and fuzzing its values to create
    multiple fuzzed versions - so a batch is all traces that were generated
    by fuzzing the same base trace.

    @param trace: The trace to run and monitor
    @type trace: L{Trace} object
    '''
    self.log.info("Monitor is running. Creating pipe and harness")
    self.last_trace = trace
    # Spawn a new test harness and connect to it
    (inpipe, outpipe) = multiprocessing.Pipe()
    h = harness.Harness(self.cfg, (inpipe, outpipe))
    self.log.info("Running the harness")
    h.start()
    self.inpipe = inpipe
    if self.log.isEnabledFor(logging.DEBUG):
        tracestr = trace.toString()
        self.log.debug("Trace %d run %d contents:\n\n%s\n",
                       self.tracenum, self.iter, tracestr)
    # Send the trace
    self.log.info("Sending trace %d run %d", self.tracenum, self.iter)
    try:
        outpipe.send(trace)
    except:
        msg = "Error sending trace over pipe to harness"
        self.log.exception(msg)
        raise Exception(msg)
    # Attach the debugger to the waiting harness
    pid = h.pid
    self.log.debug("Stopping and attaching to harness, pid %d", pid)
    dbg = pydbg.pydbg()
    dbg.attach(pid)
    dbg.set_callback(defines.EXCEPTION_ACCESS_VIOLATION, self.crash_handler)
    dbg.set_callback(defines.USER_CALLBACK_DEBUG_EVENT, self.time_check)
    # Send continue signal
    self.log.debug("Sending continuation flag to harness")
    outpipe.send(True)
    # Prepare our timeout object
    self.log.debug("Setting timeout to %d seconds", self.limit)
    self.timed_out = False
    t = threading.Timer(self.limit, self.timeout)
    # Release the test harness
    self.log.debug("Releasing the harness")
    t.start()
    dbg.run()
    t.cancel()
    self.log.info("Harness exited, cleaning up")
    outpipe.close()
    self.inpipe.close()
    self.iter += 1
    self.log.info("Monitor exiting")
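
# The crash_handler, time_check, and timeout callbacks referenced above are not
# part of this listing. A minimal sketch follows, assuming a hypothetical
# self.dumpCrash helper that writes the dump file named after the batch (trace
# number) and run (iteration) described in the docstring.
def timeout(self):
    # threading.Timer callback: the harness did not finish within self.limit seconds
    self.timed_out = True

def time_check(self, dbg):
    # Fires periodically via USER_CALLBACK_DEBUG_EVENT; treat a timeout as a hang
    if self.timed_out:
        self.log.info("Harness timed out, terminating it")
        dbg.terminate_process()
    return defines.DBG_CONTINUE

def crash_handler(self, dbg):
    # Access violation in the harness: capture the register/stack context and
    # dump it along with the offending trace, then kill the debuggee so run()
    # can return
    crash_info = dbg.dump_context(dbg.context)
    self.log.error("Access violation caught:\n%s", crash_info)
    self.dumpCrash(crash_info)  # hypothetical helper, see lead-in comment
    dbg.terminate_process()
    return defines.DBG_EXCEPTION_NOT_HANDLED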
""" Created on Oct 18, 2011 @author: Rob """ import morpher.pydbg.pydbg as pydbg import morpher.pydbg.defines as defines import struct def sprintf_handler(dbg): addr = dbg.context.Esp + 0xC count = dbg.read_process_memory(addr, 4) count = int(struct.unpack("L", count)[0]) print "Caught myself a sprintf with a counter of %d!" % count return defines.DBG_CONTINUE if __name__ == "__main__": dbg = pydbg.pydbg() pid = int(raw_input("Enter PID of process: ")) dbg.attach(pid) print "Running...." sprintf_address = dbg.func_resolve("msvcrt.dll", "sprintf") dbg.bp_set(sprintf_address, description="sprintf_address", handler=sprintf_handler) dbg.run()