def build_functions_py(self): (apr_prefix, apr_include_dir, cpp, ldflags, flags, library_path) = self.get_build_config() cwd = os.getcwd() if self.svn_include_dir[-18:] == "subversion/include": includes = ('subversion/include/svn_*.h ' '%s/ap[ru]_*.h' % apr_include_dir) cmd = [ "%s %s --cpp '%s %s' %s " "%s -o subversion/bindings/ctypes-python/svn_all.py " "--no-macro-warnings --strip-build-path=%s" % (sys.executable, self.ctypesgen_py, cpp, flags, ldflags, includes, self.svn_include_dir[:-19]) ] os.chdir(self.svn_include_dir[:-19]) else: includes = ('%s/svn_*.h ' '%s/ap[ru]_*.h' % (self.svn_include_dir, apr_include_dir)) cmd = [ "%s %s --cpp '%s %s' %s " "%s -o svn_all.py --no-macro-warnings" % (sys.executable, self.ctypesgen_py, cpp, flags, ldflags, includes) ] if self.lib_dirs: cmd.extend('-R ' + x for x in self.lib_dirs.split(":")) cmd = ' '.join(cmd) if self.save_preprocessed_headers: cmd += " --save-preprocessed-headers=%s" % \ os.path.abspath(self.save_preprocessed_headers) if self.verbose or self.dry_run: status = self.execute(os.system, (cmd, ), cmd) else: f = os.popen(cmd, 'r') f.read() # Required to avoid the 'Broken pipe' error. status = f.close() # None is returned for the usual 0 return code os.chdir(cwd) if os.name == "posix" and status and status != 0: if os.WIFEXITED(status): status = os.WEXITSTATUS(status) if status != 0: sys.exit(status) elif os.WIFSIGNALED(status): log.error("ctypesgen.py killed with signal %d" % os.WTERMSIG(status)) sys.exit(2) elif os.WIFSTOPPED(status): log.error("ctypesgen.py stopped with signal %d" % os.WSTOPSIG(status)) sys.exit(2) else: log.error("ctypesgen.py exited with invalid status %d", status) sys.exit(2) if not self.dry_run: out = file("svn_all2.py", "w") for line in file("svn_all.py"): line = line.replace("restype = POINTER(svn_error_t)", "restype = SVN_ERR") if not line.startswith("FILE ="): out.write(line) out.close() cmd = "cat csvn/core/functions.py.in svn_all2.py > csvn/core/functions.py" self.execute(os.system, (cmd, ), cmd) log.info("Generated csvn/core/functions.py successfully")
def main_run_server(args): import io import signal import socket benchmark_dir, socket_name, = args update_sys_path(benchmark_dir) # Socket I/O s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.bind(socket_name) s.listen(1) # Read and act on commands from socket while True: stdout_file = None try: conn, addr = s.accept() except KeyboardInterrupt: break try: fd, stdout_file = tempfile.mkstemp() os.close(fd) # Read command read_size, = struct.unpack('<Q', recvall(conn, 8)) command_text = recvall(conn, read_size) if sys.version_info[0] >= 3: command_text = command_text.decode('utf-8') # Parse command command = json.loads(command_text) action = command.pop('action') if action == 'quit': break elif action == 'preimport': # Import benchmark suite before forking. # Capture I/O to a file during import. with posix_redirect_output(stdout_file, permanent=False): for benchmark in disc_benchmarks( benchmark_dir, ignore_import_errors=True): pass # Report result with io.open(stdout_file, 'r', errors='replace') as f: out = f.read() out = json.dumps(out) if sys.version_info[0] >= 3: out = out.encode('utf-8') conn.sendall(struct.pack('<Q', len(out))) conn.sendall(out) continue benchmark_id = command.pop('benchmark_id') params_str = command.pop('params_str') profile_path = command.pop('profile_path') result_file = command.pop('result_file') timeout = command.pop('timeout') cwd = command.pop('cwd') if command: raise RuntimeError( 'Command contained unknown data: {!r}'.format( command_text)) # Spawn benchmark run_args = (benchmark_dir, benchmark_id, params_str, profile_path, result_file) pid = os.fork() if pid == 0: conn.close() sys.stdin.close() exitcode = 1 try: with posix_redirect_output(stdout_file, permanent=True): try: os.chdir(cwd) main_run(run_args) exitcode = 0 except BaseException as ec: import traceback traceback.print_exc() finally: os._exit(exitcode) # Wait for results # (Poll in a loop is simplest --- also used by subprocess.py) start_time = wall_timer() is_timeout = False while True: res, status = os.waitpid(pid, os.WNOHANG) if res != 0: break if timeout is not None and wall_timer() > start_time + timeout: # Timeout if is_timeout: os.kill(pid, signal.SIGKILL) else: os.kill(pid, signal.SIGTERM) is_timeout = True time.sleep(0.05) # Report result with io.open(stdout_file, 'r', errors='replace') as f: out = f.read() # Emulate subprocess if os.WIFSIGNALED(status): retcode = -os.WTERMSIG(status) elif os.WIFEXITED(status): retcode = os.WEXITSTATUS(status) elif os.WIFSTOPPED(status): retcode = -os.WSTOPSIG(status) else: # shouldn't happen, but fail silently retcode = -128 info = {'out': out, 'errcode': -256 if is_timeout else retcode} result_text = json.dumps(info) if sys.version_info[0] >= 3: result_text = result_text.encode('utf-8') conn.sendall(struct.pack('<Q', len(result_text))) conn.sendall(result_text) except KeyboardInterrupt: break finally: conn.close() if stdout_file is not None: os.unlink(stdout_file)
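# A hedged, self-contained sketch of the pattern used above: fork a worker,
# poll it with os.waitpid(pid, os.WNOHANG), escalate SIGTERM to SIGKILL on
# timeout, then decode the wait status the way subprocess does. The 5-second
# timeout and the sleeping child are made up for illustration.
import os
import signal
import time

def run_with_timeout(timeout=5.0):
    pid = os.fork()
    if pid == 0:
        time.sleep(60)                   # stand-in for the real child workload
        os._exit(0)

    start = time.monotonic()
    timed_out = False
    while True:
        res, status = os.waitpid(pid, os.WNOHANG)
        if res != 0:
            break
        if time.monotonic() - start > timeout:
            # polite SIGTERM first, SIGKILL if it is still alive next time
            os.kill(pid, signal.SIGKILL if timed_out else signal.SIGTERM)
            timed_out = True
        time.sleep(0.05)

    # emulate subprocess return codes: negative values for signals
    if os.WIFSIGNALED(status):
        return -os.WTERMSIG(status)
    if os.WIFEXITED(status):
        return os.WEXITSTATUS(status)
    if os.WIFSTOPPED(status):
        return -os.WSTOPSIG(status)
    return -128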
# Minimal scaffold added so this excerpt runs standalone:
# fork first, then signal the child from the parent branch.
import os
import signal

pid = os.fork()

if pid:
    # 'SIGSTOP' signal will
    # cause the process to stop
    os.kill(pid, signal.SIGSTOP)
    print("Signal sent, child stopped.")

    info = os.waitpid(pid, os.WSTOPPED)

    # waitpid() returns a tuple whose first element
    # is the child's pid and whose second element
    # is the child's status indication

    # os.WSTOPSIG() returns the signal number
    # which caused the process to stop
    stopSignal = os.WSTOPSIG(info[1])
    print("Child stopped due to signal no:", stopSignal)
    print("Signal name:", signal.Signals(stopSignal).name)

    # send the 'SIGCONT' signal to the child process
    # using os.kill(); 'SIGCONT' will
    # cause the process to continue
    os.kill(pid, signal.SIGCONT)
    print("\nSignal sent, child continued.")

else:
    print("\nIn child process")
    print("Process ID:", os.getpid())
def fork_worker(self, job): """Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child process that will process the job. It's also responsible for monitoring the child process and handling hangs and crashes. Finally, the ``process`` method actually processes the job by eventually calling the Job instance's ``perform`` method. """ logger.debug('picked up job') logger.debug('job details: %s' % job) self.before_fork(job) self.child = os.fork() if self.child: self._setproctitle("Forked %s at %s" % (self.child, datetime.datetime.now())) logger.info('Forked %s at %s' % (self.child, datetime.datetime.now())) try: start = datetime.datetime.now() # waits for the result or times out while True: pid, status = os.waitpid(self.child, os.WNOHANG) if pid != 0: if os.WIFEXITED(status) and os.WEXITSTATUS( status) == 0: break if os.WIFSTOPPED(status): logger.warning("Process stopped by signal %d" % os.WSTOPSIG(status)) else: if os.WIFSIGNALED(status): raise CrashError( "Unexpected exit by signal %d" % os.WTERMSIG(status)) raise CrashError("Unexpected exit status %d" % os.WEXITSTATUS(status)) time.sleep(0.5) now = datetime.datetime.now() if self.timeout and ((now - start).seconds > self.timeout): os.kill(self.child, signal.SIGKILL) os.waitpid(-1, os.WNOHANG) raise TimeoutError("Timed out after %d seconds" % self.timeout) except OSError as ose: import errno if ose.errno != errno.EINTR: raise ose except JobError: self._handle_job_exception(job) finally: # If the child process' job called os._exit manually we need to # finish the clean up here. if self.job(): self.done_working(job) logger.debug('done waiting') else: self._setproctitle("Processing %s since %s" % (job, datetime.datetime.now())) logger.info('Processing %s since %s' % (job, datetime.datetime.now())) self.after_fork(job) # re-seed the Python PRNG after forking, otherwise # all job process will share the same sequence of # random numbers random.seed() self.process(job) os._exit(0) self.child = None
def judge(self, sub): lang = sub.lang mem_policy = True if (jcnf.POLICY[lang] == 'ALL' or jcnf.POLICY[lang] == 'MEM') else False page_size = resource.getpagesize() case_cnt = min(len(sub.case_lim), sub.case_cnt) sub.case_done = 0 case_id = 0 for case_id in range(case_cnt): sub.case_res.append({ 'res': jcnf.JUDGE_RES['init'], 'time': 0, 'mem': 0, }) exec_cmd = jcnf.EXEC_CMD[lang] exec_param = jcnf.EXEC_PARAM[lang] if lang == 'java': mem_lim = 0 for case_id in xrange(case_cnt): mem_lim = max(mem_lim, sub.case_lim[case_id]['mem']) exec_param.insert(1, '-Xmx' + str(mem_lim / 1024 + 1) + 'm') for case_id in xrange(case_cnt): lim = sub.case_lim[case_id] res = sub.case_res[case_id] exec_pid = os.fork() if exec_pid == 0: for rk, rv in jcnf.EXEC_RLIM[lang].items(): resource.setrlimit(rk, rv) try: exec_i = open(jcnf.getCasePathI(sub.pid, case_id), 'r') exec_o = open(jcnf.getExecPathO(), 'w') except: #sys.stderr.write('file read error') raise Exception('cannot handle input or output file') lim['time'] = max(lim['time'], jcnf.EXEC_MIN_TL[lang]) lim['time'] = int(lim['time'] * jcnf.EXEC_TL_RATIO[lang]) if lang == 'java': lim['time'] *= 3 rlimt = (lim['time'] - 1) // 1000 + 2 resource.setrlimit(resource.RLIMIT_CPU, (rlimt, rlimt)) rlimm = jcnf.EXEC_MAX_MEM if mem_policy: # java uses virtual machine resource.setrlimit(resource.RLIMIT_AS, (rlimm, rlimm)) os.dup2(exec_i.fileno(), 0) os.dup2(exec_o.fileno(), 1) #TOTO: ptrace pt_ret = cptrace.ptrace(cptrace.PTRACE_TRACEME, 0) if (pt_ret == -1): #sys.stderr.write('warning: ptrace error') raise Exception('child process cannot be ptraced') if exec_i: exec_i.close() if exec_o: exec_o.close() os._exit(1) os.execvp(exec_cmd, exec_param) sys.stderr.write('warning: something wrong') if exec_i: exec_i.close() if exec_o: exec_o.close() os._exit(1) else: stat_info_file = str(exec_pid).join(['/proc/', '/statm']) res['mem'] = 0 res['time'] = 0 t_prev = jclk() eax_prev = 142857 insyscall = False def killProc(): try: os.kill(exec_pid, signal.SIGKILL) except OSError: pass else: res['res'] = jcnf.JUDGE_RES['re'] threading.Timer((lim['time'] - 1) // 1000 + 10, killProc).start() try: while res['res'] == jcnf.JUDGE_RES['init']: #while True: exec_status = os.wait4(exec_pid, 0) if res['res'] != jcnf.JUDGE_RES['init']: break t_now = jclk() res['time'] += t_now - t_prev t_prev = t_now DEBUG_CNT = 0 #res['mem'] = exec_status[2].ru_minflt-360 res['mem'] = exec_status[2].ru_maxrss if os.WIFSIGNALED(exec_status[1]): #strange exited or tle? if res['time'] * 1000 > lim['time']: res['res'] = jcnf.JUDGE_RES['tle'] break elif os.WIFEXITED(exec_status[1]): #normally exited , ok break elif os.WIFSTOPPED(exec_status[1]): #sigtrap by ptra ce exec_sig = os.WSTOPSIG(exec_status[1]) if exec_sig != signal.SIGTRAP: res['res'] = jcnf.JUDGE_RES['re'] #print exec_status[0], exec_status[1], 'hehe', exec_sig cptrace.ptrace(cptrace.PTRACE_KILL, exec_pid) #strange exited? break eax_now = cptrace.ptrace( cptrace.PTRACE_PEEKUSER, exec_pid, 4 * cptrace.ORIG_EAX ) #when used in 64bit system, it should be 8*xxxx, so it is recommended to make it a const in conf if jcnf.POLICY[lang] == 'ALL': if jcnf.SYSCALL[eax_now][ 0] == 0: #prohibited syscall res['res'] = jcnf.JUDGE_RES['re'] cptrace.ptrace( cptrace.PTRACE_KILL, exec_pid ) #deprecated! 
should be implemented in another way break else: #TODO extend implementation pass if eax_now != eax_prev and eax_now != -1: insyscall = False if eax_now != -1: if insyscall: DEBUG_CNT += 1 #if eax_now==45 or eax_now==90 or eax_now==91: try: stat_info = open(stat_info_file, 'r') mem_now = int( stat_info.read().split(' ')[5] ) #automatically to long when exceed #res['mem'] = max(res['mem'], mem_now) stat_info.close() except: pass insyscall = False else: insyscall = True if mem_policy and res['mem'] > lim[ 'mem']: #res['mem']*page_size>lim['mem']*1024: res['res'] = jcnf.JUDGE_RES['mle'] cptrace.ptrace( cptrace.PTRACE_KILL, exec_pid ) #deprecated! should be implemented in another way break if res['time'] * 1000 > lim['time']: res['res'] = jcnf.JUDGE_RES['tle'] cptrace.ptrace( cptrace.PTRACE_KILL, exec_pid ) #deprecated! should be implemented in another way break if eax_now != -1: eax_prev = eax_now t_prev = jclk() else: #sys.stderr.write('unknown status') pass #TODO: also check total time limit? if res['res'] == jcnf.JUDGE_RES['tle']: #TODO: write log cptrace.ptrace( cptrace.PTRACE_KILL, exec_pid ) #deprecated! should be implemented in another way else: cptrace.ptrace(cptrace.PTRACE_SYSCALL, exec_pid) except: pass try: os.wait() os.kill(exec_pid, signal.SIGKILL) except Exception, e: if JDEBUG: print 'cannot kill', Exception, e pass res['mem'] = int(res['mem']) res['time'] = int(res['time'] * 1000) if res['res'] == jcnf.JUDGE_RES['init']: if os.WIFSIGNALED(exec_status[1]): res['res'] = jcnf.JUDGE_RES['re'] elif os.WIFSTOPPED(exec_status[1]) and os.WSTOPSIG( exec_status[1]) != signal.SIGTRAP: res['res'] = jcnf.JUDGE_RES['re'] if res['res'] == jcnf.JUDGE_RES['init']: df = Diff() res['res'] = df.diff(jcnf.getCasePathO(sub.pid, case_id), jcnf.getExecPathO()) sub.case_done += 1 #sub.mem += res['mem'] sub.mem = max(sub.mem, res['mem']) sub.time += res['time'] if res['res'] == jcnf.JUDGE_RES['init']: res['res'] = jcnf.JUDGE_RES['se'] # Need to calculate the scores of all test data, and thus we cannot break out when judging # if sub.block and res['res']!=jcnf.JUDGE_RES['ac']: # break t_prev = jclk() sub.status = jcnf.SUB_STATUS['done']
def RunProgram(self, program, arguments, context, result): """Run the 'program'. 'program' -- The path to the program to run. 'arguments' -- A list of the arguments to the program. This list must contain a first argument corresponding to 'argv[0]'. 'context' -- A 'Context' giving run-time parameters to the test. 'result' -- A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.""" # Construct the environment. environment = self.MakeEnvironment(context) e_stdin = self.stdin c = {} for pair in context.items(): c[pair[0]] = pair[1] for substitution in c.keys(): pattern = "$(" + substitution.upper() + ")" replacement = context[substitution] e_stdin = e_stdin.replace(pattern, replacement) basename = os.path.split(arguments[0])[-1] qm_exec = qm.executable.Filter(e_stdin, -2) try: exit_status = qm_exec.Run(arguments, environment) stdout = qm_exec.stdout stderr = qm_exec.stderr causes = [] if sys.platform != "win32": if os.WIFEXITED(exit_status): if exit_status != self.exit_code: causes.append("exit_code") result["RunProgram.exit_code"] = str(exit_status) elif os.WIFSIGNALED(exit_status): self.__cause = "Process %s terminated by signal %d." % ( basename, os.WTERMSIG(exit_status)) elif os.WIFSTOPPED(exit_status): self.__cause = "Process %s stopped by signal %d." % ( basename, os.WSTOPSIG(exit_status)) else: self.__cause = "Process %s terminated abnormally." % basename # Check to see if the standard output matches. # First strip out ISQL junk stdout_stripped = re.sub("Database:.*\n", "", stdout) stdout_stripped = re.sub("SQL>\s*", "", stdout_stripped) stdout_stripped = re.sub("CON>\s*", "", stdout_stripped) stdout_stripped = re.sub("-->\s*", "", stdout_stripped) stdout_stripped = self.__PerformSubstitutions(stdout_stripped) stdout_stripped = re.compile("^\s+", re.I + re.M).sub("", stdout_stripped) stdout_stripped = re.compile("\s+$", re.I + re.M).sub("", stdout_stripped) self.stdout_stripped = re.sub("Database:.*\n", "", self.stdout) self.stdout_stripped = re.sub("SQL>\s*", "", self.stdout_stripped) self.stdout_stripped = re.sub("CON>\s*", "", self.stdout_stripped) self.stdout_stripped = re.sub("-->\s*", "", self.stdout_stripped) self.stdout_stripped = self.__PerformSubstitutions( self.stdout_stripped) self.stdout_stripped = re.compile("^\s+", re.I + re.M).sub( "", self.stdout_stripped) self.stdout_stripped = re.compile("\s+$", re.I + re.M).sub( "", self.stdout_stripped) if stdout_stripped != self.stdout_stripped: causes.append("standard output") result["ExecTest.stdin"] = "<pre>" + e_stdin + "</pre>" result[ "ExecTest.stdout_expected"] = "<pre>" + self.stdout + "</pre>" result["ExecTest.stdout"] = "<pre>" + stdout + "</pre>" result[ "ExecTest.stdout_stripped"] = "<pre>" + stdout_stripped + "</pre>" result[ "ExecTest.stdout_stripped_expected"] = "<pre>" + self.stdout_stripped + "</pre>" result["ExecTest.stripped_diff"] = "<pre>" + '\n'.join( difflib.ndiff( stdout_stripped.splitlines(0), self.stdout_stripped.splitlines(0))) + "</pre>" # Check to see that the standard error matches. 
stderr_stripped = re.sub( "Use CONNECT or CREATE DATABASE to specify a database.*\n", "", stderr) if stderr_stripped != self.stderr: causes.append("standard error") result["ExecTest.stdin"] = "<pre>" + e_stdin + "</pre>" result["ExecTest.stderr"] = "<pre>" + stderr + "</pre>" result[ "ExecTest.expected_stderr"] = "<pre>" + self.stderr + "</pre>" # If anything went wrong, the test failed. if causes: result.Fail("Unexpected %s." % string.join(causes, ", ")) except: result.NoteException()
def ikos_analyzer(db_path, pp_path, opt): # Fix huge slow down when ikos-analyzer uses DROP TABLE on an existing db if os.path.isfile(db_path): os.remove(db_path) cmd = [settings.ikos_analyzer()] # analysis options cmd += [ '-a=%s' % ','.join(opt.analyses), '-d=%s' % opt.domain, '-entry-points=%s' % ','.join(opt.entry_points), '-globals-init=%s' % opt.globals_init, '-prec=%s' % opt.precision_level, '-proc=%s' % opt.procedural ] if opt.no_init_globals: cmd.append('-no-init-globals=%s' % ','.join(opt.no_init_globals)) if opt.no_liveness: cmd.append('-no-liveness') if opt.no_pointer: cmd.append('-no-pointer') if opt.no_fixpoint_profiles: cmd.append('-no-fixpoint-profiles') if opt.hardware_addresses: cmd.append('-hardware-addresses=%s' % ','.join(opt.hardware_addresses)) if opt.hardware_addresses_file: cmd.append('-hardware-addresses-file=%s' % opt.hardware_addresses_file) if opt.argc is not None: cmd.append('-argc=%d' % opt.argc) # import options cmd.append('-allow-dbg-mismatch') if opt.no_libc: cmd.append('-no-libc') if opt.no_libcpp: cmd.append('-no-libcpp') if opt.no_libikos: cmd.append('-no-libikos') # AR passes options if opt.disable_type_check: cmd.append('-disable-type-check') if opt.no_simplify_cfg: cmd.append('-no-simplify-cfg') if opt.no_simplify_upcast_comparison: cmd.append('-no-simplify-upcast-comparison') if 'gauge' in opt.domain: cmd.append('-add-loop-counters') # debug options cmd += [ '-display-checks=%s' % opt.display_checks, '-display-inv=%s' % opt.display_inv ] if opt.display_ar: cmd.append('-display-ar') if opt.display_liveness: cmd.append('-display-liveness') if opt.display_function_pointer: cmd.append('-display-function-pointer') if opt.display_pointer: cmd.append('-display-pointer') if opt.display_fixpoint_profiles: cmd.append('-display-fixpoint-profiles') if opt.generate_dot: cmd += ['-generate-dot', '-generate-dot-dir', opt.generate_dot_dir] # add -name-values if necessary if (opt.display_checks in ('all', 'fail') or opt.display_inv in ('all', 'fail') or opt.display_liveness or opt.display_fixpoint_profiles or opt.display_function_pointer or opt.display_pointer or opt.display_raw_checks): cmd.append('-name-values') # misc. options if opt.color == 'yes': cmd.append('-color=1') elif opt.color == 'no': cmd.append('-color=0') cmd.append('-log=%s' % opt.log_level) # input/output cmd += [pp_path, '-o', db_path] # set resource limit, if requested if opt.mem > 0: import resource # fails on Windows def set_limits(): mem_bytes = opt.mem * 1024 * 1024 resource.setrlimit(resource.RLIMIT_AS, [mem_bytes, mem_bytes]) else: set_limits = None # called after timeout def kill(p): try: log.error('Timeout') p.send_signal(signal.SIGALRM) except OSError: pass log.info('Running ikos analyzer') log.debug('Running %s' % command_string(cmd)) p = subprocess.Popen(cmd, preexec_fn=set_limits) timer = threading.Timer(opt.cpu, kill, [p]) if opt.cpu > 0: timer.start() try: if sys.platform.startswith('win'): return_status = p.wait() else: _, return_status = os.waitpid(p.pid, 0) finally: # kill the timer if the process has terminated already if timer.isAlive(): timer.cancel() # special case for Windows, since it does not define WIFEXITED & co. 
    if sys.platform.startswith('win'):
        if return_status != 0:
            raise AnalyzerError('a run-time error occurred', cmd, return_status)
        else:
            return

    # if it did not terminate properly, propagate this error code
    if os.WIFEXITED(return_status) and os.WEXITSTATUS(return_status) != 0:
        exit_status = os.WEXITSTATUS(return_status)
        raise AnalyzerError('a run-time error occurred', cmd, exit_status)

    if os.WIFSIGNALED(return_status):
        signum = os.WTERMSIG(return_status)
        raise AnalyzerError('exited with signal %s' % signal_name(signum),
                            cmd, signum)

    if os.WIFSTOPPED(return_status):
        signum = os.WSTOPSIG(return_status)
        raise AnalyzerError('exited with signal %s' % signal_name(signum),
                            cmd, signum)
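# signal_name above is an ikos-specific helper not shown in this excerpt; a
# minimal stand-in (an assumption, not the real implementation) could use the
# standard signal module on Python 3.5+:
import signal

def signal_name(signum):
    try:
        return signal.Signals(signum).name   # e.g. 'SIGSEGV'
    except ValueError:
        return 'signal %d' % signum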
def signal(self):
    if os.WIFSTOPPED(self):
        return os.WSTOPSIG(self)
    if os.WIFSIGNALED(self):
        return os.WTERMSIG(self)
    return None
            elif scname == 'open':
                decode_open(pid, bitness)
            elif scname == 'execve':
                decode_execve(pid, bitness)
            elif scname in ("socketcall", "bind", "connect"):
                decode_socketcall(pid, bitness, scname)
            else:
                print("%s()" % scname, end="")
        insyscall = not insyscall
    elif event == pinktrace.event.EVENT_EXEC:
        # Update bitness
        bitness = pinktrace.bitness.get(pid)
    elif event in (pinktrace.event.EVENT_GENUINE, pinktrace.event.EVENT_UNKNOWN):
        # Send the signal to the traced child as it was a genuine signal.
        sig = os.WSTOPSIG(status)
    elif event == pinktrace.event.EVENT_EXIT_GENUINE:
        exit_code = os.WEXITSTATUS(status)
        print("Child %d exited normally with return code %d" % (pid, exit_code))
        dead = True
    elif event == pinktrace.event.EVENT_EXIT_SIGNAL:
        exit_code = 128 + os.WTERMSIG(status)
        print("Child %d exited with signal %d" % (pid, os.WTERMSIG(status)))
        dead = True

    if dead:
        break

sys.exit(exit_code)
def run(pov_path, target_path, *, flag=None, result=None): if result is None: result = {} if not flag: flag = os.urandom(4096) assert len(flag) == 4096 flag_fd = os.memfd_create('flag') flag_path = f'/proc/{os.getpid()}/fd/{flag_fd}' os.write(flag_fd, flag) result['flag'] = flag.decode('latin') child_conn, parent_conn = multiprocessing.Pipe(duplex=True) def dup_child_3(): os.dup2(child_conn.fileno(), 3, inheritable=True) pov_seed = str(int.from_bytes(os.urandom(3), 'little')) pov_popen = subprocess.Popen( ['qemu-cgc/i386-linux-user/qemu-i386', '-seed', pov_seed, pov_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, pass_fds=(3, ), preexec_fn=dup_child_3) pov_type = b''.join(os.read(parent_conn.fileno(), 1) for _ in range(4)) pov_type = int.from_bytes(pov_type, 'little') assert pov_type == 2 os.write(parent_conn.fileno(), TYPE_2_DATA) def trace_me(): libc.ptrace(PTRACE['TRACEME'], 0, 0, 0) target_seed = str(int.from_bytes(os.urandom(3), 'little')) target_popen = subprocess.Popen([ 'qemu-cgc/i386-linux-user/qemu-i386', '-magicpregen', flag_path, '-seed', target_seed, target_path ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, preexec_fn=trace_me) result['interaction'] = [] while True: pid, status = os.wait() if pid == target_popen.pid: sig = os.WSTOPSIG(status) if sig and sig != signal.SIGTRAP: result['signal'] = sig break if not os.WIFSTOPPED(status): break else: continue regs = user_regs_struct() libc.ptrace(PTRACE['GETREGS'], pid, 0, ctypes.byref(regs)) syscall = SYSCALL_NAME[regs.orig_rax] syscall_start = ctypes.c_long(regs.rax).value == -errno.ENOSYS reading = SYSCALL_NAME[regs.orig_rax] == 'read' and regs.rdi == 0 writing = SYSCALL_NAME[regs.orig_rax] == 'write' and regs.rdi == 1 try: if reading and syscall_start: count = regs.rdx data = pov_popen.stdout.read1( min(count, io.DEFAULT_BUFFER_SIZE)) target_popen.stdin.write(data) target_popen.stdin.flush() result['interaction'].append( ('read', count, data.decode('latin'))) if not data: break elif writing and not syscall_start: count = regs.rdx data = target_popen.stdout.read(count) pov_popen.stdin.write(data) pov_popen.stdin.flush() result['interaction'].append( ('write', count, data.decode('latin'))) except BrokenPipeError: break libc.ptrace(PTRACE['SYSCALL'], pid, 0, 0) pov_answer = b''.join(os.read(parent_conn.fileno(), 1) for _ in range(4)) result['pov_answer'] = pov_answer.decode('latin') result['pov_answer_correct'] = pov_answer in flag
def __init__(self, status): ## # 1 IFF the status is decodable. self.valid = 0 ## # 1 IFF the process is stopped, 0 otherwise. self.stopped = 0 ## # The signal number that stopped the process. -1 if the process is # not stopped. self.stopped_by = -1 ## # 1 IFF the process called the exit() function. 0 otherwise. self.called_exit = 0 ## # The exit code passed to the exit() function. -1 if the process # did not call exit. # @note Typically, an exit_code of 0 indicates success, and a non-zero # exit_code indicates failure. Furthermore, an exit_code # of 2 usually indicates a syntax failure. self.exit_code = -1 ## # 1 IFF the process was killed by a signal (external or internal). # 0 otherwise. self.killed = 0 ## # The signal number that killed the process. -1 if the process # was not killed. self.killed_by = -1 ## # The generic message that best describes why the process terminated, # or an internal failure message. self.message = '' self.called_exit = _os.WIFEXITED(status) if self.called_exit: self.exit_code = _os.WEXITSTATUS(status) else: self.exit_code = -1 self.stopped = _os.WIFSTOPPED(status) if self.stopped: self.stopped_by = _os.WSTOPSIG(status) else: self.stopped_by = -1 self.killed = _os.WIFSIGNALED(status) if self.killed: self.killed_by = _os.WTERMSIG(status) else: self.killed_by = -1 self.message = '' count = self.called_exit + self.stopped + self.killed if not count: # This one's too little... self.message = _invalid_exit_status elif count > 1: # This one's too big... self.message = _generate_too_many_stati_message( self.called_exit, exit_code, self.stopped, self.stopped_by, self.killed, self.killed_by) elif count == 1: # This one's just right... if self.called_exit: self.message = _generate_exited_message(self.exit_code) self.valid = 1 elif self.stopped: self.message = _generate_stopped_message(self.stopped_by) self.valid = 1 elif self.killed: self.message = _generate_signaled_message(self.killed_by) self.valid = 1 else: self.message = _inconsistent_count_and_stati self.valid = 1 else: # This one's fricken imposible! self.message = _internal_error return
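# A hedged usage sketch for a wait-status decoder like the one above: pass it
# a raw status from os.waitpid() and read back the decoded fields. ExitStatus
# is a hypothetical name; the excerpt does not show the real class name.
import os

pid = os.fork()
if pid == 0:
    os._exit(3)                          # child exits with code 3

_, status = os.waitpid(pid, 0)
info = ExitStatus(status)                # hypothetical name for the class above
print(info.called_exit, info.exit_code)  # normal exit, code 3
print(info.killed, info.killed_by)       # not killed by a signal
print(info.stopped, info.stopped_by)     # not stopped
print(info.message)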
class ParTestCase(unittest.TestCase): """A class whose instances are single test cases. The ParTestCase starts a new process for each test this enables the isolation of tests one from the other. """ def __init__(self, methodName='runTest'): """Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name. """ try: self.__testMethodName = methodName testMethod = getattr(self, methodName) self.__testMethodDoc = testMethod.__doc__ except AttributeError: raise ValueError, "no such test method in %s: %s" % \ (self.__class__, methodName) def shortDescription(self): """Returns a one-line description of the test, or None if no description has been provided. The default implementation of this method returns the first line of the specified test method's docstring. """ doc = self.__testMethodDoc return doc and string.strip(string.split(doc, "\n")[0]) or None def id(self): return "%s.%s" % (self.__class__, self.__testMethodName) def __str__(self): return "%s (%s)" % (self.__testMethodName, self.__class__) def __repr__(self): return "<%s testMethod=%s>" % \ (self.__class__, self.__testMethodName) def __call__(self, result=None): if result is None: result = self.defaultTestResult() result.startTest(self) testMethod = getattr(self, self.__testMethodName) try: result.createPipe() tpid = os.fork() if tpid == 0: # # The child processes should close the read side of the pipe. # result.closePipeRd() try: self.setUp() except KeyboardInterrupt: os._exit(-1) except: result.addError(self, self.__exc_info()) os._exit(0) ok = 0 try: testMethod() ok = 1 except self.failureException, e: result.addFailure(self, self.__exc_info()) except KeyboardInterrupt: os._exit(-1) except: result.addError(self, self.__exc_info()) try: self.tearDown() except KeyboardInterrupt: os._exit(-1) except: result.addError(self, self.__exc_info()) ok = 0 if ok: result.addSuccess(self) # # IMPORTANT NOTE: # child processses of the test processes (tpid), can throw # exceptions either explicitly or implicitly through assert_ # and other unittest functions. This means that they reach # the os._exit command below. This exit command avoids that # the exceptions propogate further 'up' in the code. # os._exit(0) # # The parent process should close the write side of the pipe. # result.closePipeWr() # # Set the watchdog # test_timeout = getattr(self, '_TEST_TIMEOUT', TEST_TIMEOUT) wd = WatchDog(timeout=test_timeout) try: try: cpid, status = os.waitpid(tpid, 0) except KeyboardInterrupt: raise except: result.addError(self, self.__exc_info()) finally: # # Turn of the watchdog # wd.close() # # Check the exit status. This part is wraped in a try caluse # so that I can use the addError method. 
# try: if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: if os.WEXITSTATUS(status) == 255: raise KeyboardInterrupt else: raise TestError, 'The test process exited unexpectedly with code %d' % ( os.WEXITSTATUS(status) - 256) if os.WIFSTOPPED(status): sig = os.WSTOPSIG(status) if sig in SIGNALS_DICT.keys(): sig_str = '%s(%d)' % (SIGNALS_DICT[sig], sig) else: sig_str = 'None(%d)' % sig raise TestError, 'The test process stopped unexpectedly by signal %s' % sig_str if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) if sig in SIGNALS_DICT.keys(): sig_str = '%s(%d)' % (SIGNALS_DICT[sig], sig) else: sig_str = 'None(%d)' % sig raise TestError, 'The test process terminated unexpectedly by signal %s' % sig_str except KeyboardInterrupt: raise except: result.addError(self, self.__exc_info())
def stopsig(self):
    if os.WIFSTOPPED(self.status):
        return os.WSTOPSIG(self.status)
def __sig_child_handler(self, signum, frame): # Our child exits with sig 9 when all is good... so map that to 0 ret = 0 pid = None try: status = None sig = None core = False self.logger.debug("Running children: %s" % self.executor_pids) self.logger.debug("Got signal %s" % signum) pid, ret = os.wait() self.logger.debug("After wait") msg = "Child %s: wait returned code %s which means:" % (pid, ret) if os.WIFSIGNALED(ret): sig = os.WTERMSIG(ret) msg += " signalled %s" % sig if os.WIFEXITED(ret): status = os.WEXITSTATUS(ret) msg += " exited %s" % status if os.WIFSTOPPED(ret): msg += " stopped %s" % os.WSTOPSIG(ret) if os.WCOREDUMP(ret): core = True msg += " core dumped" if os.WIFCONTINUED(ret): msg += " contunied" self.logger.debug(msg) if pid in self.executor_pids: self.executor_pids.remove(pid) self.executor_rets.append((status, sig, core)) else: self.logger.error("Pid %s is not a child" % pid) # sometimes signal handler is not called, clean here zombies for pid in self.executor_pids: p, r = os.waitpid(pid, os.WNOHANG) if p != 0: self.logger.debug( "Zombie with pid %d found, exit code=%d" % (p, r)) self.executor_pids.remove(pid) #self.executor_rets.append((status, sig, core)) ret = 0 if len(self.executor_pids) == 0: self.logger.trace("Statuses of all executors: %s" % self.executor_rets) for st, sg, co in self.executor_rets: if st is not None and st != 0: ret = st if co: ret = 1 self.logger.info("Exit with code %s" % ret) sys.exit(ret) except Exception, ex: self.logger.error("Error waiting for child process: %s" % ex) if len(self.executor_pids) <= 1: self.logger.warn( "No more child processes, exit with success: pids=%s, last pid=%s, ret=%s" % (self.executor_pids, pid, ret)) sys.exit(0) else: self.logger.info("Children left: %s" % self.executor_pids)
def main(): libc = ctypes.CDLL('/lib/x86_64-linux-gnu/libc.so.6') # Your libc location may vary! libc.ptrace.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p] libc.ptrace.restype = ctypes.c_uint64 pid = int(get_pid("qemu-mips-static")[:-1]) address = 0 addresses = [] libc.ptrace(PTRACE_ATTACH, pid, None, None) # Ignore these variables, they are just here to print things neatly started_work_print = False found_address_print = False only_print_address_once = True # os.waitpid returns (pid, status_number), where status_number is the signal sent from the process as for the reason we got back control. stat = os.waitpid(pid, 0) count = 0 while not os.WIFEXITED(stat[1]): # Print info if started_work_print: print("Started cracking the program. Please be patient!") started_work_print = False if found_address_print: print(f"Found address of sum: {hex(address)}") found_address_print = False # End print # Actual loop if os.WIFSTOPPED(stat[1]): if os.WSTOPSIG(stat[1]) == 19: # If we got here because of a PTRACE_ATTACH print("Attached to process!") print(f"PID: {pid}") print("Please enter \"AAAAAAAAAA\" in the password field!") started_work_print = True elif len(addresses) > 1: # Else if we know what address to write to libc.ptrace(PTRACE_POKEDATA, pid, address, 0x39050000) # 0x539 in Little Endian # Single step the program to the next instruction libc.ptrace(PTRACE_SINGLESTEP, pid, None, None) """ Since we have no way of accesssing pc, we try to get the address every 2000000 iterations until we find our string on the stack. After that, we ensure that with every instruction the value at that address is exactly 0x539, so that the comparison passes.""" if count % 2000000 == 0: if len(addresses) < 2: # The password string is saved twice in memory, once for the console and once on the stack. The second address is always the stack. addresses = get_address(pid) address = addresses[1] if len(addresses) > 1 else 0 # Printing related elif only_print_address_once: found_address_print = True only_print_address_once = False # End printing related # Next iteration count += 1 stat = os.waitpid(pid, 0) libc.ptrace(PTRACE_DETACH, pid, None, None)
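# The literal 19 checked above is the numeric value of SIGSTOP on Linux; a
# hedged attach-and-wait sketch using signal.SIGSTOP instead of the magic
# number. libc, PTRACE_ATTACH, PTRACE_DETACH and pid are assumed to be set up
# exactly as in the example above.
import os
import signal

libc.ptrace(PTRACE_ATTACH, pid, None, None)
_, status = os.waitpid(pid, 0)
if os.WIFSTOPPED(status) and os.WSTOPSIG(status) == signal.SIGSTOP:
    print("Attached, tracee stopped by SIGSTOP")
libc.ptrace(PTRACE_DETACH, pid, None, None)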
def _debugger_thread_inner(main_pid, dbgproc_started, dbgthread_stop, stack_request_pipe, stack_queue, syscall_queue, syscall_filter): ptrace_options = ptrace.PTRACE_O_TRACECLONE # Attach to the tracee and wait for it to stop. ptrace.attach_and_wait(main_pid, ptrace_options) if syscall_filter is not None: filter_ = lambda sc: any(m.match(sc) for m in syscall_filter) else: filter_ = None syscall_trap = signal.SIGTRAP | 0x80 enabled = False signum = 0 syscall_state = {} sigstop_received = set() processes = {main_pid} mem_fds = {} mem_fds[main_pid] = _open_procmem(main_pid) # Notify the parent that we are ready to start tracing. dbgproc_started.set() try: # Restart the tracee and enter the tracing loop. ptrace.syscall(main_pid) while True: if dbgthread_stop.is_set(): break pid, status = ptrace.wait(-1) if os.WIFEXITED(status) or os.WIFSIGNALED(status): # Traced thread has died. processes.discard(pid) mem_fd = mem_fds.get(pid) if mem_fd is not None: try: os.close(mem_fd) except IOError: pass if not processes: break else: continue elif os.WIFSTOPPED(status): ptrace_event = ptrace.WPTRACEEVENT(status) if ptrace_event == ptrace.PTRACE_EVENT_CLONE: # A new thread has been created. new_pid = ptrace.geteventmsg(pid) # See the comment below for the explanation of this check. if new_pid not in sigstop_received: ptrace.wait_for_trace_stop(new_pid) try: ptrace.syscall(new_pid) except OSError as e: if e.errno != errno.ESRCH: # The new thread might have already died. raise else: sigstop_received.discard(new_pid) mem_fds[new_pid] = _open_procmem(new_pid) processes.add(new_pid) ptrace.syscall(pid) continue stopsig = os.WSTOPSIG(status) if stopsig != syscall_trap: # Signal-delivery-stop. # The special condition below is for cases when we # receive a SIGSTOP for a newly created thread _before_ # receiving the PTRACE_EVENT_CLONE event for its parent. # In this case we must not forward the signal, but # must record its receipt so that once we _do_ receive # PTRACE_EVENT_CLONE for the parent, we don't wait for # SIGSTOP in the child again. if (stopsig != signal.SIGSTOP or pid in processes or all(syscall.name != 'clone' for syscall in syscall_state.values() if syscall is not None)): # forward the signal signum = stopsig else: sigstop_received.add(pid) else: # Syscall-stop. syscall = syscall_state.get(pid) regs = ptrace.getregs(pid) mem_fd = mem_fds.get(pid) if syscall is None: # Syscall-enter-stop. syscall_state[pid] = ptrace.syscall_enter( pid, regs, mem_fd) else: # Syscall-exit-stop. ptrace.syscall_exit(syscall, regs, mem_fd) if enabled: # Stop tracing once the tracee executes # the magic open() in ptracer.disable(). stop_tracing = ( syscall.name == 'open' and syscall.args[0].value == b'\x03\x02\x01' ) if stop_tracing: break elif filter_ is None or filter_(syscall): # Wait for the traceback to arrive. os.write(stack_request_pipe, struct.pack('!Q', pid)) stack = stack_queue.get() if stack is None: ptrace.cont(pid) break syscall.traceback = stack syscall_queue.put_nowait(syscall) elif not enabled: # Start tracing once the tracee executes # the magic open() in ptracer.enable(). start_tracing = ( syscall.name == 'open' and syscall.args[0].value == b'\x01\x02\x03' ) if start_tracing: enabled = True syscall_state[pid] = None else: logger.error('unexpected status of traced process %s: %s', pid, status) # Continue until next syscall. 
ptrace.syscall(pid, signum) signum = 0 finally: for process in processes: try: ptrace.detach(process) except OSError as e: if e.errno == errno.ESRCH: pass else: raise for fd in mem_fds.values(): try: os.close(fd) except (OSError, IOError): pass
def RunProgram(self, program, arguments, stdin, context, result): """Run the 'program'. 'program' -- The path to the program to run. 'arguments' -- A list of the arguments to the program. This list must contain a first argument corresponding to 'argv[0]'. 'stdin' -- Content of standard input for the program. 'context' -- A 'Context' giving run-time parameters to the test. 'result' -- A 'Result' object. The outcome will be 'Result.PASS' when this method is called. The 'result' may be modified by this method to indicate outcomes other than 'Result.PASS' or to add annotations.""" # Construct the environment. environment = self.MakeEnvironment(context) e_stdin = stdin c = {} for pair in context.items(): c[pair[0]] = pair[1] for substitution in c.keys(): pattern = "$(" + substitution.upper() + ")" replacement = context[substitution] e_stdin = e_stdin.replace(pattern, replacement) basename = os.path.split(arguments[0])[-1] qm_exec = qm.executable.Filter(e_stdin, -2) try: exit_status = qm_exec.Run(arguments, environment) stdout = qm_exec.stdout stderr = qm_exec.stderr causes = [] if sys.platform != "win32": if os.WIFEXITED(exit_status): if exit_status != self.exit_code: causes.append("exit_code") result["RunProgram.exit_code"] = str(exit_status) elif os.WIFSIGNALED(exit_status): self.__cause = "Process %s terminated by signal %d." % ( basename, os.WTERMSIG(exit_status)) elif os.WIFSTOPPED(exit_status): self.__cause = "Process %s stopped by signal %d." % ( basename, os.WSTOPSIG(exit_status)) else: self.__cause = "Process %s terminated abnormally." % basename # Check to see that the standard error matches. if stderr: causes.append("standard error") result["RunProgram.stderr"] = "'''" + stderr + "'''" # If anything went wrong, the test failed. if causes: result.Fail("Unexpected %s." % string.join(causes, ", ")) except: result.NoteException()
def test_ptrace_syscalls(self): def process_func(): ptrace.traceme() os.kill(os.getpid(), signal.SIGSTOP) with open('/dev/null', 'w') as f: f.write('foo') rd, wr = os.pipe() os.close(rd) os.close(wr) try: process = multiprocessing.Process(target=process_func) process.start() pid, status = os.waitpid(process.pid, 0) self.assertTrue(os.WIFSTOPPED(status)) stopsig = os.WSTOPSIG(status) self.assertEqual(stopsig, signal.SIGSTOP) ptrace.setoptions(process.pid, ptrace.PTRACE_O_TRACESYSGOOD) syscalls = [] in_syscall = None while True: ptrace.syscall(process.pid) pid, status = os.waitpid(process.pid, 0) if os.WIFEXITED(status): break self.assertTrue(os.WIFSTOPPED(status)) stopsig = os.WSTOPSIG(status) self.assertTrue(stopsig & 0x80) self.assertEqual(stopsig & 0x7F, signal.SIGTRAP) regs = ptrace.getregs(process.pid) if not in_syscall: syscall = ptrace.syscall_enter(process.pid, regs) syscalls.append(syscall) in_syscall = syscall else: ptrace.syscall_exit(in_syscall, regs) in_syscall = None finally: try: os.kill(process.pid, signal.SIGKILL) except OSError as e: if e.errno == errno.ESRCH: pass else: raise syscalls = [ s for s in syscalls if s.name in {'open', 'openat', 'write', 'close'} ] self.assertEqual(len(syscalls), 5) open_call, write_call, close_call = syscalls[:3] if open_call.name == 'openat': self.assertEqual(open_call.args[1].value, b'/dev/null') else: self.assertEqual(open_call.args[0].value, b'/dev/null') fno = open_call.result.value self.assertGreater(fno, 0) self.assertIsNotNone(open_call.result.type) self.assertEqual(write_call.args[0].value, fno) self.assertEqual(write_call.args[2].value, 3) self.assertEqual(write_call.result.value, 3) self.assertEqual(close_call.args[0].value, fno)
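# With PTRACE_O_TRACESYSGOOD set (as in the test above), syscall-stops report
# WSTOPSIG(status) as SIGTRAP with bit 0x80 set, which is what the assertions
# check. A small hedged helper capturing that test:
import os
import signal

def is_syscall_stop(status):
    # True only for syscall-stops reported under PTRACE_O_TRACESYSGOOD
    return os.WIFSTOPPED(status) and os.WSTOPSIG(status) == (signal.SIGTRAP | 0x80)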
def email_move_child(host, r): local_db = Utils.Factory.get('Database')() local_co = Utils.Factory.get('Constants')(local_db) r_id = r['request_id'] if not is_valid_request(r_id, local_db=local_db, local_co=local_co): return if dependency_pending(r['state_data'], local_db=local_db, local_co=local_co): logger.debug("Request '%d' still has deps: '%s'.", r_id, r['state_data']) return try: acc = get_account(r['entity_id'], local_db=local_db) except Errors.NotFoundError: logger.error("email_move: user %d not found", r['entity_id']) return old_server = get_email_server(r['entity_id'], local_db=local_db) new_server = Email.EmailServer(local_db) new_server.find(r['destination_id']) if old_server.entity_id == new_server.entity_id: logger.error("Trying to move %s from " % acc.account_name + "and to the same server! Deleting request") br = BofhdRequests(local_db, local_co) br.delete_request(request_id=r_id) local_db.commit() return if not email_delivery_stopped(acc.account_name): logger.debug("E-mail delivery not stopped for %s", acc.account_name) return logger.debug("User being moved: '%s'.", acc.account_name) reqlock = RequestLockHandler() if not reqlock.grab(r_id): return # Disable quota while copying so the move doesn't fail cyrus_set_quota(acc.entity_id, 0, host=new_server, local_db=local_db) # Call the script cmd = [SSH_CMD, "cerebrum@%s" % host, cereconf.IMAPSYNC_SCRIPT, '--user1', acc.account_name, '--host1', old_server.name, '--user2', acc.account_name, '--host2', new_server.name, '--authusing', cereconf.CYRUS_ADMIN, '--passfile1', '/etc/cyrus.pw', '--useheader', 'Message-ID', '--regexmess', 's/\\0/ /g', '--ssl', '--subscribe', '--nofoldersizes'] proc = subprocess.Popen(cmd, capturestderr=True, bufsize=10240, close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) pid = proc.pid logger.debug("Called cmd(%d): '%s'", pid, cmd) proc.stdin.close() # Stolen from Utils.py:spawn_and_log_output() descriptor = {proc.stdout: logger.debug, proc.stderr: logger.info} while descriptor: # select() is called for _every_ line, since we can't inspect # the buffering in Python's file object. This works OK since # select() will return "readable" for an unread EOF, and # Python won't read the EOF until the buffers are exhausted. ready, x, x = select(descriptor.keys(), [], []) for fd in ready: line = fd.readline() if line == '': fd.close() del descriptor[fd] else: descriptor[fd]("[%d] %s" % (pid, line.rstrip())) status = proc.wait() if status == EXIT_SUCCESS: logger.debug("[%d] Completed successfully", pid) elif os.WIFSIGNALED(status): # The process was killed by a signal. sig = os.WTERMSIG(status) logger.warning('[%d] Command "%r" was killed by signal %d', pid, cmd, sig) return else: # The process exited with an exit status sig = os.WSTOPSIG(status) logger.warning("[%d] Return value was %d from command %r", pid, sig, cmd) return # Need move SIEVE filters as well cmd = [cereconf.MANAGESIEVE_SCRIPT, '-v', '-a', cereconf.CYRUS_ADMIN, '-p', pwfile, acc.account_name, old_server.name, new_server.name] if Utils.spawn_and_log_output( cmd, connect_to=[old_server.name, new_server.name]) != 0: logger.warning('%s: managesieve_sync failed!', acc.account_name) return logger.info('%s: managesieve_sync completed successfully', acc.account_name) # The move was successful, update the user's server # Now set the correct quota. 
hq = get_email_hardquota(acc.entity_id, local_db=local_db) cyrus_set_quota(acc.entity_id, hq, host=new_server, local_db=local_db) et = Email.EmailTarget(local_db) et.find_by_target_entity(acc.entity_id) et.email_server_id = new_server.entity_id et.write_db() # We need to delete this request before adding the # delete to avoid triggering the conflicting request # test. br = BofhdRequests(local_db, local_co) br.delete_request(request_id=r_id) local_db.commit() br.add_request(r['requestee_id'], r['run_at'], local_co.bofh_email_delete, r['entity_id'], old_server.entity_id) local_db.commit() logger.info("%s: move_email success.", acc.account_name) reqlock.release()
def wait_trap(pid):
    while True:
        _, status = os.waitpid(pid, 0)
        if os.WIFSTOPPED(status) and os.WSTOPSIG(status) == signal.SIGTRAP:
            break
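# A slightly more general variant of wait_trap, assuming the same calling
# convention: block until the traced child stops with a given signal and hand
# back the raw wait status.
import os

def wait_for_stop(pid, signo):
    while True:
        _, status = os.waitpid(pid, 0)
        if os.WIFSTOPPED(status) and os.WSTOPSIG(status) == signo:
            return status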
def main(self, command): """ start the trace with the given command :type command: string :param command: command line to trace passed through shlex.split :rtype: bool :return: false if something went wrong """ import ctypes from FingerPrint.ptrace import func as ptrace_func import FingerPrint.ptrace.cpu_info import FingerPrint.ptrace.signames files = {} # # main function to launch a process and trace it # returnValue = False self.program = command # this is to check if we are entering or returning from a system call processesStatus = dict() options = ptrace_func.PTRACE_O_TRACEFORK | ptrace_func.PTRACE_O_TRACEVFORK \ | ptrace_func.PTRACE_O_TRACECLONE | ptrace_func.PTRACE_O_TRACEEXIT \ | ptrace_func.PTRACE_O_TRACEEXEC | ptrace_func.PTRACE_O_TRACESYSGOOD #TODO add the logger #logger = getLogger() #logger.setLevel(DEBUG) # creating the debugger and setting it up child = os.fork() if child == 0: # we are in the child or traced process # traceme and execv ptrace_func.ptrace_traceme() os.execl(FingerPrint.utils.which(self.program[0]), *self.program) else: # father or tracer process # we trace the execution here logger.debug("The fingerprint process %d going to trace %d" % (os.getpid(), child)) pid, status = os.waitpid(-1, 0) if pid != child: logger.error("The process tracer could not bootstrap.") return False ptrace_func.ptrace_setoptions(child, options) ptrace_func.ptrace_syscall(child) files = TracerControlBlock.files TracerControlBlock.set_trace_function() while True: # main loop tracer # 1. wait for syscall from the children # 2. analyze what happen, if mmap syscall scan /proc/PID/maps # 3. get ready to wait for the next syscall try: # wait for all cloned children __WALL = 0x40000000 (pid, status) = os.waitpid(-1, 0x40000000) except OSError: logger.error("Tracing terminated successfully") return True if not pid > 0: logger.error("Catastrofic failure") return False event = status >> 16 signalValue = os.WSTOPSIG(status) deliverSignal = 0 if os.WIFEXITED(status): # a process died, report it and go back to wait for syscall logger.debug("The process " + str(pid) + " exited") processesStatus.pop(pid) continue if os.WIFSIGNALED(status): logger.debug("The process " + str(pid) + " exited because of a signal") processesStatus.pop(pid) continue if os.WIFCONTINUED(status): logger.debug("The process " + str(pid) + " continued") elif os.WIFSTOPPED(status) and signalValue == (signal.SIGTRAP | 0x80): # # we have a syscall # orig_rax or orig_eax contains the syscall number # taken from linux src arch/x86/syscalls/syscall_[32|64].tbl # switch on the syscal number to intercept mmap and open regs = ptrace_func.ptrace_getregs(pid) if pid not in processesStatus: #new pid tcb = TracerControlBlock(pid) processesStatus[pid] = tcb if (FingerPrint.ptrace.cpu_info.CPU_X86_64 and regs.orig_rax == 2) or \ (FingerPrint.ptrace.cpu_info.CPU_I386 and regs.orig_eax == 5):# or regs.orig_rax == 257): # # handle open (orig_rax == 2 on 64bit) or (orig_eax == 5 on 32bit) # if processesStatus[pid].enterCall: # we are entering open, regs.rsi contains the first arguments for 64bit # https://github.com/torvalds/linux/blob/master/arch/x86/kernel/entry_64.S#L585 # ebx if for 32 bits http://man7.org/linux/man-pages/man2/syscall.2.html if FingerPrint.ptrace.cpu_info.CPU_X86_64: processesStatus[pid].firstArg = regs.rdi else: processesStatus[pid].firstArg = regs.ebx processesStatus[pid].enterCall = False else: # we are exiting from a open processesStatus[pid].enterCall = True # cast from c_ulong to c_long if 
FingerPrint.ptrace.cpu_info.CPU_X86_64: ret_value = regs.rax else: ret_value = regs.eax returnValue = ctypes.c_long(ret_value).value if returnValue >= 0: openPath = self.readCString( processesStatus[pid].firstArg, pid) if openPath[0] != '/': #relative path we need to get the pwd openPath = "$" + processesStatus[ pid].getProcessCWD() + "$" + openPath libName = processesStatus[pid].getFileOpener() if libName not in files: files[libName] = {} if processesStatus[pid].getProcessName( ) not in files[libName]: files[libName][processesStatus[pid]. getProcessName()] = set() files[libName][processesStatus[pid]. getProcessName()].add(openPath) # else don't do anything # TODO use close to check for used files (easier to trace full path) elif (FingerPrint.ptrace.cpu_info.CPU_X86_64 and regs.orig_rax == 9)\ or (FingerPrint.ptrace.cpu_info.CPU_I386 and \ (regs.orig_eax == 90 or regs.orig_eax == 192 ) ): # # handle mmap (orig_rax == 9 64bit or orig_eax == 90 or 192 on 32bit) # if processesStatus[pid].enterCall: # we are entering mmap processesStatus[pid].enterCall = False #print "the process %d enter mmap" % pid else: # we are returning from mmap processesStatus[pid].enterCall = True processesStatus[pid].updateSharedLibraries() elif os.WIFSTOPPED(status) and ( signalValue == signal.SIGTRAP) and event != 0: # this is just to print some output to the users subChild = ptrace_func.ptrace_geteventmsg(pid) if event == ptrace_func.PTRACE_EVENT_FORK: logger.debug("The process %d forked a new process %d" % (pid, subChild)) elif event == ptrace_func.PTRACE_EVENT_VFORK: logger.debug( "The process %d vforked a new process %d" % (pid, subChild)) elif event == ptrace_func.PTRACE_EVENT_CLONE: logger.debug("The process %d cloned a new process %d" % (pid, subChild)) elif event == ptrace_func.PTRACE_EVENT_EXEC: logger.debug("The process %d run exec" % (pid)) processesStatus[pid].updateProcessInfo() elif event == ptrace_func.PTRACE_EVENT_EXIT: pass #print "the process %d is in a event exit %d" % (pid, subChild) elif os.WIFSTOPPED(status): # when a signal is delivered to one of the child and we get notified # we need to relay it properly to the child # (in particular SIGCHLD must be rerouted to the parents if not mpirun # will never end) logger.debug("Signal %s(%d) delivered to %d " % \ (FingerPrint.ptrace.signames.signalName(signalValue), signalValue, pid)) deliverSignal = signalValue else: logger.debug("This should not happen!!") # set the ptrace option and wait for the next syscall notification #ptrace_func.ptrace_setoptions(pid, options); ptrace_func.ptrace_syscall(pid, deliverSignal)
def arbos(ar_path, db_path, analyses, entry_points=None, entry_points_init_gv=None, arbos_optimize=True, arbos_optimize_cfg=True, dot_cfg=False, add_loop_counters=False, interprocedural=True, prec_level='mem', liveness=True, pointer=True, gv_init=('scalars', 'pointers'), summaries=False, pointer_summaries=False, display_invariants='off', display_checks='off', mem=-1, cpu=-1, print_command=False): # list of arbos passes passes = [] if arbos_optimize: passes.append(('pointer-shift-opt', 'ps-opt')) if add_loop_counters: passes.append(('add-loop-counters', 'add-loop-counters')) if arbos_optimize_cfg: passes.append(('branching-opt', 'branching-opt')) if entry_points_init_gv: passes.append(('inline-init-gv', 'inline-init-gv')) passes.append(('unify-exit-nodes', 'unify-exit-nodes')) if dot_cfg: passes.append(('ar-to-dot', 'cfg-dot')) passes.append(('analyzer', 'analyzer')) loads = [] pass_names = [] options = [] # build arbos parameters for lib_name, pass_name in passes: lib_path = settings.arbos_pass(lib_name) loads.append('-load=%s' % lib_path) pass_names.append('-%s' % pass_name) # libinline-init-gv options if entry_points_init_gv: for entry_point in entry_points_init_gv: options += ['--init-globals', entry_point] if arbos_optimize and gv_init != 'all': for init in gv_init: options.append('--only-%s' % init) # libanalyzer options for analysis in analyses: options += ['--analysis', analysis] if entry_points: for entry_point in entry_points: options += ['--entry-points', entry_point] if not interprocedural: options.append('--intra') if not liveness: options.append('--no-liveness') if not pointer: options.append('--no-pointer') options += ['--precision-level', prec_level] if summaries: options.append('--summaries') if pointer_summaries: options.append('--pointer-summaries') options += [ '--display-invariants', display_invariants, '--display-checks', display_checks, '--output-db', db_path ] cmd = [settings.arbos()] + loads + pass_names + options if print_command: printf(' '.join(map(sh_quote, cmd)) + '\n') return passes # set resource limit def set_limits(): if mem > 0: mem_bytes = mem * 1024 * 1024 resource.setrlimit(resource.RLIMIT_AS, [mem_bytes, mem_bytes]) # called after timeout def kill(p): try: printf('TIMEOUT\n') p.terminate() p.kill() p.wait() except OSError: pass p = subprocess.Popen(cmd, stdin=open(ar_path), preexec_fn=set_limits) timer = threading.Timer(cpu, kill, [p]) if cpu > 0: timer.start() try: pid, returnstatus, ru_child = os.wait4(p.pid, 0) finally: # kill the timer if the process has terminated already if timer.isAlive(): timer.cancel() # if it did not terminate properly, propagate this error code if os.WIFEXITED(returnstatus) and os.WEXITSTATUS(returnstatus) != 0: raise ArbosError('some run-time error occured', cmd, os.WEXITSTATUS(returnstatus)) if os.WIFSIGNALED(returnstatus): raise ArbosError('exited with signal %d' % os.WTERMSIG(returnstatus), cmd, os.WTERMSIG(returnstatus)) if os.WIFSTOPPED(returnstatus): raise ArbosError('exited with signal %d' % os.WSTOPSIG(returnstatus), cmd, os.WSTOPSIG(returnstatus)) return passes
def run_command_in_shell(command=None, blocking=False, acceptable_return_codes=None): """Runs a command in a subshell via standard-C system(). <command> The shell command to run including command line options. <blocking> This will make the code *block* until the shell command exits. It will likely only work on UNIX shells where "cmd &" makes sense. http://stackoverflow.com/questions/35817/how-to-escape-os-system-calls-in-python """ if acceptable_return_codes is None: acceptable_return_codes = [0] _log.debug('shell command >>>%s<<<', command) _log.debug('blocking: %s', blocking) _log.debug('acceptable return codes: %s', str(acceptable_return_codes)) # FIXME: command should be checked for shell exploits command = command.strip() if os.name == 'nt': # http://stackoverflow.com/questions/893203/bat-files-nonblocking-run-launch if blocking is False: if not command.startswith('start '): command = 'start "GNUmed" /B "%s"' % command # elif blocking is True: # if not command.startswith('start '): # command = 'start "GNUmed" /WAIT /B "%s"' % command else: # what the following hack does is this: the user indicated # whether she wants non-blocking external display of files # - the real way to go about this is to have a non-blocking command # in the line in the mailcap file for the relevant mime types # - as non-blocking may not be desirable when *not* displaying # files from within GNUmed the really right way would be to # add a "test" clause to the non-blocking mailcap entry which # yields true if and only if GNUmed is running # - however, this is cumbersome at best and not supported in # some mailcap implementations # - so we allow the user to attempt some control over the process # from within GNUmed by setting a configuration option # - leaving it None means to use the mailcap default or whatever # was specified in the command itself # - True means: tack " &" onto the shell command if necessary # - False means: remove " &" from the shell command if its there # - all this, of course, only works in shells which support # detaching jobs with " &" (so, most POSIX shells) if blocking is True: command = command.rstrip(' &') elif blocking is False: if not command.strip().endswith('&'): command += ' &' _log.info('running shell command >>>%s<<<', command) # FIXME: use subprocess.Popen() ret_val = os.system(command.encode(sys.getfilesystemencoding())) _log.debug('os.system() returned: [%s]', ret_val) exited_normally = False if not hasattr(os, 'WIFEXITED'): _log.error('platform does not support exit status differentiation') if ret_val in acceptable_return_codes: _log.info( 'os.system() return value contained in acceptable return codes' ) _log.info('continuing and hoping for the best') return True return exited_normally _log.debug('exited via exit(): %s', os.WIFEXITED(ret_val)) if os.WIFEXITED(ret_val): _log.debug('exit code: [%s]', os.WEXITSTATUS(ret_val)) exited_normally = (os.WEXITSTATUS(ret_val) in acceptable_return_codes) _log.debug('normal exit: %s', exited_normally) _log.debug('dumped core: %s', os.WCOREDUMP(ret_val)) _log.debug('stopped by signal: %s', os.WIFSIGNALED(ret_val)) if os.WIFSIGNALED(ret_val): try: _log.debug('STOP signal was: [%s]', os.WSTOPSIG(ret_val)) except AttributeError: _log.debug('platform does not support os.WSTOPSIG()') try: _log.debug('TERM signal was: [%s]', os.WTERMSIG(ret_val)) except AttributeError: _log.debug('platform does not support os.WTERMSIG()') return exited_normally
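# The exit-status decoding above boils down to a short helper; a hedged sketch
# with a made-up name (exited_normally), falling back to a plain return-code
# comparison on platforms without WIFEXITED. On Python 3.9+,
# os.waitstatus_to_exitcode() offers a similar shortcut.
import os

def exited_normally(ret_val, acceptable_return_codes=(0,)):
    if not hasattr(os, 'WIFEXITED'):
        # platform cannot differentiate exit status (e.g. Windows)
        return ret_val in acceptable_return_codes
    if os.WIFEXITED(ret_val):
        return os.WEXITSTATUS(ret_val) in acceptable_return_codes
    return False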
def do_crash(self, expect_coredump=True, expect_corefile=False,
             sig=signal.SIGSEGV, check_running=True, sleep=0,
             command=test_executable, uid=None,
             expect_corefile_owner=None, args=[]):
    '''Generate a test crash.

    This runs command (by default test_executable) in cwd, lets it crash,
    and checks that it exits with the expected return code, leaving a core
    file behind if expect_corefile is set, and generating a crash report
    if expect_coredump is set.

    If check_running is set (default), this will abort if test_process is
    already running.
    '''
    self.assertFalse(
        os.path.exists('core'),
        '%s/core already exists, please clean up first' % os.getcwd())

    pid = self.create_test_process(check_running, command, uid=uid, args=args)
    if sleep > 0:
        time.sleep(sleep)
    os.kill(pid, sig)
    result = os.waitpid(pid, 0)[1]
    self.assertFalse(os.WIFEXITED(result), 'test process did not exit normally')
    self.assertTrue(os.WIFSIGNALED(result), 'test process died due to signal')
    self.assertEqual(os.WCOREDUMP(result), expect_coredump)
    self.assertEqual(os.WSTOPSIG(result), 0,
                     'test process was not signaled to stop')
    self.assertEqual(os.WTERMSIG(result), sig,
                     'test process died due to proper signal')

    # wait max 10 seconds for apport to finish
    timeout = 50
    while timeout >= 0:
        pidof = subprocess.Popen(['pidof', '-x', 'apport'],
                                 stdout=subprocess.PIPE)
        pidof.communicate()
        if pidof.returncode != 0:
            break
        time.sleep(0.2)
        timeout -= 1
    self.assertGreater(timeout, 0)

    if check_running:
        self.assertEqual(subprocess.call(['pidof', command]), 1,
                         'no running test executable processes')

    if expect_corefile:
        self.assertTrue(os.path.exists('core'), 'leaves wanted core file')
        try:
            # check core file permissions
            st = os.stat('core')
            self.assertEqual(stat.S_IMODE(st.st_mode), 0o600,
                             'core file has correct permissions')
            if expect_corefile_owner is not None:
                self.assertEqual(st.st_uid, expect_corefile_owner,
                                 'core file has correct owner')

            # check that core file is valid
            gdb = subprocess.Popen(
                ['gdb', '--batch', '--ex', 'bt', command, 'core'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (out, err) = gdb.communicate()
            self.assertEqual(gdb.returncode, 0)
            out = out.decode()
            err = err.decode().strip()
        finally:
            os.unlink('core')
    else:
        if os.path.exists('core'):
            try:
                os.unlink('core')
            except OSError as e:
                sys.stderr.write(
                    'WARNING: cannot clean up core file %s/core: %s\n'
                    % (os.getcwd(), str(e)))
            self.fail('leaves unexpected core file behind')
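The assertions above can be reproduced outside the test suite with a few lines of standard-library code; this sketch forks a throwaway child, kills it with SIGSEGV, and checks the same WIF*/WTERMSIG conditions (the 30-second sleep is arbitrary, and whether a core is actually dumped depends on the system's limits).

import os
import signal
import time

pid = os.fork()
if pid == 0:
    time.sleep(30)            # child just waits to be killed
    os._exit(0)

os.kill(pid, signal.SIGSEGV)
_, status = os.waitpid(pid, 0)

assert not os.WIFEXITED(status)                 # did not exit normally
assert os.WIFSIGNALED(status)                   # terminated by a signal
assert os.WTERMSIG(status) == signal.SIGSEGV    # ... by the signal we sent
print('core dumped: %s' % os.WCOREDUMP(status))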
def RunProgram(self, program, arguments, context, result):
    """Run the 'program'.

    'program' -- The path to the program to run.

    'arguments' -- A list of the arguments to the program.  This
    list must contain a first argument corresponding to 'argv[0]'.

    'context' -- A 'Context' giving run-time parameters to the test.

    'result' -- A 'Result' object.  The outcome will be 'Result.PASS'
    when this method is called.  The 'result' may be modified by this
    method to indicate outcomes other than 'Result.PASS' or to add
    annotations."""

    # Construct the environment.
    environment = self.MakeEnvironment(context)
    # Create the executable.
    if self.timeout >= 0:
        timeout = self.timeout
    else:
        # If no timeout was specified, we still run this process in a
        # separate process group and kill the entire process group
        # when the child is done executing.  That means that
        # orphaned child processes created by the test will be
        # cleaned up.
        timeout = -2
    e = qm.executable.Filter(self.stdin, timeout)
    # Run it.
    exit_status = e.Run(arguments, environment, path=program)

    # If the process terminated normally, check the outputs.
    if sys.platform == "win32" or os.WIFEXITED(exit_status):
        # There are no causes of failure yet.
        causes = []
        # The target program terminated normally.  Extract the
        # exit code, if this test checks it.
        if self.exit_code is None:
            exit_code = None
        elif sys.platform == "win32":
            exit_code = exit_status
        else:
            exit_code = os.WEXITSTATUS(exit_status)
        # Get the output generated by the program.
        stdout = e.stdout
        stderr = e.stderr
        # Record the results.
        result["ExecTest.exit_code"] = str(exit_code)
        result["ExecTest.stdout"] = result.Quote(stdout)
        result["ExecTest.stderr"] = result.Quote(stderr)
        # Check to see if the exit code matches.
        if exit_code != self.exit_code:
            causes.append("exit_code")
            result["ExecTest.expected_exit_code"] = str(self.exit_code)
        # Check to see if the standard output matches.
        if not self.__CompareText(stdout, self.stdout):
            causes.append("standard output")
            result["ExecTest.expected_stdout"] = result.Quote(self.stdout)
        # Check to see that the standard error matches.
        if not self.__CompareText(stderr, self.stderr):
            causes.append("standard error")
            result["ExecTest.expected_stderr"] = result.Quote(self.stderr)
        # If anything went wrong, the test failed.
        if causes:
            result.Fail("Unexpected %s." % string.join(causes, ", "))
    elif os.WIFSIGNALED(exit_status):
        # The target program terminated with a signal.  Construe
        # that as a test failure.
        signal_number = str(os.WTERMSIG(exit_status))
        result.Fail("Program terminated by signal.")
        result["ExecTest.signal_number"] = signal_number
    elif os.WIFSTOPPED(exit_status):
        # The target program was stopped.  Construe that as a
        # test failure.
        signal_number = str(os.WSTOPSIG(exit_status))
        result.Fail("Program stopped by signal.")
        result["ExecTest.signal_number"] = signal_number
    else:
        # The target program terminated abnormally in some other
        # manner.  (This shouldn't normally happen...)
        result.Fail("Program did not terminate normally.")
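The branches above can be collapsed into a single subprocess-style convention (non-negative exit code, negative signal number); this helper is a sketch of that normalization, not part of QMTest.

import os
import sys

def normalized_exit_code(exit_status):
    """Collapse a platform wait status into a subprocess-style return code.

    Non-negative values are exit codes; negative values are -signal_number.
    On win32 the status already is the exit code.
    """
    if sys.platform == "win32":
        return exit_status
    if os.WIFEXITED(exit_status):
        return os.WEXITSTATUS(exit_status)
    if os.WIFSIGNALED(exit_status):
        return -os.WTERMSIG(exit_status)
    if os.WIFSTOPPED(exit_status):
        return -os.WSTOPSIG(exit_status)
    return -128   # abnormal termination; shouldn't normally happen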
print("Usage: %s program [argument...]", file=sys.stderr) sys.exit(1) pid = os.fork() if not pid: # child pinktrace.trace.me() os.kill(os.getpid(), signal.SIGSTOP) try: os.execvp(sys.argv[1], sys.argv[1:]) except OSError: os._exit(-1) pid, status = os.waitpid(pid, 0) assert os.WIFSTOPPED(status), "%#x" % status assert os.WSTOPSIG(status) == signal.SIGSTOP, "%#x" % status # parent # Figure out the bitness of the child. bitness = pinktrace.bitness.get(pid) print("Child %d runs in %s mode" % (pid, pinktrace.bitness.name(bitness))) inexecve = False insyscall = False sig = 0 exit_code = 0 while True: # At this point the traced child is stopped and needs to be resumed. pinktrace.trace.syscall(pid, sig) sig = 0 pid, status = os.waitpid(pid, 0)
#!/usr/bin/env python
# coding: utf-8

"""
An example demonstrating the tracing fork on FreeBSD.
"""

from __future__ import print_function

import os, signal

import pinktrace.event
import pinktrace.trace

pid = os.fork()
if not pid:
    # Prepare for tracing.
    pinktrace.trace.me()
    # Stop to give the parent a chance to resume execution after setting options.
    os.kill(os.getpid(), signal.SIGSTOP)

    print("hello world")
else:  # parent
    pid, status = os.waitpid(pid, 0)
    assert os.WIFSTOPPED(status), "%#x" % status
    assert os.WSTOPSIG(status) == signal.SIGSTOP, "%#x" % status

    # Let the child resume its execution.
    pinktrace.trace.resume(pid)

    # Wait for the child to exit.
    os.waitpid(pid, 0)
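The same stop-and-resume handshake can be illustrated with only the standard library (no pinktrace): because the child is not ptraced in this sketch, the parent must pass os.WUNTRACED to observe the stop, and SIGCONT stands in for pinktrace.trace.resume().

import os
import signal

pid = os.fork()
if pid == 0:
    os.kill(os.getpid(), signal.SIGSTOP)   # child stops itself
    print("hello world")
    os._exit(0)

# Parent: wait for the stop, then resume the child and reap it.
pid, status = os.waitpid(pid, os.WUNTRACED)
assert os.WIFSTOPPED(status), "%#x" % status
assert os.WSTOPSIG(status) == signal.SIGSTOP, "%#x" % status

os.kill(pid, signal.SIGCONT)
os.waitpid(pid, 0)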
def execute(self, inp):
    self.old_code = {}
    self.visited_bbs = set([])

    self.f.truncate()
    self.f.seek(0)
    self.f.write(bytearray(inp))
    self.f.seek(0)
    self.f.flush()

    has_crashed = False
    pid = os.fork()
    if pid > 0:
        # parent
        # wait for execv to be called, waitpid returns if it was
        os.waitpid(pid, 0)

        self.base = 0
        executable_mappings = []
        basename = os.path.basename(self.binary)
        for m in fastReadProcessMappings(pid):
            (start, end, permissions, pathname) = m
            if pathname and basename in pathname and "x" in permissions:
                executable_mappings.append(m)
        #print "%s" % executable_mappings
        if not executable_mappings:
            raise Exception("Could not find mapping with executable flag")
        if len(executable_mappings) > 1:
            #raise Exception
            print("Note: there are more than one executable mappings, "
                  "don't know which one to use. @TODO add support")
            print(executable_mappings)
        # select the executable mapping with the lowest starting address (@TODO)
        self.selected_map = min(executable_mappings, key=lambda m: m[0])
        self.base = self.selected_map[0]  # selected_map.start

        # sometimes IDA has found the complete address already,
        # in which case just use the BB addresses from IDA (set base=0)
        self.mem = open('/proc/%d/mem' % pid, "rb+", 0)
        try:
            self.mem.seek(self.main_address)
            # does the code at this address look like the main function?
            if self.mem.read(8) == self.main_code:
                # yes, set base = 0
                self.base = 0
        except:
            pass

        # go through all BBs that were not visited yet
        # (no breakpoint necessary for BBs that were already visited)
        if self.skip_visited_bbs:
            iterate_bbs = self.all_bbs - self.visited_bbs
        else:
            iterate_bbs = self.all_bbs

        error_while_reading = 0
        for bb in iterate_bbs:
            #self.log.info("Set breakpoint for %x" % bb)
            bb = self.base + bb
            self.mem.seek(bb)
            try:
                self.old_code[bb] = self.mem.read(1)
            except:
                error_while_reading += 1
                continue
            self.mem.seek(bb)
            self.mem.write(b"\xCC")
            #self.libc.ptrace(PTRACE_POKETEXT, pid, bb, "\xCC")

        #if error_while_reading > 0:
        #    print("Attention: got errors while reading %d BBs (out of %d), ignoring those." % (error_while_reading, len(iterate_bbs)))
        if error_while_reading == len(iterate_bbs):
            raise Exception(
                "Couldn't add a single breakpoint, maybe issue with calculating correct base address?"
            )

        #self.log.info("cont'ing ptraced process")
        self.libc.ptrace(PTRACE_CONT, pid, 0, 0)

        regs = UserRegsStruct()
        while True:
            try:
                (p, status) = os.waitpid(pid, 0)
            except:
                #self.log.info("process %d does not exist anymore" % pid)
                break

            if os.WIFEXITED(status):
                #self.log.info("process %d exited with %d (%d)" % (pid, status, os.WIFEXITED(status)))
                break
            elif os.WIFSIGNALED(status):
                #self.log.info("process %d crashed with %d (%d)" % (pid, status, os.WIFSIGNALED(status)))
                has_crashed = True
                break
            elif os.WIFSTOPPED(status):
                #self.log.info("received WIFSTOPPED with %d (%d), signal %d" % (status, os.WIFSTOPPED(status), os.WSTOPSIG(status)))
                signalNum = os.WSTOPSIG(status)
                if signalNum == signal.SIGTRAP:
                    # breakpoint hit: record the BB, restore the original byte,
                    # rewind rip past the int3 and continue
                    self.libc.ptrace(PTRACE_GETREGS, pid, 0, ctypes.byref(regs))
                    regs.rip -= 1
                    self.visited_bbs.add(regs.rip - self.base)
                    #self.log.info("SIGTRAP received at %x" % regs.rip)
                    self.mem.seek(regs.rip)
                    self.mem.write(self.old_code[regs.rip])
                    self.libc.ptrace(PTRACE_SETREGS, pid, 0, ctypes.byref(regs))
                    self.libc.ptrace(PTRACE_CONT, pid, 0, 0)
                else:
                    #self.log.info("got unknown signal %d, probably crash" % signalNum)
                    has_crashed = True
                    break
            else:
                #self.log.info("got unknown status %d" % pid)
                pass

        try:
            os.kill(pid, 9)
        except:
            pass
    else:
        # child
        resource.setrlimit(resource.RLIMIT_AS,
                           (MAX_MEMORY, MAX_MEMORY))   # process memory limit
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))   # maximum size of core file
        nulf = os.open(os.devnull, os.O_WRONLY)
        os.dup2(nulf, sys.stdout.fileno())
        os.dup2(nulf, sys.stderr.fileno())
        os.close(nulf)
        self.libc.ptrace(PTRACE_TRACEME, 0, 0, 0)
        os.execv(self.binary, self.arguments)
        os._exit(0)

    return has_crashed
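fastReadProcessMappings() itself is not shown above; assuming it parses /proc/<pid>/maps into (start, end, permissions, pathname) tuples, a rough Linux-only stand-in could look like the following sketch (names and output format are hypothetical).

import os

def executable_mappings(pid, name):
    """Yield (start, end, permissions, pathname) tuples for executable
    mappings of `name` in /proc/<pid>/maps; a rough stand-in for a helper
    such as fastReadProcessMappings()."""
    with open('/proc/%d/maps' % pid) as maps:
        for line in maps:
            fields = line.split()
            addr, perms = fields[0], fields[1]
            pathname = fields[5] if len(fields) > 5 else ''
            if 'x' in perms and name in pathname:
                start, end = (int(x, 16) for x in addr.split('-'))
                yield (start, end, perms, pathname)

# Example: executable mappings of the current Python interpreter.
for m in executable_mappings(os.getpid(), 'python'):
    print(m)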