def execute(self):
    if self.history_with_pipe:
        do_full_command(str(self))
    else:
        if self.command in shellCommandList:
            if self.is_part_of_pipe:
                child_pid = os.fork()
                if child_pid == 0:
                    os.dup2(self.fd_in, sys.stdin.fileno())
                    os.dup2(self.fd_out, sys.stdout.fileno())
                    shellCommands[self.command](self.arguments)
                elif self.is_job:
                    job_list.add_job(child_pid, self)
                else:
                    os.wait()
            else:
                shellCommands[self.command](self.arguments)
        else:
            child_pid = os.fork()
            if child_pid == 0:
                os.dup2(self.fd_in, sys.stdin.fileno())
                os.dup2(self.fd_out, sys.stdout.fileno())
                os.execvp(self.command, self.arguments)
            elif self.is_job:
                job_list.add_job(child_pid, self)
            else:
                os.wait()
def stop_all_httpd(self):
    if not self._httpd_pids:
        return
    for pid in self._httpd_pids:
        os.kill(pid, signal.SIGKILL)
        logger.info('httpd killed. PID: {0}'.format(pid))
        os.wait()
def GET(self, uid, gid):
    web.header("Content-type", "text/plain")
    uid, gid = map(int, (uid, gid))
    # Security
    if uid < 500 or gid < 500:
        yield "Invalid UID (%d) or GID (%d)\n" % (uid, gid)
        return
    try:
        pwd.getpwuid(uid)
    except KeyError:
        yield "UID (%d) does not exist\n" % uid
        return
    for k in stop_program().GET():
        yield k
    yield "Starting program with %d/%d\n" % (uid, gid)
    #p = subprocess.Popen(EXEC_SH, shell=True,
    #                     preexec_fn=lambda: change_user(uid, gid))
    # This fails when running as daemon
    rpipe, wpipe = os.pipe()  # Reference: http://ameblo.jp/oyasai10/entry-10615215673.html
    pid = os.fork()
    if pid == 0:
        # Child
        os.close(rpipe)
        wpipe = os.fdopen(wpipe, "w")
        change_user(uid, gid)
        p = subprocess.Popen(EXEC_SH, shell=True)
        wpipe.write("%d\n" % p.pid)
        sys.exit()
    else:
        # Parent
        os.close(wpipe)
        rpipe = os.fdopen(rpipe, "r")
        pid = int(rpipe.readline().strip())
        open(PID_FILE, "w").write("%d" % pid)
        os.wait()  # Wait for child
def executor(command, arguments, inputredirect, outputredirect):
    pid = os.fork()
    if pid == 0:
        Redirector(inputredirect, outputredirect)
        os.execvp(command, arguments)
    os.wait()
def stop(self):
    """
    Tells the ChromeDriver to stop and cleans up the process
    """
    # If it's dead, don't worry
    if self.process is None:
        return

    # Tell the server to die!
    import urllib2
    urllib2.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
    count = 0
    while not utils.is_connectable(self.port):
        if count == 30:
            break
        count += 1
        time.sleep(1)

    # Tell the server to properly die in case
    try:
        if self.process:
            os.kill(self.process.pid, signal.SIGTERM)
            os.wait()
    except AttributeError:
        # kill may not be available under windows environment
        pass
def start(vserver):
    if os.getuid() == 0:
        # run directly
        return commands.getoutput("%s %s start" % (cfg.VSERVER, vserver))
    else:
        # in this case assume we're called from mod_python. things aren't nearly
        # as simple - if we were to start the vserver directly, its init process would
        # inherit all our file descriptors for as long as the vserver will run,
        # making it impossible to restart httpd on the main server since its ip/port
        # will remain open. so we have to fork then close all file descriptors.
        pid = os.fork()
        if pid == 0:
            # in child
            # now close all file descriptors
            for fd in range(os.sysconf("SC_OPEN_MAX")):
                try:
                    os.close(fd)
                except OSError:
                    # ERROR (ignore)
                    pass
            # only now is it OK to do our thing
            os.system("%s vserver-start %s > /dev/null 2>&1 &" % (cfg.OVWRAPPER, vserver))
            # exit child
            os._exit(0)
        else:
            # wait on the child to avoid a defunct (zombie) process
            os.wait()
def bjsonExport(objfiles, path):
    '''
    Creates .bjson files from existing .obj files
    @param: objfiles - a list of strings representing full paths to .obj files
    @param: path - a string to the path of the directory for .bjson files to go
    @return: a list of strings that contain the full paths to all of the
             .bjson files that were created
    @post: directory for 'path' is created if it wasn't already
    '''
    # Create directory if it doesn't exist
    if not os.path.exists(path):
        os.makedirs(path)
    bjsonfiles = []
    for objfile in objfiles:
        filebase = os.path.splitext(os.path.basename(objfile))[0]
        filename = filebase + '.bjson'
        outfile = os.path.join(path, filename)
        cmdstr = '/opt/hfs.current/bin/gwavefront ' + objfile + ' ' + outfile
        print("Converting '" + objfile + "' to '" + outfile + "'...")
        sp.Popen(cmdstr, shell=True)
        os.wait()
        print("\nCOMPLETED")
        bjsonfiles.append(outfile)
    return bjsonfiles
def fork(f):
    """Run a function in a double-forked (daemonized) process.

    In the parent this returns a no-op lambda; in the daemonized
    grandchild it returns a wrapper that runs f and exits."""
    # Perform double fork
    r = ""
    if os.fork():  # Parent
        # Wait for children so that they don't get defunct.
        os.wait()
        # Return a function
        return lambda *x, **kw: r

    # Otherwise, we are the child
    # Perform second fork
    os.setsid()
    os.umask(077)
    os.chdir("/")
    if os.fork():
        os._exit(0)

    def wrapper(*args, **kwargs):
        """Executes the function bound to the wrapper and then
        exits the process"""
        f(*args, **kwargs)
        os._exit(0)
    return wrapper
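# A minimal usage sketch for the double-fork helper above (an assumption, not
# from the original source; `send_report` is a hypothetical payload). The
# returned callable must be invoked immediately: the parent's call is a no-op,
# while the grandchild's call runs the payload and os._exit()s, so the
# grandchild never executes any code past this point.
def send_report(path):
    with open(path, "w") as out:
        out.write("done\n")

fork(send_report)("/tmp/report.txt")  # parent returns immediately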
def watch(self):
    try:
        os.wait()
    except KeyboardInterrupt:
        print ' exit...'
        self.kill()
        sys.exit()
def Spawn(self, command, ExitCode=0):
    if os.fork() == 0:
        if os.fork():
            sys.exit(0)
        spawnopts = shlex.split(command)
        os.execvpe(spawnopts[0], spawnopts, os.environ)
    os.wait()
def testRoundtripAfterFork(self):
    """
    Test that we can roundtrip encrypt / decrypt after forking
    without error.
    """
    if fork() == 0:
        key = createKey()
        data = {u'user': u'aliafshar', u'id': u'91821212'}
        token = dataToToken(key, data)
        self.assertEqual(data, tokenToData(key, token))
        # This is horrible, but necessary: Turn the child into a sleep
        # process, so trial doesn't get its knickers in a knot when the
        # child tries to remove the trial lock file. I.e., politically
        # 'vanish' the child process and trial (the parent) removes the
        # lock as normal.
        #
        # This is necessary because trial checks the PID of the process
        # when removing the lock file. So apart from having two
        # processes trying to remove the same lock file (which causes
        # one kind of error), if the child gets there first, there is a
        # PID-mismatch error.
        execl('/bin/sleep', 'sleep', '0.01')
    else:
        # The parent waits for the child and exits normally.
        wait()
def watcher():
    """This little code snippet is from
    http://greenteapress.com/semaphores/threading_cleanup.py (2012-07-31)

    It's now possible to interrupt the testrunner via ctrl-c at any time
    in a platform neutral way."""
    if sys.platform == 'win32':
        p = Process(target=main, name="MainProcess")
        p.start()
        try:
            p.join()
        except KeyboardInterrupt:
            print 'KeyBoardInterrupt'
            p.terminate()
    else:
        child = os.fork()
        if child == 0:
            main()  # child runs test
        try:
            os.wait()
        except KeyboardInterrupt:
            print 'KeyBoardInterrupt'
            try:
                os.kill(child, signal.SIGKILL)
            except OSError:
                pass
        except OSError:
            pass
    sys.exit()
def run_forever(self, *args, **kwargs):
    """
    Run the updater continuously.
    """
    time.sleep(random() * self.interval)
    while True:
        self.logger.info(_('Begin container update sweep'))
        begin = time.time()
        now = time.time()
        expired_suppressions = \
            [a for a, u in self.account_suppressions.items() if u < now]
        for account in expired_suppressions:
            del self.account_suppressions[account]
        pid2filename = {}
        # read from account ring to ensure it's fresh
        self.get_account_ring().get_nodes('')
        for path in self.get_paths():
            while len(pid2filename) >= self.concurrency:
                pid = os.wait()[0]
                try:
                    self._load_suppressions(pid2filename[pid])
                finally:
                    del pid2filename[pid]
            fd, tmpfilename = mkstemp()
            os.close(fd)
            pid = os.fork()
            if pid:
                pid2filename[pid] = tmpfilename
            else:
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                eventlet_monkey_patch()
                self.no_changes = 0
                self.successes = 0
                self.failures = 0
                self.new_account_suppressions = open(tmpfilename, 'w')
                forkbegin = time.time()
                self.container_sweep(path)
                elapsed = time.time() - forkbegin
                self.logger.debug(
                    _('Container update sweep of %(path)s completed: '
                      '%(elapsed).02fs, %(success)s successes, %(fail)s '
                      'failures, %(no_change)s with no changes'),
                    {'path': path, 'elapsed': elapsed,
                     'success': self.successes, 'fail': self.failures,
                     'no_change': self.no_changes})
                sys.exit()
        while pid2filename:
            pid = os.wait()[0]
            try:
                self._load_suppressions(pid2filename[pid])
            finally:
                del pid2filename[pid]
        elapsed = time.time() - begin
        self.logger.info(_('Container update sweep completed: %.02fs'),
                         elapsed)
        dump_recon_cache({'container_updater_sweep': elapsed},
                         self.rcache, self.logger)
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
def main():
    if len(sys.argv) == 5:
        _, concurrency, messages, url, logd_pid = sys.argv
    elif len(sys.argv) == 4:
        _, concurrency, messages, url = sys.argv
        logd_pid = 0
    else:
        print 'usage: %s <concurrency> <messages> <url> [logd pid]' % (
            sys.argv[0],)
        sys.exit(1)
    concurrency = int(concurrency)
    messages = int(messages)
    logd_pid = int(logd_pid)
    s = time.time()
    child = spawn(concurrency, messages, url, logd_pid)
    if child:
        return
    print 'SIGHUP Loop done, waiting on punishers'
    while 1:
        try:
            os.wait()
        except OSError:
            print 'CHILD processes already done'
            break
    e = time.time()
    total = concurrency * messages
    print 'FIN - Sent %d messages in %d seconds (%f per second)' % (
        total, e - s, total / (e - s))
def handle_files(workdir, file_fw=False, file_rv=False,
                 hts_dir=False, htses=False, reference=False):
    if file_fw and file_rv:
        name_reads = file_from_path(file_fw)[0:-6]
        outdir = workdir + name_reads + '/'
        handle_hts(file_fw, file_rv, outdir, reference=reference)
    elif hts_dir and htses:
        process_count = 0
        for fw, rv in htses:
            file_fw = hts_dir + fw
            file_rv = hts_dir + rv
            name_fw = file_from_path(file_fw)
            name_rv = file_from_path(file_rv)
            name_reads = name_fw[0:-6]
            outdir = workdir + name_reads + '/'
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            if not MULTIPROC:
                if not ONLY_FIND:
                    handle_hts(file_fw, file_rv, outdir)
                else:
                    handle_hts(file_fw, file_rv, outdir)
            else:
                pid = os.fork()
                time.sleep(0.1)
                if pid == 0:
                    if not ONLY_FIND:
                        handle_hts(file_fw, file_rv, outdir)
                    else:
                        handle_hts(file_fw, file_rv, outdir)
                    os.abort()
                else:
                    process_count += 1
                    if process_count >= MAX_PROCESSES:
                        os.wait()
                        process_count -= 1
    else:
        print "Error: handle_files didn't get the values it needs"
        return 0
def main():
    signal.signal(signal.SIGINT, signal_handler)
    while True:
        line = input('dtshell$ ')
        args = line.split(' ')
        if line == '':
            continue
        elif args[0] == 'exit':
            sys.exit()
        elif args[0] == 'cd':
            os.chdir(args[1])
        elif '|' in args:
            read, write = os.pipe()
            pipe_location = args.index('|')
            pipe_exec(args[:pipe_location], args[pipe_location + 1:], read, write)
        else:
            pid = os.fork()
            if pid == 0:
                execute(args)
            else:
                os.wait()
def watch(self):
    try:
        os.wait()
    except KeyboardInterrupt:
        print '\nKeyboard Interrupt caught, exiting...'
        self.kill()
        sys.exit()
def pstopngmono(pslines):
    cmd = 'gs -sDEVICE=pngmono -sOutputFile=- -sPAPERSIZE=a4 -dQUIET -r100 -g800x600 -'
    p = popen2.Popen3(cmd, 1)
    sys.stdout.flush()
    sys.stderr.flush()
    pid = os.fork()
    if pid == 0:
        p.fromchild.close()
        for l in pslines:
            p.tochild.write('%s\n' % l)
        p.tochild.close()
        os._exit(os.EX_OK)
    p.tochild.close()
    png = p.fromchild.read()
    p.fromchild.close()
    status = p.wait()
    if status != 0:
        errmsgList = []
        errmsg = p.childerr.readline()
        while errmsg:
            errmsgList.append(errmsg.strip())
            errmsg = p.childerr.readline()
        raise StandardError, 'error running gs: %d (%s)' % (status, ', '.join(errmsgList))
    os.wait()
    return png
def execute(args):
    cpid = os.fork()
    if cpid == 0:
        os.execvp(args[0], args)
    else:
        os.wait()
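# A usage sketch for the fork/exec/wait helper above (assumption: args is an
# argv-style list whose first element is the program name).
execute(['ls', '-l', '/tmp'])  # parent blocks in os.wait() until ls exits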
def function(*arg):
    while True:
        try:
            os.wait()
        except OSError:
            pass
        sleep(1)
def harvest_all(mycnf, host=None, db=None, full=False, numProc=5,
                url=None, stream_filter=None):
    if url:
        urls = [url]
    else:
        fobj = StringIO()
        nop = lambda x: True
        curl = MyCurl(fobj.write, nop)
        arcsurl = MyUrl("http://www.openarchives.org/Register/ListFriends")
        curl.fetch(arcsurl)
        fobj.seek(0)
        data = fobj.read()
        fobj.close()
        pat = re.compile(r"<baseurl.*?>(.*?)</baseurl>", re.I | re.S)
        urls = []
        for m in pat.finditer(data):
            url = m.group(1)
            if "language-archives" in url and "olaca" in url:
                continue
            urls.append(url)

    logs = {}
    P = {}

    def printlog(pid):
        url = P[pid]
        log = logs[url]
        print >>sys.stderr, "**********************************************************************"
        print >>sys.stderr, url
        print >>sys.stderr, "**********************************************************************"
        print >>sys.stderr, file(log).read()
        print >>sys.stderr
        print >>sys.stderr
        os.unlink(log)

    N = min(10, max(1, numProc))
    while urls:
        url = urls.pop()
        logs[url] = tempfile.mktemp()
        pid = os.fork()
        if pid == 0:
            global LOG
            LOG = file(logs[url], "w")
            harvest_single(url, mycnf, host, db, full, stream_filter)
            sys.exit(0)
        else:
            P[pid] = url
        if len(P) >= N:
            pid, status = os.wait()
            printlog(pid)
            del P[pid]
    while P:
        pid, status = os.wait()
        printlog(pid)
        del P[pid]
def boundedCheck(target, check, timer, logger):
    """
    Execute the given *check* on the given *target* (*target* should be an
    IP address). The *timer* is used to retrieve the timeout, and useful
    information will be sent using the *logger*. This function will fork(),
    and the check will be executed in the child process. The parent process
    will wait for the child process, and kill it if it did not answer
    within the timeout specified by the *timer* implementation.
    """
    timeout = timer.getCheckTimeout()
    deadline = time.time() + timeout
    logger.debug('starting check {0} on {1}, timeout={2}'
                 .format(check, target, timeout))
    # Use self-pipe trick: setup a SIGCHLD handler to write 1 byte to a pipe
    # (and select() on that pipe)
    pipe = os.pipe()

    def sigchld(sig, frame):
        try:
            os.write(pipe[1], ' ')
        except:
            pass
    signal.signal(signal.SIGCHLD, sigchld)
    pid = os.fork()
    if pid:
        # parent process: wait for the child
        while time.time() < deadline:
            timeout = max(0, deadline - time.time())
            try:
                rfds, wfds, efds = select.select([pipe[0]], [], [], timeout)
            except select.error as err:
                if err.args[0] == errno.EINTR:
                    continue
            if rfds:
                # something in the pipe = got a SIGCHLD
                logger.debug('child exited, retrieving its status')
                childpid, status = os.wait()
                logger.debug('child exit status={0}'.format(status))
                retval = (status == 0)
            else:
                # timeout
                logger.info('child timeout, killing it')
                os.kill(pid, signal.SIGKILL)
                logger.debug('reaping child process')
                os.wait()
                retval = False
            os.close(pipe[0])
            os.close(pipe[1])
            logger.debug('check result is {0}'.format(retval))
            return retval
    else:
        # child process: do the check
        try:
            if check.check(target):
                exit(0)
            else:
                exit(1)
        except Exception:
            exit(2)
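# A follow-up note (an addition, not part of the original function): the
# sigchld handler installed above stays registered after boundedCheck()
# returns, so a caller that later spawns children elsewhere may want to
# restore the default disposition once the check is done:
import signal

signal.signal(signal.SIGCHLD, signal.SIG_DFL)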
def createConnections():
    # Create command connection
    sock = socket.create_connection((sys.argv[1], sys.argv[2]))
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    comm = ""
    # Build command from command line arguments
    for i in range(3, len(sys.argv)):
        comm += sys.argv[i]
        if i < len(sys.argv) - 1:
            comm += " "
    # Fork to allow time for listening socket to set up
    pid = os.fork()
    if pid == 0:
        sleep(0.001)
        sendMessage(sock, comm)
        exit(0)
    else:
        # Create listening data socket, get data connection
        if sys.argv[3] == "-g" or sys.argv[3] == "-l":
            datas = createDataConnection()
            os.wait()
            return datas
        # Other requests don't require a data connection
        else:
            return sock
def procExec(self, command):
    pidList = []
    for pkg in self.buildList:
        pid = os.fork()
        if pid == 0:
            # we must create the command line
            commandLine = command
            # run the command line
            print commandLine
            status = os.system(commandLine)
            if status == 0:
                self.successList.append(pkg)
            elif status != 0:
                self.failureList.append(pkg)
            # we must check pending
            sys.exit(status)
        else:
            pidList.append([pkg, pid])
            self.restList.remove(pkg)
            print pidList
        # wait if over max processes
        while len(pidList) >= self.max_proc:
            pid_end = os.wait()
            for pkg_pid in pidList:
                if pkg_pid[1] == pid_end[0]:
                    pidList.remove(pkg_pid)
    # wait for all child processes
    for pid in pidList:
        os.wait()
def import_tables(inst_host, inst_port, mysql_user, mysql_pass,
                  table_file_list, backup_dir, parallelism):
    socket = get_socket(inst_port)
    parallel_count = 0
    ps = {}
    for table_file in table_file_list:
        db = table_file.split("/")[-2]
        table_name = table_file.split("/")[-1].split(".")[-2]
        args = ['mysqlimport', '-h', inst_host, '-u', mysql_user,
                '--socket', socket, '--force', '--ignore',
                db, backup_dir + "/" + db + "/" + table_name + ".txt"]
        p = subprocess.Popen(args)
        ps[p.pid] = p
        print ps
        parallel_count = parallel_count + 1
        if parallel_count >= parallelism:
            print "Waiting for %d processes..." % len(ps)
            while ps:
                pid, status = os.wait()
                if pid in ps:
                    del ps[pid]
                    print "Waiting for %d processes..." % len(ps)
            else:
                parallel_count = 0
    # Do this until you get a better method of handling threads
    while ps:
        print "Waiting on last import processes to complete"
        pid, status = os.wait()
        if pid in ps:
            del ps[pid]
            print "Waiting for %d processes..." % len(ps)
def watch(self): try: os.wait() except KeyboardInterrupt: print("KeyBoardInterrupt") self.kill() sys.exit()
def audit_loop(self, parent, zbo_fps, override_devices=None, **kwargs):
    """Parallel audit loop"""
    self.clear_recon_cache('ALL')
    self.clear_recon_cache('ZBF')
    once = kwargs.get('mode') == 'once'
    kwargs['device_dirs'] = override_devices
    if parent:
        kwargs['zero_byte_fps'] = zbo_fps
        self.run_audit(**kwargs)
    else:
        pids = set()
        if self.conf_zero_byte_fps:
            zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
            pids.add(zbf_pid)
        if self.concurrency == 1:
            # Audit all devices in 1 process
            pids.add(self.fork_child(**kwargs))
        else:
            # Divide devices amongst parallel processes set by
            # self.concurrency.  Total number of parallel processes
            # is self.concurrency + 1 if zero_byte_fps.
            parallel_proc = self.concurrency + 1 if \
                self.conf_zero_byte_fps else self.concurrency
            device_list = list(override_devices) if override_devices else \
                listdir(self.devices)
            shuffle(device_list)
            while device_list:
                pid = None
                if len(pids) == parallel_proc:
                    pid = os.wait()[0]
                    pids.discard(pid)
                if self.conf_zero_byte_fps and pid == zbf_pid and once:
                    # If we're only running one pass and the ZBF scanner
                    # finished, don't bother restarting it.
                    zbf_pid = -100
                elif self.conf_zero_byte_fps and pid == zbf_pid:
                    # When we're running forever, the ZBF scanner must
                    # be restarted as soon as it finishes.
                    kwargs['device_dirs'] = override_devices
                    # sleep between ZBF scanner forks
                    self._sleep()
                    zbf_pid = self.fork_child(zero_byte_fps=True, **kwargs)
                    pids.add(zbf_pid)
                else:
                    kwargs['device_dirs'] = [device_list.pop()]
                    pids.add(self.fork_child(**kwargs))
        while pids:
            pid = os.wait()[0]
            # ZBF scanner must be restarted as soon as it finishes
            # unless we're in run-once mode
            if self.conf_zero_byte_fps and pid == zbf_pid and \
                    len(pids) > 1 and not once:
                kwargs['device_dirs'] = override_devices
                # sleep between ZBF scanner forks
                zbf_pid = self.fork_child(zero_byte_fps=True,
                                          sleep_between_zbf_scanner=True,
                                          **kwargs)
                pids.add(zbf_pid)
            pids.discard(pid)
def watch(self): try: os.wait() except KeyboardInterrupt: print "Ctrl-c received! Sending kill to threads..." self.kill() sys.exit()
def setup_cleaner_process(workdir):
    ppid = os.getpid()
    pid = os.fork()
    if pid == 0:
        os.setsid()
        pid = os.fork()
        if pid == 0:
            try:
                import psutil
            except ImportError:
                os._exit(1)
            try:
                psutil.Process(ppid).wait()
                os.killpg(ppid, signal.SIGKILL)  # kill workers
            except Exception:
                pass  # make sure to exit
            finally:
                for d in workdir:
                    while os.path.exists(d):
                        try:
                            shutil.rmtree(d, True)
                        except:
                            pass
        os._exit(0)
    os.wait()
def deployData(self, wfConfig):
    log.debug("GenericPipelineWorkflowConfigurator:deployData")
    # add data deploy here
    if wfConfig.configuration["generic"] != None:
        configuration = wfConfig.configuration["generic"]
        if configuration.deployData != None:
            deployConfig = configuration.deployData
            dataRepository = deployConfig.dataRepository
            dataRepository = EnvString.resolve(dataRepository)
            deployScript = deployConfig.script
            deployScript = EnvString.resolve(deployScript)
            collection = deployConfig.collection
            if os.path.isfile(deployScript) == True:
                runDir = self.directories.getDefaultRunDir()
                deployCmd = [deployScript, runDir, dataRepository, collection]
                print ">>> ", deployCmd
                pid = os.fork()
                if not pid:
                    os.execvp(deployCmd[0], deployCmd)
                os.wait()[0]
            else:
                log.debug("GenericPipelineWorkflowConfigurator:deployData: warning: script '%s' doesn't exist" % deployScript)
def run(pov_path, target_path, *, flag=None, result=None):
    if result is None:
        result = {}
    if not flag:
        flag = os.urandom(4096)
    assert len(flag) == 4096
    flag_fd = os.memfd_create('flag')
    flag_path = f'/proc/{os.getpid()}/fd/{flag_fd}'
    os.write(flag_fd, flag)
    result['flag'] = flag.decode('latin')

    child_conn, parent_conn = multiprocessing.Pipe(duplex=True)

    def dup_child_3():
        os.dup2(child_conn.fileno(), 3, inheritable=True)

    pov_seed = str(int.from_bytes(os.urandom(3), 'little'))
    pov_popen = subprocess.Popen(
        ['qemu-cgc/i386-linux-user/qemu-i386', '-seed', pov_seed, pov_path],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        pass_fds=(3, ),
        preexec_fn=dup_child_3)

    pov_type = b''.join(os.read(parent_conn.fileno(), 1) for _ in range(4))
    pov_type = int.from_bytes(pov_type, 'little')
    assert pov_type == 2
    os.write(parent_conn.fileno(), TYPE_2_DATA)

    def trace_me():
        libc.ptrace(PTRACE['TRACEME'], 0, 0, 0)

    target_seed = str(int.from_bytes(os.urandom(3), 'little'))
    target_popen = subprocess.Popen([
        'qemu-cgc/i386-linux-user/qemu-i386', '-magicpregen', flag_path,
        '-seed', target_seed, target_path
    ],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        preexec_fn=trace_me)

    result['interaction'] = []
    while True:
        pid, status = os.wait()
        if pid == target_popen.pid:
            sig = os.WSTOPSIG(status)
            if sig and sig != signal.SIGTRAP:
                result['signal'] = sig
                break
            if not os.WIFSTOPPED(status):
                break
        else:
            continue
        regs = user_regs_struct()
        libc.ptrace(PTRACE['GETREGS'], pid, 0, ctypes.byref(regs))
        syscall = SYSCALL_NAME[regs.orig_rax]
        syscall_start = ctypes.c_long(regs.rax).value == -errno.ENOSYS
        reading = SYSCALL_NAME[regs.orig_rax] == 'read' and regs.rdi == 0
        writing = SYSCALL_NAME[regs.orig_rax] == 'write' and regs.rdi == 1
        try:
            if reading and syscall_start:
                count = regs.rdx
                data = pov_popen.stdout.read1(
                    min(count, io.DEFAULT_BUFFER_SIZE))
                target_popen.stdin.write(data)
                target_popen.stdin.flush()
                result['interaction'].append(
                    ('read', count, data.decode('latin')))
                if not data:
                    break
            elif writing and not syscall_start:
                count = regs.rdx
                data = target_popen.stdout.read(count)
                pov_popen.stdin.write(data)
                pov_popen.stdin.flush()
                result['interaction'].append(
                    ('write', count, data.decode('latin')))
        except BrokenPipeError:
            break
        libc.ptrace(PTRACE['SYSCALL'], pid, 0, 0)

    pov_answer = b''.join(os.read(parent_conn.fileno(), 1) for _ in range(4))
    result['pov_answer'] = pov_answer.decode('latin')
    result['pov_answer_correct'] = pov_answer in flag
def run_wsgi(conf_path, app_section, *args, **kwargs):
    """
    Runs the server using the specified number of workers.

    :param conf_path: Path to paste.deploy style configuration file/directory
    :param app_section: App name from conf file to load config from
    """
    # Load configuration, Set logger and Load request processor
    try:
        (conf, logger, log_name) = \
            _initrp(conf_path, app_section, *args, **kwargs)
    except ConfigFileError as e:
        print e
        return
    # bind to address and port
    sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
    # remaining tasks should not require elevated privileges
    drop_privileges(conf.get('user', 'swift'))

    # Ensure the application can be loaded before proceeding.
    loadapp(conf_path, global_conf={'log_name': log_name})

    # set utils.FALLOCATE_RESERVE if desired
    reserve = int(conf.get('fallocate_reserve', 0))
    if reserve > 0:
        utils.FALLOCATE_RESERVE = reserve
    # redirect errors to logger and close stdio
    capture_stdio(logger)

    worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT)

    # Useful for profiling [no forks].
    if worker_count == 0:
        run_server(conf, logger, sock)
        return

    def kill_children(*args):
        """Kills the entire process group."""
        logger.error('SIGTERM received')
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        running[0] = False
        os.killpg(0, signal.SIGTERM)

    def hup(*args):
        """Shuts down the server, but allows running requests to complete"""
        logger.error('SIGHUP received')
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        running[0] = False

    running = [True]
    signal.signal(signal.SIGTERM, kill_children)
    signal.signal(signal.SIGHUP, hup)
    children = []
    while running[0]:
        while len(children) < worker_count:
            pid = os.fork()
            if pid == 0:
                signal.signal(signal.SIGHUP, signal.SIG_DFL)
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                run_server(conf, logger, sock)
                logger.notice('Child %d exiting normally' % os.getpid())
                return
            else:
                logger.notice('Started child %s' % pid)
                children.append(pid)
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                logger.error('Removing dead child %s' % pid)
                children.remove(pid)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            logger.notice('User quit')
            break
    greenio.shutdown_safe(sock)
    sock.close()
    logger.notice('Exited')
def grim_reaper(signum, frame):
    pid, status = os.wait()
    print('Child {pid} terminated with status {status}'
          '\n'.format(pid=pid, status=status))
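# A minimal sketch (an assumption, not from the original snippet) showing how
# a handler like grim_reaper is typically installed so the parent reaps
# children as they exit:
import os
import signal
import time

signal.signal(signal.SIGCHLD, grim_reaper)

pid = os.fork()
if pid == 0:       # child
    os._exit(0)    # exiting delivers SIGCHLD to the parent
time.sleep(1)      # give the handler a chance to run (crude, but fine for a demo)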
ret = subprocess.Popen([
    "../../../criu/criu", "dump", "-t",
    str(p.pid), "-v4", "--external", ttyid
]).wait()
if ret:
    sys.exit(ret)
p.wait()

new_master, slave = pty.openpty()  # get another pty pair
os.close(master)

ttyid = "fd[%d]:tty[%x:%x]" % (slave, st.st_rdev, st.st_dev)

ret = subprocess.Popen([
    "../../../criu/criu", "restore", "-v4", "--inherit-fd", ttyid,
    "--restore-sibling", "--restore-detach"
]).wait()
if ret:
    sys.exit(ret)
os.close(slave)

os.waitpid(-1, os.WNOHANG)  # is the process alive

os.close(new_master)
_, status = os.wait()

if not os.WIFSIGNALED(status) or not os.WTERMSIG(status):
    print status
    sys.exit(1)

print "PASS"
def watch(self):
    try:
        os.wait()
    except KeyboardInterrupt:
        self.kill()
        sys.exit()
# fork_js.py
import os
from time import sleep


def f1():
    sleep(3)
    print('first event')


def f2():
    sleep(4)
    print('second event')


pid = os.fork()

if pid < 0:
    print('error')
elif pid == 0:
    # create a second-level child process
    p = os.fork()
    if p == 0:
        f2()  # do the second task
    else:
        os._exit(0)
else:
    os.wait()  # returns once the first-level child exits
    f1()  # do the first task
def start(service, warnIfAlreadyStarted=True, sendEmail=True, maxWaitTime=30):
    '''Starts a service or the keeper itself.
    '''
    if service == 'all':
        for service in services:
            start(service, warnIfAlreadyStarted=warnIfAlreadyStarted,
                  sendEmail=sendEmail, maxWaitTime=maxWaitTime)
        return

    if service != 'keeper':
        checkRegistered(service)

    pids = getPIDs(service)

    # The service is running
    if len(pids) > 0:
        if warnIfAlreadyStarted:
            logging.warning(
                'Tried to start a service (%s) which is already running: %s',
                service, ','.join(pids))
        return

    # Before starting, try to get the latest log file
    previousLatestLogFile = _getLatestLogFile(service)

    logging.info('Starting %s.', service)

    # Unset LC_CTYPE in case it is still there (e.g. in OS X or, worse, when
    # ssh'ing from OS X to Linux using the default ssh_config) since some
    # CMSSW code crashes if the locale name is not valid.
    try:
        del os.environ['LC_CTYPE']
    except:
        pass

    # The service is not running, start it
    pid = os.fork()
    if pid == 0:
        daemon.DaemonContext(
            working_directory=getPath(service),
            umask=0077,
        ).open()

        # Run the service's starting script piping its output to rotatelogs
        # FIXME: Fix the services so that they do proper logging themselves
        extraCommandLine = '2>&1 | LD_LIBRARY_PATH=/lib64:/usr/lib64 /usr/sbin/rotatelogs %s %s' % (
            getLogPath(service), config.logsSize)

        if service == 'keeper':
            os.execlp('bash', 'bash', '-c',
                      './keeper.py keep ' + extraCommandLine)
        else:
            run(service, config.servicesConfiguration[service]['filename'],
                extraCommandLine=extraCommandLine)

    # Wait until the service has started
    wait(service, maxWaitTime=maxWaitTime, forStart=True)

    # Clean up the process table
    os.wait()

    # Alert users
    if sendEmail and config.getProductionLevel() != 'private':
        subject = '[keeper@' + socket.gethostname() + '] Started ' + service + ' service.'
        body = subject
        try:
            _sendEmail('*****@*****.**', config.startedServiceEmailAddresses,
                       [], subject, body)
        except Exception:
            logging.error('The email "' + subject + '" could not be sent.')

    # Try to remove the old hard link to the previous latest log file
    logHardLink = getLogPath(service)
    try:
        os.remove(logHardLink)
    except Exception:
        pass

    # Wait until the service creates some output
    # (i.e. until rotatelogs has created a new file)
    startTime = time.time()
    maxWaitTime = 20
    while True:
        if time.time() - startTime > maxWaitTime:
            raise Exception(
                'Service %s did not create any output after %s seconds.'
                % (service, maxWaitTime))

        latestLogFile = _getLatestLogFile(service)

        # If there is a log file
        if latestLogFile is not None:
            # If there was not a previous log file, latestLogFile is the new
            # one. If there was a previous log file, latestLogFile should be
            # different than the old one.
            if previousLatestLogFile is None or previousLatestLogFile != latestLogFile:
                break

        time.sleep(1)

    # Create the new hard link
    try:
        os.link(latestLogFile, logHardLink)
    except Exception as e:
        logging.warning('Could not create hard link from %s to %s: %s',
                        latestLogFile, logHardLink, e)

    logging.info('Started %s: %s', service, ','.join(getPIDs(service)))
if pid == 0:
    os.setsid()
    try:
        pid = os.fork()
    except OSError, e:
        raise Exception, "Exception occurred %s [%d]" % (e.strerror, e.errno)
        os._exit(0)
    if pid == 0:
        os.chdir(RUNNING_DIR)
        os.umask(027)
    else:
        os._exit(0)
else:
    os.wait()
    os._exit(0)  # Parent of First Child exits

# STDIN STDOUT STDERR to /dev/null
fd = os.open(os.devnull, os.O_RDWR)
os.dup(fd)  # STDOUT
os.dup(fd)  # STDERR

# signal.signal(signal.SIGHUP, signal.SIG_IGN)
# signal.signal(signal.SIGTERM, signal.SIG_IGN)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--socket',
                        default=DEFAULT_HOST + ':' + str(DEFAULT_PORT),
                        help='host:port')
    parser.add_argument('--queue-size', type=int, default=DEFAULT_QUEUE_SIZE,
                        help='Max requests per child')
    parser.add_argument('--count', type=int, default=DEFAULT_COUNT,
                        help='Num of workers')
            os.close(fd)
            path(pipeCommand2)

        if '&' in userInput:  # To run in background
            userInput = userInput.split('&')[0]
        args = userInput.split()
        if '>' in userInput:
            # If > in input, send to redirect method for output redirection
            redirect('>', userInput)
        elif '<' in userInput:
            # Input redirection
            redirect('<', userInput)
        else:
            if '/' in args[0]:
                # If '/' in user input, try the given path
                program = args[0]
                try:
                    os.execve(program, args, os.environ)
                except FileNotFoundError:
                    # If not found, give error
                    pass
            else:
                # If no redirection/path/piping, send to find path
                path(args)
    else:  # parent (forked ok)
        if not '&' in userInput:
            # If not '&', no background task, wait for child
            #os.write(1, ("Parent: My pid=%d. Child's pid=%d\n" % (pid, rc)).encode())
            childPidCode = os.wait()
            #os.write(1, ("Parent: Child %d terminated with exit code %d\n" % childPidCode).encode())
def run_django():
    signal.signal(signal.SIGCHLD, lambda _, __: os.wait())
    wait_for_port(9092)
    # Runs django repassing cli parameters
    subprocess.run(["python", "manage.py", "runserver"] + sys.argv[1:])
def _wait(self):
    '''
    Wait for all processes to exit.
    '''
    self.log("Now launching process watcher and waiting for all child "
             "processes to exit.")
    watcher_report_path = \
        self.get_run().add_temporary_file('watcher-report', suffix='.yaml')
    watcher_pid = self._launch_process_watcher(watcher_report_path)
    ProcessPool.process_watcher_pid = watcher_pid
    pid = None
    first_failed_pid = None
    was_reporter = None
    failed_pids = set()
    while True:
        if len(self.running_procs) == 0:
            break
        try:
            # wait for the next child process to exit
            pid, exit_code_with_signal = os.wait()
            signal_number = exit_code_with_signal & 255
            exit_code = exit_code_with_signal >> 8
            name = 'unknown name'
            if pid in self.proc_details.keys():
                name = self.proc_details[pid]['name']
            logger.info("PID %s (%s), Signal: %s, Exit code: %s" %
                        (pid, name, signal_number, exit_code))
            if pid == watcher_pid:
                ProcessPool.process_watcher_pid = None
                try:
                    with open(watcher_report_path, 'r') as f:
                        self.process_watcher_report = yaml.load(
                            f, Loader=yaml.FullLoader)
                except IOError as e:
                    logger.warning("Couldn't load watcher report from %s."
                                   % watcher_report_path)
                    logger.debug("Reading the watcher failed with: %s" % e)
                    raise
                # the process watcher has terminated, which is cool, I guess
                # (if it's the last child process, anyway)
                continue
            try:
                # remove pid from self.running_procs
                self.running_procs.remove(pid)
            except KeyError as e:
                if pid != os.getpid():
                    logger.debug("Caught a process which we "
                                 "didn't know: %d.\n" % pid)
            if pid in self.proc_details:
                self.proc_details[pid]['end_time'] = datetime.datetime.now()
            what_happened = "has exited with exit code %d" % exit_code
            if signal_number > 0:
                what_happened = "has received signal %d" % signal_number
                if signal_number in ProcessPool.SIGNAL_NAMES:
                    what_happened = (
                        "has received %s (signal number %d)" %
                        (ProcessPool.SIGNAL_NAMES[signal_number],
                         signal_number))
            if pid in self.proc_details:
                self.log("%s (PID %d) %s." %
                         (self.proc_details[pid]['name'], pid, what_happened))
            else:
                self.log("PID %d %s." % (pid, what_happened))
            if pid in self.proc_details:
                if signal_number == 0:
                    self.proc_details[pid]['exit_code'] = exit_code
                else:
                    self.proc_details[pid]['signal'] = signal_number
                    if signal_number in ProcessPool.SIGNAL_NAMES:
                        self.proc_details[pid]['signal_name'] = \
                            ProcessPool.SIGNAL_NAMES[signal_number]
                # now kill its predecessor
                if 'use_stdin_of' in self.proc_details[pid]:
                    kpid = self.proc_details[pid]['use_stdin_of']
                    self.log("Now killing %d, the predecessor of %d (%s)."
                             % (kpid, pid, self.proc_details[pid]['name']))
                    if kpid in self.proc_details.keys():
                        logger.debug('PID %s is "%s".'
                                     % (kpid, self.proc_details[kpid]['name']))
                    self.ok_to_fail.add(kpid)
                    try:
                        os.kill(kpid, signal.SIGPIPE)
                    except OSError as e:
                        if e.errno == errno.ESRCH:
                            self.log("Couldn't kill %d: no such "
                                     "process." % kpid)
                            pass
                        else:
                            raise
                if pid in self.copy_process_reports:
                    if first_failed_pid is None:
                        was_reporter = True
                    report_path = self.copy_process_reports[pid]
                    report = None
                    if os.path.exists(report_path):
                        with open(report_path, 'r') as f:
                            report = yaml.load(f, Loader=yaml.FullLoader)
                    if report is not None:
                        self.proc_details[pid].update(report)
                elif first_failed_pid is None:
                    was_reporter = False
        except TimeoutException as e:
            error = traceback.format_exception(*sys.exc_info())[-1]
            logger.error(error)
            self.log("Timeout, killing all child processes now.")
            ProcessPool.kill_all_child_processes()
        except OSError as e:
            if e.errno == errno.ECHILD:
                # no more children running, we are done
                logger.debug("ProcessPool: There are no child "
                             "processes left, exiting.\n")
                signal.alarm(0)
                self.log("Cancelling timeout (if there was one), all "
                         "child processes have exited.")
                break
            elif e.errno == errno.EINTR:
                # a system call was interrupted, pfft.
                pass
            else:
                raise
        else:
            if exit_code_with_signal != 0:
                if pid not in self.ok_to_fail:
                    # Oops, something went wrong. See what happens and
                    # terminate all child processes in a few seconds.
                    if first_failed_pid is None:
                        first_failed_pid = pid
                    failed_pids.add(pid)
                    signal.signal(signal.SIGALRM, timeout_handler)
                    name = 'unknown'
                    if pid in self.proc_details.keys():
                        name = self.proc_details[pid]['name']
                    self.log('Terminating all children of "%s" in %d seconds...'
                             % (name, ProcessPool.SIGTERM_TIMEOUT))
                    signal.alarm(ProcessPool.SIGTERM_TIMEOUT)
                else:
                    name = 'unknown name'
                    if pid in self.proc_details.keys():
                        name = self.proc_details[pid]['name']
                    logger.debug('PID %s (%s) was expected to fail '
                                 'because the kill signal was sent. '
                                 'Now killing its copy processes.'
                                 % (pid, name))
                    for kpid in self.copy_processes_for_pid[pid]:
                        try:
                            os.kill(kpid, signal.SIGTERM)
                        except OSError as e:
                            if e.errno == errno.ESRCH:
                                # the copy process already exited
                                pass
                            else:
                                raise
                        else:
                            logger.debug("Killed copy process of %d (%s): %d"
                                         % (pid, name, kpid))

    # now wait for the watcher process, if it still exists
    try:
        os.waitpid(watcher_pid, 0)
        try:
            with open(watcher_report_path, 'r') as f:
                self.process_watcher_report = yaml.load(
                    f, Loader=yaml.FullLoader)
        except IOError as e:
            logger.warning("Couldn't load watcher report from %s."
                           % watcher_report_path)
            logger.debug("Reading the watcher failed with: %s" % e)
    except OSError as e:
        if e.errno == errno.ESRCH:
            pass
        elif e.errno == errno.ECHILD:
            pass
        else:
            raise
    logger.debug('Watcher report:\n%s'
                 % yaml.dump(self.process_watcher_report))

    if first_failed_pid:
        if was_reporter:
            log = 'Reporter crashed %s exit with code %s' % \
                (first_failed_pid, exit_code_with_signal)
            if report is not None:
                log += ' while writing into "%s".' % \
                    self.proc_details[first_failed_pid]['report_path']
        else:
            for pid in failed_pids:
                name = 'unknown name'
                if pid in self.proc_details.keys():
                    name = self.proc_details[pid]['name']
                stderr_listener = self.copy_processes_for_pid[pid][1]
                report = self.proc_details[stderr_listener]['tail']
                if report and report != '':
                    logger.error('stderr tail of %s (%s):\n%s'
                                 % (pid, name, report))
            log = "Pipeline crashed while working in %s" % \
                self.get_run().get_temp_output_directory()
        self.log(log)
        raise UAPError(log)
running = [True]
signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGHUP, hup)
children = []
while running[0]:
    while len(children) < worker_count:
        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            run_server()
            logger.info('Child %d exiting normally' % os.getpid())
            return
        else:
            logger.info('Started child %s' % pid)
            children.append(pid)
    try:
        pid, status = os.wait()
        if os.WIFEXITED(status) or os.WIFSIGNALED(status):
            logger.error('Removing dead child %s' % pid)
            children.remove(pid)
    except OSError, err:
        if err.errno not in (errno.EINTR, errno.ECHILD):
            raise
    except KeyboardInterrupt:
        logger.info('User quit')
        break
greenio.shutdown_safe(sock)
sock.close()
logger.info('Exited')
    while finished < len(arglist):
        returnchild, returnindex, value = recvmessage(fromchild)
        if isinstance(value, Exception):
            raise value
        outlist[returnindex] = value
        finished += 1
        # If there are still values left to process, hand one back out
        # to the child that just finished
        if argindex < len(arglist):
            sendmessage(children[returnchild]['tochild'],
                        (argindex, arglist[argindex]))
            argindex += 1

    # Kill the child processes
    [sendmessage(child['tochild'], None) for child in children]
    [os.wait() for child in children]
    return outlist


def parallelizable(maxchildren=2, perproc=None):
    """Mark a function as eligible for parallelized execution. The function
    will run across a number of processes equal to maxchildren, perproc
    times the number of processors installed on the system, or the number
    of times the function needs to be run to process all data passed to
    it - whichever is least."""
    if perproc is not None:
        processors = 4  # hand-waving
        maxchildren = min(maxchildren, perproc * processors)

    def decorate(f):
        """Set the parallel_maxchildren attribute to the value appropriate
        for this function"""
def execute(args):
    # if empty then return
    if len(args) == 0:
        return
    # exit command
    elif args[0].lower() == "exit":
        sys.exit(0)
    # cd changing directory command
    elif args[0].lower() == "cd":
        try:
            # cd ..
            if len(args) == 1:
                os.chdir("..")
            else:
                # other directory
                os.chdir(args[1])
        except:
            # file directory does not exist
            os.write(1, ("cd %s: Directory does not exist\n" % args[1]).encode())
            pass
    # piping
    elif "|" in args:
        pipe(args)
    else:
        # if & in args, then it runs in the background and we don't wait
        background = True
        if "&" in args:
            args.remove("&")
            background = False
        rc = os.fork()
        if rc < 0:
            os.write(2, ("fork failed, returning %d\n" % rc).encode())
            sys.exit(1)
        elif rc == 0:
            # execute specified program
            if "/" in args[0]:
                program = args[0]
                try:
                    os.execve(program, args, os.environ)
                except FileNotFoundError:
                    pass
            # Redirect happens
            elif ">" in args or "<" in args:
                redirect(args)
            # No specifications, just execute program
            else:
                # trying each directory in path
                for dir in re.split(":", os.environ['PATH']):
                    program = "%s/%s" % (dir, args[0])
                    try:
                        os.execve(program, args, os.environ)  # trying to execute
                    except FileNotFoundError:
                        pass
            # could not execute
            os.write(2, ("Could not execute %s\n" % args[0]).encode())
            sys.exit(1)  # terminate with error
        else:
            # if not a background task, the parent waits for the child
            if background == True:
                os.wait()
def waitforworkers():
    for _pid in pids:
        st = _exitstatus(os.wait()[1])
        if st and not problem[0]:
            problem[0] = st
            killworkers()
def parallel_runs(config_files_list=[], topology_files_list=[],
                  output_dir='./outputs/parallel_runs/', max_parallel_runs=3):
    r'''
    Executes multiple SCALE-Sim commands in parallel using multiple processes

    Args:
        config_files_list: List of all config files (along with directory
            path) for which SCALE-Sim needs to be executed.
        topology_files_list: List of all topology files (along with directory
            path) for which SCALE-Sim needs to be executed.
        output_dir: The directory path where all the SCALE-Sim runs need to
            be stored.
        max_parallel_runs: Maximum number of parallel SCALE-Sim runs to be
            executed.

    Examples:
        parallel_runs(config_files_list=['list', 'of', 'config_files'],
                      topology_files_list=['list', 'of', 'topology_files'],
                      output_dir='full_name_of_output_folder',
                      max_parallel_runs=2)
    '''
    run_count = 1
    processes = set()  # Parallel processes
    for topology_file in topology_files_list:
        for config_file in config_files_list:
            top_dir = "../../../"
            arch_config = "-arch_config=" + top_dir + config_file
            arch_network = "-network=" + top_dir + topology_file
            scale_sim_command = [
                "python", top_dir + "scale.py", arch_config, arch_network
            ]
            print("INFO:: run_count:" + str(run_count))
            topology_file_name = topology_file.split('/')[-1][:-4]
            config_file_name = config_file.split('/')[-1][:-4]
            config_file_name = topology_file_name + "_" + config_file_name
            output_file_dir = output_dir + "/./" + config_file_name
            if not os.path.exists(output_file_dir):
                os.system("mkdir -p " + output_file_dir)
            else:
                t = time.time()
                new_output_file_dir = output_file_dir + "_" + str(t)
                os.system("mv " + output_file_dir + " " + new_output_file_dir)
                os.system("mkdir " + output_file_dir)
            os.system("cd " + output_file_dir)
            std_out_file = open(output_file_dir + '/' + config_file_name + '.txt',
                                mode='w+')
            # print(scale_sim_command)
            processes.add(
                subprocess.Popen(scale_sim_command, cwd=output_file_dir,
                                 stdout=std_out_file))
            std_out_file.close()
            if len(processes) >= max_parallel_runs:
                os.wait()
                processes.difference_update(
                    [p for p in processes if p.poll() is not None])
            os.system("cd ../../../")
            run_count = run_count + 1
    for p in processes:
        if p.poll() is None:
            p.wait()
def handler1(signum, frame):
    # Action performed for SIGCHLD signal
    try:
        os.wait()
        print "child exited"
    except OSError:
        pass
def update_packages(text_files, bin_files, stage_dir, package_dir):
    import os
    print "Updating packages...."
    os.chdir(stage_dir)

    # -g appends, -q for quiet operation
    zip_base_args = " -gqu "
    # -l causes line ending conversion for windows
    zip_text_args = " -l "
    zip_file = stage_dir + "/zip-archive.zip"

    # -r appends, -f specifies file.
    tar_args = "-uf "
    tar_file = stage_dir + "/tar-archive.tar"

    # Zip binary files
    print "\tAdding binary files to zip...."
    p = subprocess.Popen(shlex.split("xargs zip " + zip_base_args + zip_file),
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         close_fds=True)
    instream, outstream = (p.stdin, p.stdout)
    instream.write(bin_files)
    instream.close()
    outstream.close()
    # Need to wait for zip process spawned by popen2 to complete
    # before proceeding.
    os.wait()

    print "\tAdding text files to zip....."
    p = subprocess.Popen(shlex.split("xargs zip " + zip_base_args +
                                     zip_text_args + zip_file),
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         close_fds=True)
    instream, outstream = (p.stdin, p.stdout)
    instream.write(text_files)
    instream.close()
    outstream.close()
    # Need to wait for zip process spawned by popen2 to complete
    # before proceeding.
    os.wait()

    # Tar files
    print "\tAdding to tar file...."
    if not os.path.exists(tar_file):
        open(tar_file, 'w').close()
    p = subprocess.Popen(shlex.split("xargs tar " + tar_args + tar_file),
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         close_fds=True)
    instream, outstream = (p.stdin, p.stdout)
    instream.write(' ' + bin_files + ' ' + text_files)
    instream.close()
    print outstream.read()
    outstream.close()
    os.wait()
        try:
            dat = os.read(child_pty, 4096)
        except OSError as e:
            if e.errno == errno.EIO:
                break
            continue
        if not dat:
            break
        try:
            sys.stdout.write(dat)
        except (OSError, IOError):
            pass

    os._exit(os.wait()[1])


import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process

from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR

import usb1
import zmq
from setproctitle import setproctitle
def main():
    major = int(sys.version[0])
    minor = int(sys.version[2])

    if major <= 2 and minor < 5:
        sys.stdout.write('This program can not work (is not tested) with your python version (< 2.5)\n')
        sys.stdout.flush()
        sys.exit(1)

    cli_named_pipe = os.environ.get('exabgp_cli_pipe', '')
    if cli_named_pipe:
        from exabgp.application.control import main as control
        control(cli_named_pipe)
        sys.exit(0)

    options = docopt.docopt(usage, help=False)

    if options["--run"]:
        sys.argv = sys.argv[sys.argv.index('--run') + 1:]
        if sys.argv[0] == 'healthcheck':
            from exabgp.application import run_healthcheck
            run_healthcheck()
        elif sys.argv[0] == 'cli':
            from exabgp.application import run_cli
            run_cli()
        else:
            sys.stdout.write(usage)
            sys.stdout.flush()
            sys.exit(0)
        return

    root = root_folder(options, [
        '/bin/exabgp', '/sbin/exabgp',
        '/lib/exabgp/application/bgp.py',
        '/lib/exabgp/application/control.py'
    ])
    etc = root + '/etc/exabgp'
    os.environ['EXABGP_ETC'] = etc  # This is not most pretty

    if options["--version"]:
        sys.stdout.write('ExaBGP : %s\n' % version)
        sys.stdout.write('Python : %s\n' % sys.version.replace('\n', ' '))
        sys.stdout.write('Uname : %s\n' % ' '.join(platform.uname()[:5]))
        sys.stdout.write('Root : %s\n' % root)
        sys.stdout.flush()
        sys.exit(0)

    envfile = get_envfile(options, etc)
    env = get_env(envfile)

    # Must be done before setting the logger as it modify its behaviour
    if options["--debug"]:
        env.log.all = True
        env.log.level = syslog.LOG_DEBUG

    logger = Logger()

    from exabgp.configuration.setup import environment

    if options["--decode"]:
        decode = ''.join(options["--decode"]).replace(':', '').replace(' ', '')
        if not is_bgp(decode):
            sys.stdout.write(usage)
            sys.stdout.write('Environment values are:\n%s\n\n' %
                             '\n'.join(' - %s' % _ for _ in environment.default()))
            sys.stdout.write('The BGP message must be an hexadecimal string.\n\n')
            sys.stdout.write('All colons or spaces are ignored, for example:\n\n')
            sys.stdout.write('  --decode 001E0200000007900F0003000101\n')
            sys.stdout.write('  --decode 001E:02:0000:0007:900F:0003:0001:01\n')
            sys.stdout.write('  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101\n')
            sys.stdout.write('  --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01\n')
            sys.stdout.write('  --decode \'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101\n\'')
            sys.stdout.flush()
            sys.exit(1)
    else:
        decode = ''

    duration = options["--signal"]
    if duration and duration.isdigit():
        pid = os.fork()
        if pid:
            import time
            import signal
            try:
                time.sleep(int(duration))
                os.kill(pid, signal.SIGUSR1)
            except KeyboardInterrupt:
                pass
            try:
                pid, code = os.wait()
                sys.exit(code)
            except KeyboardInterrupt:
                try:
                    pid, code = os.wait()
                    sys.exit(code)
                except Exception:
                    sys.exit(0)

    if options["--help"]:
        sys.stdout.write(usage)
        sys.stdout.write('Environment values are:\n' +
                         '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.stdout.flush()
        sys.exit(0)

    if options["--decode"]:
        env.log.parser = True
        env.debug.route = decode
        env.tcp.bind = ''

    if options["--profile"]:
        env.profile.enable = True
        if options["--profile"].lower() in ['1', 'true']:
            env.profile.file = True
        elif options["--profile"].lower() in ['0', 'false']:
            env.profile.file = False
        else:
            env.profile.file = options["--profile"]

    if envfile and not os.path.isfile(envfile):
        comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
    else:
        comment = ''

    if options["--full-ini"] or options["--fi"]:
        for line in environment.iter_ini():
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)

    if options["--full-env"] or options["--fe"]:
        print()
        for line in environment.iter_env():
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)

    if options["--diff-ini"] or options["--di"]:
        for line in environment.iter_ini(True):
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)

    if options["--diff-env"] or options["--de"]:
        for line in environment.iter_env(True):
            sys.stdout.write('%s\n' % line)
        sys.stdout.flush()
        sys.exit(0)

    if options["--once"]:
        env.tcp.once = True

    if options["--pdb"]:
        # The following may fail on old version of python (but is required for debug.py)
        os.environ['PDB'] = 'true'
        env.debug.pdb = True

    if options["--test"]:
        env.debug.selfcheck = True
        env.log.parser = True

    if options["--memory"]:
        env.debug.memory = True

    configurations = []
    # check the file only once that we have parsed all the command line
    # options and allowed them to run
    if options["<configuration>"]:
        for f in options["<configuration>"]:
            normalised = os.path.realpath(os.path.normpath(f))
            if os.path.isfile(normalised):
                configurations.append(normalised)
                continue
            if f.startswith('etc/exabgp'):
                normalised = os.path.join(etc, f[11:])
                if os.path.isfile(normalised):
                    configurations.append(normalised)
                    continue
            logger.debug('one of the arguments passed as configuration is not a file (%s)' % f,
                         'error', 'configuration')
            sys.exit(1)
    else:
        sys.stdout.write(usage)
        sys.stdout.write('Environment values are:\n%s\n\n' %
                         '\n'.join(' - %s' % _ for _ in environment.default()))
        sys.stdout.write('no configuration file provided')
        sys.stdout.flush()
        sys.exit(1)

    from exabgp.bgp.message.update.attribute import Attribute
    Attribute.caching = env.cache.attributes

    if env.debug.rotate or len(configurations) == 1:
        run(env, comment, configurations, root, options["--validate"])

    if not (env.log.destination in ('syslog', 'stdout', 'stderr') or
            env.log.destination.startswith('host:')):
        logger.error('can not log to files when running multiple configuration (as we fork)',
                     'configuration')
        sys.exit(1)

    try:
        # run each configuration in its own process
        pids = []
        for configuration in configurations:
            pid = os.fork()
            if pid == 0:
                run(env, comment, [configuration], root,
                    options["--validate"], os.getpid())
            else:
                pids.append(pid)

        # If we get a ^C / SIGTERM, ignore it and just continue waiting for
        # our child processes
        import signal
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # wait for the forked processes
        for pid in pids:
            os.waitpid(pid, 0)
    except OSError as exc:
        logger.critical('can not fork, errno %d : %s' % (exc.errno, exc.strerror),
                        'reactor')
        sys.exit(1)
sys.exit("No arguments") # SSH_ASKPASS is ignored unless DISPLAY is set. # That is the normal use case for this utility script. if os.getenv('DISPLAY') is None: os.environ['DISPLAY'] = "phony" # Create a child process in all cases so we can ctrl-c it even if it is disassociated child = os.fork() if child == 0: os.setsid() try: os.execvp(sys.argv[1], sys.argv[1:]) except OSError as err: sys.exit(str(err)) sys.exit("execvp") # sys.stderr.write("my PID " + str(os.getpid()) + " child " + str(child) + "\n") if child == -1: sys.exit("fork") # We're the parent try: t = os.wait() except OSError: # sys.stderr.write("Error waiting for child process\n"); sys.exit(1) # sys.stderr.write("CHILD: " + str(t) + '\n') if t[0] != child: sys.exit("Found an unknown child process from os.wait()") sys.exit(1 if t[1] else 0)
def map(f, *a, **kw):
    """
    forkmap.map(..., n=nprocessors), same as map(...).

    n must be a keyword arg; default n is number of physical processors.
    """
    def writeobj(pipe, obj):
        s = cPickle.dumps(obj)
        s = struct.pack('i', -len(s)) + s
        os.write(pipe, s)

    def readobj(pipe):
        n = struct.unpack('i', os.read(pipe, 4))[0]
        s = ''
        an = abs(n)
        while len(s) < an:
            s += os.read(pipe, min(65536, an - len(s)))
        return cPickle.loads(s)

    n = kw.get('n', nproc)
    if n == 1:
        return builtin_map(f, *a)

    if len(a) == 1:
        L = a[0]
    else:
        L = zip(*a)
    try:
        len(L)
    except TypeError:
        L = list(L)
    n = min(n, len(L))

    ans = [None] * len(L)
    pipes = [os.pipe() for i in range(n - 1)]

    for i in range(n):
        if i < n - 1 and not os.fork():
            # Child, and not last processor
            try:
                try:
                    if len(a) == 1:
                        obj = builtin_map(f, L[i * len(L) // n:(i + 1) * len(L) // n])
                    else:
                        obj = [f(*x) for x in L[i * len(L) // n:(i + 1) * len(L) // n]]
                except Exception as obj:
                    pass
                writeobj(pipes[i][1], obj)
            except:
                traceback.print_exc()
            finally:
                os._exit(0)
        elif i == n - 1:
            # Parent fork, and last processor
            try:
                if len(a) == 1:
                    ans[i * len(L) // n:] = builtin_map(f, L[i * len(L) // n:])
                else:
                    ans[i * len(L) // n:] = [f(*x) for x in L[i * len(L) // n:]]
                for k in range(n - 1):
                    obj = readobj(pipes[k][0])
                    if isinstance(obj, Exception):
                        raise obj
                    ans[k * len(L) // n:(k + 1) * len(L) // n] = obj
            finally:
                for j in range(n - 1):
                    os.close(pipes[j][0])
                    os.close(pipes[j][1])
                    os.wait()
    return ans
'''
Zombie processes and orphan processes

Zombie process: a child that has already died while its parent is still alive
(and has not yet reaped it).
Orphan process: a child that is still alive after its parent has died.

In the operating system, when a child process dies, its memory and other
resources are managed, reclaimed and released by the parent process, so we
have to manage the child processes we create and collect them ourselves.

The parent can reclaim those resources with os.wait(), which returns two
values: the process pid and an integer (0 means success, a negative value
means failure).
'''
import os

pid = os.fork()

# the child process enters this branch
if pid == 0:
    print("I am child process %s, my parent is %s" % (os.getpid(), os.getppid()))
# the parent process enters this branch
else:
    print("I am parent process %s, I created child %s" % (os.getpid(), pid))
    print("Collecting the child's resources:")
    son_pid, result = os.wait()
    print(son_pid, result)
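# A small companion sketch (an addition, not part of the snippet above):
# os.waitpid(-1, os.WNOHANG) is the non-blocking variant of os.wait(); it
# returns (0, 0) when no child has exited yet, instead of blocking.
import os
import time

pid = os.fork()
if pid == 0:
    os._exit(0)
else:
    while True:
        done_pid, status = os.waitpid(-1, os.WNOHANG)
        if done_pid != 0:  # a child was reaped
            print("reaped", done_pid, "status", status)
            break
        time.sleep(0.01)   # nothing exited yet, poll again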
def zombie():
    # reap one exited child so it doesn't linger as a zombie
    os.wait()
def child_wait():
    pid, status = os.wait()
    pid_pool.remove(pid)
    return os.WEXITSTATUS(status)
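# A hedged sketch (assuming plain CPython on a Unix system) of how the 16-bit
# status from os.wait() is usually decoded before calling os.WEXITSTATUS,
# which is only meaningful when the child exited normally:
import os

def decode_status(status):
    if os.WIFEXITED(status):
        return ('exited', os.WEXITSTATUS(status))
    if os.WIFSIGNALED(status):
        return ('signaled', os.WTERMSIG(status))
    return ('stopped-or-other', status)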
def __call__(self, f, inputs):
    """
    Parallel iterator using ``fork()``.

    INPUT:

    - ``f`` -- a Python function that need not be pickleable or anything else!

    - ``inputs`` -- a list of pickleable pairs ``(args, kwds)``, where
      ``args`` is a tuple and ``kwds`` is a dictionary.

    OUTPUT:

    EXAMPLES::

        sage: F = sage.parallel.use_fork.p_iter_fork(2,3)
        sage: sorted(list( F( (lambda x: x^2), [([10],{}), ([20],{})])))
        [(([10], {}), 100), (([20], {}), 400)]
        sage: sorted(list( F( (lambda x, y: x^2+y), [([10],{'y':1}), ([20],{'y':2})])))
        [(([10], {'y': 1}), 101), (([20], {'y': 2}), 402)]

    TESTS:

    The output of functions decorated with :func:`parallel` is read
    as a pickle by the parent process. We intentionally break the
    unpickling and demonstrate that this failure is handled
    gracefully (an exception is displayed and an empty list is
    returned)::

        sage: Polygen = parallel(polygen)
        sage: list(Polygen([QQ]))
        [(((Rational Field,), {}), x)]
        sage: from sage.structure.sage_object import unpickle_override, register_unpickle_override
        sage: register_unpickle_override('sage.rings.polynomial.polynomial_rational_flint', 'Polynomial_rational_flint', Integer)
        sage: L = list(Polygen([QQ]))
        ('__init__() takes at most 2 positional arguments (4 given)', <type 'sage.rings.integer.Integer'>, (Univariate Polynomial Ring in x over Rational Field, [0, 1], False, True))
        sage: L
        []

    Fix the unpickling::

        sage: del unpickle_override[('sage.rings.polynomial.polynomial_rational_flint', 'Polynomial_rational_flint')]
        sage: list(Polygen([QQ,QQ]))
        [(((Rational Field,), {}), x), (((Rational Field,), {}), x)]
    """
    n = self.ncpus
    v = list(inputs)
    import os, sys, signal
    from sage.structure.sage_object import load
    from sage.misc.all import tmp_dir, walltime
    dir = tmp_dir()
    timeout = self.timeout

    workers = {}
    try:
        while len(v) > 0 or len(workers) > 0:
            # Spawn up to n subprocesses
            while len(v) > 0 and len(workers) < n:
                # Subprocesses shouldn't inherit unflushed buffers (cf. #11778):
                sys.stdout.flush()
                sys.stderr.flush()

                pid = os.fork()
                # The way fork works is that pid returns the
                # nonzero pid of the subprocess for the master
                # process and returns 0 for the subprocess.
                if pid:
                    # This is the parent master process.
                    workers[pid] = [v[0], walltime(), '']
                    del v[0]
                else:
                    # This is the subprocess.
                    self._subprocess(f, dir, v[0])

            if len(workers) > 0:
                # Now wait for one subprocess to finish and report the result.
                # However, wait at most the time since the oldest process started.
                if timeout:
                    oldest = min([X[1] for X in workers.values()])
                    alarm(max(timeout - (walltime() - oldest), 0.1))

                try:
                    pid = os.wait()[0]
                    cancel_alarm()
                    w = workers.pop(pid)
                except AlarmInterrupt:
                    cancel_alarm()
                    # Kill workers that are too old
                    for pid, X in workers.iteritems():
                        if walltime() - X[1] > timeout:
                            if self.verbose:
                                print(
                                    "Killing subprocess %s with input %s which took too long"
                                    % (pid, X[0]))
                            os.kill(pid, signal.SIGKILL)
                            X[-1] = ' (timed out)'
                except KeyError:
                    # Some other process exited, not our problem...
                    pass
                else:
                    # collect data from process that successfully terminated
                    sobj = os.path.join(dir, '%s.sobj' % pid)
                    if not os.path.exists(sobj):
                        X = "NO DATA" + w[-1]  # the message field
                    else:
                        X = load(sobj, compress=False)
                        os.unlink(sobj)
                    out = os.path.join(dir, '%s.out' % pid)
                    if not os.path.exists(out):
                        output = "NO OUTPUT"
                    else:
                        output = open(out).read()
                        os.unlink(out)

                    if output.strip():
                        print output,

                    yield (w[0], X)
    except Exception as msg:
        print msg
    finally:
        # Clean up all temporary files.
        try:
            for X in os.listdir(dir):
                os.unlink(os.path.join(dir, X))
            os.rmdir(dir)
        except OSError as msg:
            if self.verbose:
                print msg

        # Send "kill -9" signal to workers that are left.
        if len(workers) > 0:
            if self.verbose:
                print "Killing any remaining workers..."
                sys.stdout.flush()
            for pid in workers.keys():
                try:
                    os.kill(pid, signal.SIGKILL)
                    os.waitpid(pid, 0)
                except OSError as msg:
                    if self.verbose:
                        print msg
'''wait: dealing with zombie processes

Use the blocking wait call to watch for the child's exit status; the
statements after wait only run once the child has ended.'''
import os
import time

pid = os.fork()

if pid < 0:
    print('Error')
elif pid == 0:
    time.sleep(0)
    print('Child process', os.getpid())
    os._exit(3)  # child exits
else:
    pid, status = os.wait()  # block until a child is reaped
    print('parent process', pid)
    print('status', status)
    while True:  # keep the parent from exiting
        pass
                C.logger.debug(
                    f"{name}: Execution completed @ {strftime('%H:%M:%S')}. "
                    f"Waiting {pool_seconds} seconds ..."
                )
        else:
            # GV Parent main loop process continues
            # GV WAIT FOR PROCESS # GV child_pid
            logger.info(
                f"{name}: Child Process started as pid={child_pid} @ {strftime('%H:%M:%S')}."
            )
            if logger.level < logging.INFO:
                print(
                    f"{name}: Child Process started as pid={child_pid} @ {strftime('%H:%M:%S')}."
                )
            try:
                # GV WAIT FOR PROCESS # GV child_pid
                childProcExitInfo = os.wait()
                signal = childProcExitInfo[1] % 256
                status = int(childProcExitInfo[1] / 256)
                message = f"{name}: Child process %d exited with exit info = %d (signal %d status %d)" % (
                    childProcExitInfo[0],
                    childProcExitInfo[1],
                    signal,
                    status,
                )
                logger.info(message)
                logger.info(f"{name}: Waiting {pool_seconds} seconds ...")
                print(message)
                print(
                    f"{name}: {time.strftime('%H:%M:%S')} Waiting {pool_seconds} seconds ..."
                )
                sleep(pool_seconds)
def test_terminate_jobs(self):
    # Executes itself recursively and sleeps for 100 seconds
    with open("dummy_executable", "w") as f:
        f.write(
            """#!/usr/bin/env python
import sys, os, time
counter = eval(sys.argv[1])
if counter > 0:
    os.fork()
    os.execv(sys.argv[0],[sys.argv[0], str(counter - 1) ])
else:
    time.sleep(100)"""
        )

    executable = os.path.realpath("dummy_executable")
    os.chmod("dummy_executable", stat.S_IRWXU | stat.S_IRWXO | stat.S_IRWXG)

    self.job_list = {
        "umask": "0002",
        "DATA_ROOT": "",
        "global_environment": {},
        "global_update_path": {},
        "jobList": [
            {
                "name": "dummy_executable",
                "executable": executable,
                "target_file": None,
                "error_file": None,
                "start_file": None,
                "stdout": "dummy.stdout",
                "stderr": "dummy.stderr",
                "stdin": None,
                "argList": ["3"],
                "environment": None,
                "exec_env": None,
                "license_path": None,
                "max_running_minutes": None,
                "max_running": None,
                "min_arg": 1,
                "arg_types": [],
                "max_arg": None,
            }
        ],
        "run_id": "",
        "ert_pid": "",
    }

    with open("jobs.json", "w") as f:
        f.write(json.dumps(self.job_list))

    # macOS doesn't provide /usr/bin/setsid, so we roll our own
    with open("setsid", "w") as f:
        f.write(
            dedent(
                """\
                #!/usr/bin/env python
                import os
                import sys
                os.setsid()
                os.execvp(sys.argv[1], sys.argv[1:])
                """
            )
        )
    os.chmod("setsid", 0o755)

    job_dispatch_script = importlib.util.find_spec("job_runner.job_dispatch").origin
    job_dispatch_process = Popen(
        [
            os.getcwd() + "/setsid",
            sys.executable,
            job_dispatch_script,
            os.getcwd(),
        ]
    )

    p = psutil.Process(job_dispatch_process.pid)

    # Three levels of processes should spawn 8 children in total
    wait_until(lambda: self.assertEqual(len(p.children(recursive=True)), 8))

    p.terminate()

    wait_until(lambda: self.assertEqual(len(p.children(recursive=True)), 0))

    os.wait()  # allow os to clean up zombie processes
def run(program, *args):
    pid = os.fork()
    if not pid:
        os.execvp(program, (program, ) + args)
    return os.wait()[0]
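# Usage note (an assumption, not from the original snippet): os.wait()
# returns a (pid, status) tuple, so run() gives back the pid of the reaped
# child rather than its exit status.
pid = run('ls', '-l')  # blocks until ls exits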