def fork(f):
    """Create a double-forked (daemonized) child bound to function *f*.

    In the original parent this reaps the intermediate child and returns a
    stand-in callable that ignores its arguments and returns "" (the
    child's result is not available across the fork).  In the detached
    grandchild it returns a wrapper that runs *f* and then terminates the
    process.  (The old docstring called this a "generator"; it is not.)
    """
    r = ""
    if os.fork():
        # Parent: wait for the intermediate child so it doesn't go defunct,
        # then return the placeholder callable.
        os.wait()
        return lambda *x, **kw: r
    # Child: detach from the controlling terminal and sanitize state.
    os.setsid()
    os.umask(0o77)  # Py3 fix: the old literal 077 is a SyntaxError
    os.chdir("/")
    if os.fork():
        # Intermediate child exits so the grandchild is re-parented to init.
        os._exit(0)

    def wrapper(*args, **kwargs):
        """Execute the bound function, then exit the process."""
        f(*args, **kwargs)
        os._exit(0)

    return wrapper
def daemonize( errfile ):
    """
    Detach process and become a daemon.
    """
    # First fork: the parent exits so the child is re-parented to init.
    pid = os.fork()
    if pid:
        os._exit(0)
    # New session; ignore the SIGHUP delivered when the session leader dies.
    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    os.umask(0)
    # Second fork: the survivor is not a session leader and can never
    # reacquire a controlling terminal.
    pid = os.fork()
    if pid:
        os._exit(0)
    os.chdir("/")
    # Close the first 20 descriptors.  NOTE(review): assumes nothing higher
    # is open -- a getrlimit-based bound would be more robust; confirm.
    for fd in range(0, 20):
        try:
            os.close(fd)
        except OSError:
            pass
    # Reopen the standard streams; stderr is routed to the caller's log.
    sys.stdin = open("/dev/null", "r")
    sys.stdout = open("/dev/null", "w")
    sys.stderr = ErrorLog( errfile )  # ErrorLog is defined elsewhere in the project
def detach():
    """Detach from the terminal via the standard double fork.

    Exits the parent at each fork; the surviving grandchild gets its
    standard streams redirected to /dev/null.  Exits with status 1 if a
    fork fails.
    """
    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError as error:
        # Fix: `print >>sys.stderr` was Python 2 statement syntax, and
        # OSError.message no longer exists -- format the exception itself.
        print("fork failed: %s" % error, file=sys.stderr)
        sys.exit(1)

    os.setsid()
    os.umask(0)

    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError as error:
        print("fork failed: %s" % error, file=sys.stderr)
        sys.exit(1)

    # Flush before swapping the descriptors underneath the streams.
    sys.stdout.flush()
    sys.stderr.flush()
    stdin = open("/dev/null", "r")
    stdout = open("/dev/null", "a+")
    stderr = open("/dev/null", "a+")
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
def daemonize(keepfd=None, chdir='/'):
    """Detach the current process and run it as a daemon.

    :param keepfd: optional file descriptor to leave open while every
        other descriptor is closed.
    :param chdir: working directory for the daemon; falls back to '/'.
    """
    os.umask(0)
    if chdir:
        os.chdir(chdir)
    else:
        os.chdir('/')
    os.setgid(os.getgid())  # relinquish elevations
    os.setuid(os.getuid())  # relinquish elevations

    # Double fork to daemonize
    if os.fork() > 0:
        os._exit(0)  # Parent exits
    os.setsid()  # Obtain new process group
    if os.fork() > 0:
        os._exit(0)  # Parent exits

    # Signal handling
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Close open files, highest first, sparing keepfd.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 256
    for fd in reversed(range(maxfd)):
        try:
            if fd != keepfd:
                os.close(fd)
        except OSError as exc:
            # Idiom fix: modern except-as replaces the old manual
            # sys.exc_info() unpacking.  EBADF just means "was not open".
            if exc.errno != errno.EBADF:
                raise

    # Redirect I/O to /dev/null
    os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
    os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
    os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
def daemonize():
    """Standard double-fork daemonization with stdio sent to /dev/null.

    Exits the parent at each fork; exits with status 1 if a fork fails.
    """
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as e:
        print(e)  # Py3 fix: `print e` statement syntax
        sys.exit(1)
    os.chdir('/')
    os.umask(0)
    os.setsid()
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as e:
        print(e)
        sys.exit(1)
    for f in sys.stdout, sys.stderr:
        f.flush()
    # Py3 fixes: file() no longer exists, and unbuffered (0) text-mode
    # files are not allowed -- use open() with default buffering.
    si = open('/dev/null', 'r')
    so = open('/dev/null', 'a+')
    se = open('/dev/null', 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def daemonize(self):
    """
    Fork off as a daemon: double fork, write the PID file if configured,
    and redirect all standard streams to the OS null device.
    """
    # pylint: disable=protected-access
    # Make a non-session-leader child process
    try:
        pid = os.fork()  # only available in UNIX
        if pid != 0:
            os._exit(0)
    except OSError as error:
        sys.stderr.write('fork #1 failed: {error_num}: {error_message}\n'.format
                         (error_num=error.errno, error_message=error.strerror))
        sys.exit(1)

    os.setsid()  # only available in UNIX

    # Daemons traditionally run with umask 0 and this should not have repercussions
    os.umask(0)

    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork()
        if pid != 0:
            os._exit(0)
    except OSError as error:
        sys.stderr.write('fork #2 failed: Error {error_num}: {error_message}\n'.format
                         (error_num=error.errno, error_message=error.strerror))
        sys.exit(1)

    # Write pid
    if self.create_pid:
        pid = os.getpid()
        # BUG FIX: the format strings below had no {filename} placeholder,
        # so the `filename` keyword argument was silently ignored.
        logger.log('Writing PID: {pid} to {filename}'.format(pid=pid, filename=self.pid_file))
        try:
            with io.open(self.pid_file, 'w') as f_pid:
                f_pid.write('{0}\n'.format(pid))
        except EnvironmentError as error:
            logger.log_error_and_exit('Unable to write PID file: {filename} Error {error_num}: {error_message}'.format
                                      (filename=self.pid_file, error_num=error.errno, error_message=error.strerror))

    # Redirect all output
    sys.stdout.flush()
    sys.stderr.flush()

    devnull = getattr(os, 'devnull', '/dev/null')
    # Py3 fix: the file() builtin no longer exists; use open().
    stdin = open(devnull)
    stdout = open(devnull, 'a+')
    stderr = open(devnull, 'a+')

    os.dup2(stdin.fileno(), getattr(sys.stdin, 'device', sys.stdin).fileno())
    os.dup2(stdout.fileno(), getattr(sys.stdout, 'device', sys.stdout).fileno())
    os.dup2(stderr.fileno(), getattr(sys.stderr, 'device', sys.stderr).fileno())
def daemonize(prog_name, pid_file_path="/var/run", dont_exit=False, main=None):
    """
    This will do the fork dance to daemonize the Python script.

    You have a couple options in using this.

    1) Call it with just a prog_name and the current script forks like
       normal then continues running.
    2) Add dont_exit=True and it will both fork a new process *and* keep
       the parent.
    3) Set main to a function and that function will become the new main
       method of the process, and the process will exit when that function
       ends.
    """
    if os.fork() == 0:
        # First child: become session leader, ignore the trailing SIGHUP.
        os.setsid()
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        pid = os.fork()
        if pid != 0:
            # Intermediate child exits; the grandchild is the daemon.
            os._exit(0)
        else:
            # Grandchild: refresh the pid file (helpers defined elsewhere).
            pid_remove_dead(prog_name, pid_file_path=pid_file_path)
            pid_store(prog_name, pid_file_path=pid_file_path)
            if main:
                # Option 3: run the supplied main and exit when it returns.
                # (With main=None the daemon simply falls out of this call
                # and continues the script -- option 1.)
                main()
                os._exit(0)
    elif dont_exit:
        # Option 2: the original parent keeps running.
        return True
    else:
        # Default: the original parent exits, leaving only the daemon.
        os._exit(0)
def daemon(working_directory='/', pidfile=None, stdin=None, stdout=None, stderr=None):
    """Turn the current process into a daemon.

    :param working_directory: directory to chdir into after detaching.
    :param pidfile: optional path; the daemon's pid is written there.
    :param stdin/stdout/stderr: paths to attach the standard streams to
        (default /dev/null).
    """
    stdin = stdin or '/dev/null'
    stdout = stdout or '/dev/null'
    stderr = stderr or '/dev/null'
    pid = os.fork()
    if pid != 0:
        sys.exit(0)
    os.chdir(working_directory)
    os.setsid()  # Create new session and sets process group.
    os.umask(2)
    pid = os.fork()  # Will have INIT (pid 1) as parent process...
    if pid != 0:  # if pid is not child...
        sys.exit(0)
    sys.stdout.flush()
    sys.stderr.flush()
    # Py3 fixes: file() no longer exists and unbuffered (0) text mode is
    # not allowed -- use open() with default buffering.
    si = open(stdin, "r")
    so = open(stdout, "a+")
    se = open(stderr, "a+")
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    if pidfile:
        writeto(pidfile, str(os.getpid()))  # writeto() defined elsewhere
def _daemonize():
    """Daemonize, attach stdio to LOG_FILE, and write/clean up PID_FILE.

    Relies on the module-level WORKDIR, LOG_FILE and PID_FILE constants.
    """
    pid = os.fork()
    if pid > 0:
        # exit first parent
        sys.exit(0)
    # decouple from parent environment
    os.chdir(WORKDIR)
    os.setsid()
    os.umask(0)
    # do second fork
    pid = os.fork()
    if pid > 0:
        # exit from second parent
        sys.exit(0)
    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    # NOTE(review): stdin is pointed at the log file opened read-only --
    # presumably just to park fd 0 somewhere harmless; confirm.
    si = open(LOG_FILE, 'r')
    so = open(LOG_FILE, 'a+')
    # Py3 fix: buffering=0 is only valid for binary files; the old
    # open(LOG_FILE, 'a+', 0) raised ValueError.
    se = open(LOG_FILE, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    # write pidfile and arrange for it to be removed on exit
    pid = str(os.getpid())
    f = open(PID_FILE, 'w')
    f.write("%s\n" % pid)
    f.close()
    atexit.register(lambda: os.remove(PID_FILE))
def _fork(self):
    """
    .. method:: _fork()

       Forks a process.  If ``doubleFork`` was set when creating the
       instance, then use a double fork, which avoids zombies but loses
       much useful communication with the daemon - use it for stuff you
       want to launch and then forget about.

       Returns the forked process id (0 inside the forked process).
    """
    if not self.doubleFork:
        return os.fork()
    # double fork
    pid1 = os.fork()
    if pid1 == 0:
        # child
        pid2 = os.fork()
        if pid2 == 0:
            # grandchild
            return 0
        # smuggle the grandchild PID back to the parent via exit status
        sys.exit(pid2)
    p, status = os.waitpid(pid1, 0)
    # BUG FIX: `status / 256` is float division on Python 3 and decodes
    # the wait status by hand; WEXITSTATUS extracts the exit byte.
    # NOTE(review): exit statuses are 8 bits, so PIDs > 255 are truncated
    # by this channel -- a pre-existing limitation of the design.
    pid = os.WEXITSTATUS(status)
    return pid
def daemonize():
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    # When an inherited socket fd is present the process is already
    # supervised; only daemonize on a fresh start.
    if not 'GUNICORN_FD' in os.environ:
        # Double fork + setsid detaches from the controlling terminal.
        if os.fork():
            os._exit(0)
        os.setsid()
        if os.fork():
            os._exit(0)
        os.umask(0)
        maxfd = get_maxfd()  # project helper: upper bound on open fds
        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:
                # ERROR, fd wasn't open to begin with (ignored)
                pass
        # fd 0 is now the lowest free descriptor, so this becomes stdin...
        os.open(REDIRECT_TO, os.O_RDWR)
        # ...and stdout/stderr are made clones of it.
        os.dup2(0, 1)
        os.dup2(0, 2)
def _daemonize():
    """Daemonize with the double fork and point stdio at the null device.

    Returns early (without detaching further) if either fork fails.
    """
    # Fork once.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # Set some options to detach from the terminal.
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # Fork again.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # Find the OS /dev/null equivalent.
    nullfile = getattr(os, 'devnull', '/dev/null')
    # Redirect all standard I/O to /dev/null.
    sys.stdout.flush()
    sys.stderr.flush()
    # Py3 fixes: file() no longer exists; unbuffered (0) text mode is
    # not allowed -- use open() with default buffering.
    si = open(nullfile, 'r')
    so = open(nullfile, 'a+')
    se = open(nullfile, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def _daemonize(self):
    """Detach into a daemon unless config.NODETACH is set.

    Returns -1 in each exiting parent; the daemon itself falls through
    after writing its pid file and installing signal handlers.
    """
    if not self.config.NODETACH:
        # fork so the parent can exist
        if os.fork():
            return -1
        # disconnect from tty and create a new session
        os.setsid()
        # fork again so the parent, (the session group leader), can exit.
        # as a non-session group leader, we can never regain a controlling
        # terminal.
        if os.fork():
            return -1
        # move to the root to avoid mount problems
        os.chdir("/")
        # set paranoid umask
        os.umask(0o77)
        # write pid in a file; the context manager guarantees the handle
        # is closed (the original open/write/close sequence could leak on
        # a write error)
        with open(self._pid_file, "w") as f:
            f.write(str(os.getpid()))
        # close standard descriptors
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        # put signal handler
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGHUP, self.signal_handler)
def rpc_run(self, pcode, user, visitor):
    """Run untrusted profile code for *user* inside a sandbox.

    Spawns a detached ProfileAPIServer over a socketpair, then executes
    the profile code in a uid-isolated sandbox that talks to it via RPC.
    Returns None when the user has no credential record.
    """
    uid = 61018
    # Per-user scratch directory under /tmp; '/' in the username is
    # replaced so the name stays a single path component.
    userdir = "/tmp" + "/" + user.replace("/", "9")
    if not os.path.exists(userdir):
        os.mkdir(userdir)
        os.chmod(userdir, 0o770)  # Py3 fix: the literal 0770 is a SyntaxError
    db = zoodb.cred_setup()
    person = db.query(zoodb.Cred).get(user)
    if not person:
        return None
    token = person.token
    (sa, sb) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
    pid = os.fork()
    if pid == 0:
        # Double fork so the API server is not our direct child.
        # NOTE(review): os.fork() never returns a negative value in Python
        # (failures raise OSError), so `<= 0` behaves exactly like `== 0`.
        if os.fork() <= 0:
            sa.close()
            ProfileAPIServer(user, visitor, token).run_sock(sb)
            sys.exit(0)
        else:
            sys.exit(0)
    sb.close()
    os.waitpid(pid, 0)
    sandbox = sandboxlib.Sandbox(userdir, uid, "/profilesvc/lockfile")
    with rpclib.RpcClient(sa) as profile_api_client:
        return sandbox.run(lambda: run_profile(pcode, profile_api_client))
def commit(self, now_playing=None):
    """Fork one detached worker per scrobbler config and submit the cache.

    A pidfile per config acts as a lock; configs whose pidfile exists are
    skipped.  Raises Exception when a config entry is incomplete.
    """
    for config in scrobbler_config:
        pidfile = config.get('pidfile')
        password = config.get('password')
        scrobbler_url = config.get('scrobbler_url')
        username = config.get('username')
        cachefile = config.get('cachefile')
        if ((pidfile is None) or (password is None) or (scrobbler_url is None)
                or (username is None) or (cachefile is None)):
            raise Exception('Broken config! Something is missing.')
        if os.path.exists(pidfile):
            # A commit is already running -- maybe waiting for a network
            # timeout or something -- so do nothing.  (This note was a
            # stray no-op string statement in the original.)
            logger.info('Commit already running. Not commiting. (%s)' % pidfile)
            continue
        logger.debug('Forking')
        if not os.fork():
            os.setsid()
            pid = os.fork()
            if pid:
                # Intermediate child: record the worker's pid, then exit.
                # Py3 fix: file() -> open(); `with` also closes the handle.
                with open(pidfile, 'w') as fo:
                    fo.write(str(pid))
                logger.debug('Wrote pidfile')
                sys.exit(0)
            else:
                try:
                    self._real_commit(now_playing, cachefile, username, password, scrobbler_url)
                finally:
                    # Always release the pidfile lock, even on failure.
                    if os.path.exists(pidfile):
                        os.remove(pidfile)
                        logger.debug('Deleted pidfile')
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Double-fork daemonization; attach stdio to the given paths."""
    LOG.debug("Daemonize")
    # first fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # second fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)
    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    # Py3 fixes: file() no longer exists and unbuffered (0) text mode is
    # not allowed -- use open() with default buffering.
    si = open(stdin, 'r')
    so = open(stdout, "a+")
    se = open(stderr, "a+")
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def daemonize():
    """\
    Standard daemonization of a process. Code is basd on the
    ActiveState recipe at:
      http://code.activestate.com/recipes/278731/
    """
    # Only daemonize when not supervised via an inherited socket fd.
    if not 'GUNICORN_FD' in os.environ:
        if os.fork() == 0:
            os.setsid()
            # NOTE(review): here the process where the second fork returns
            # non-zero (the session leader) survives and the new child
            # exits -- this inverts the usual double-fork recipe, where
            # the non-leader survives.  Confirm this is intentional.
            if os.fork() != 0:
                os.umask(0)
            else:
                os._exit(0)
        else:
            os._exit(0)
        maxfd = get_maxfd()  # project helper: upper bound on open fds
        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:
                # ERROR, fd wasn't open to begin with (ignored)
                pass
        # fd 0 is now the lowest free descriptor: this open() becomes
        # stdin, and stdout/stderr are clones of it.
        os.open(REDIRECT_TO, os.O_RDWR)
        os.dup2(0, 1)
        os.dup2(0, 2)
def minion_async_run(retriever, method, args):
    """
    This is a simpler invocation for minion side async usage.

    Forks a detached worker that resolves *method* via *retriever*, runs
    it with *args*, records the result in the job database, and exits.
    The caller gets the job id back immediately.
    """
    # to avoid confusion of job id's (we use the same job database)
    # minion jobs contain the string "minion".
    job_id = "%s-minion" % pprint.pformat(time.time())
    __update_status(job_id, JOB_ID_RUNNING, -1)
    pid = os.fork()
    if pid != 0:
        os.waitpid(pid, 0)
        return job_id
    else:
        # daemonize!
        os.umask(0o77)  # Py3 fix: the literal 077 is a SyntaxError
        os.chdir('/')
        os.setsid()
        if os.fork():
            os._exit(0)
        try:
            function_ref = retriever(method)
            rc = function_ref(*args)
        except Exception as e:  # Py3 fix: "except Exception, e" syntax
            (t, v, tb) = sys.exc_info()
            rc = cm_utils.nice_exception(t, v, tb)
        __update_status(job_id, JOB_ID_FINISHED, rc)
        os._exit(0)
def daemonize(self):
    """Daemonize via double fork, then run the configured command."""
    import sys
    try:
        pid = os.fork()
        if pid > 0:
            # Exit first parent
            sys.exit(0)
    except OSError as e:
        # BUG FIX: print(sys.stderr, "...") printed the stream's repr to
        # stdout; file=sys.stderr actually writes to standard error.
        print("fork #1 failed: %d (%s)" % (e.errno, e.strerror), file=sys.stderr)
        sys.exit(1)
    # Decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)
    # Do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # Exit from second parent; print eventual PID before exiting
            print("Daemon PID %d" % pid)
            sys.exit(0)
    except OSError as e:
        print("fork #2 failed: %d (%s)" % (e.errno, e.strerror), file=sys.stderr)
        sys.exit(1)
    self.args.command()
def daemonize(pidfile):
    '''
    Turn this process into a daemon.
    - make sure the sys.std(in|out|err) are completely cut off
    - make our parent PID 1

    Write our new PID to the pidfile.

    From A.M. Kuuchling (possibly originally Greg Ward) with modification
    from Oren Tirosh, and finally a small mod from me.
    '''
    # Fork once
    if os.fork() != 0:
        os._exit(0)
    # Create new session
    os.setsid()
    # Second fork to force PPID=1
    pid = os.fork()
    if pid:
        # FIX: don't shadow the `pidfile` path argument with the open file
        # object; the context manager also guarantees the handle closes.
        with open(pidfile, 'w') as handle:
            handle.write(str(pid))
        os._exit(0)
    os.chdir("/")
    # close off std(in|out|err), redirect to devnull so the file
    # descriptors can't be used again
    devnull = os.open('/dev/null', 0)
    os.dup2(devnull, 0)
    os.dup2(devnull, 1)
    os.dup2(devnull, 2)
def daemonize():
    """ Do the UNIX double-fork magic for properly detaching process """
    def _fork_and_exit_parent(fail_fmt):
        # Fork; the parent half exits cleanly, a failed fork is reported
        # on stderr and aborts with status 1.
        try:
            if os.fork() > 0:
                sys.exit(0)
        except OSError as err:
            print(fail_fmt % (err.errno, err.strerror), file=sys.stderr)
            sys.exit(1)

    _fork_and_exit_parent("First fork failed: %d (%s)\n")

    # Decouple from parent environment
    os.setsid()
    os.umask(0o007)

    _fork_and_exit_parent("Second fork failed: %d (%s)\n")
def daemonize():
    """Daemonizes the current process using the standard double-fork.

    This function does not affect standard input, output, or error.

    :returns: The PID of the daemonized process (None if a fork failed).
    """
    # Fork once.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # Set some options to detach from the terminal.
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # Fork again.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return
    # BUG FIX: a second os.setsid() call stood here.  It made the
    # grandchild a session leader again, defeating the point of the second
    # fork (a session leader can reacquire a controlling terminal).
    return os.getpid()
def pipeline(command):
    """Execute *command* (a mutable list of argv tokens) honouring
    shell-style '|' pipes; '&' anywhere requests background execution."""
    try:
        if '&' in command:
            amper = 1
        else:
            amper = 0
        # child process runs the pipeline; the parent optionally waits
        child = os.fork()
        if child == 0:
            while '|' in command:
                # get '|' index position
                pipe_index = command.index('|')
                r, w = os.pipe()
                grand_child = os.fork()
                if grand_child == 0:
                    os.dup2(w, 1)  # stdout (fd 1) -> pipe write end
                    os.close(w)
                    os.close(r)
                    # execute the command before '|'.
                    # NOTE(review): execvp replaces this process, so the
                    # dup2/close lines below never run in the grandchild.
                    os.execvp(command[0], command[0:pipe_index])
                # this (pipe-reader) side: stdin <- pipe read end
                os.dup2(r, 0)
                os.close(r)
                os.close(w)
                # drop the consumed segment plus the '|' token
                del command[:pipe_index + 1]
            # no pipes left: exec the final stage in place
            os.execvp(command[0], command)
        else:
            if amper == 0:
                os.waitpid(child, 0)
    except:
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt -- consider `except Exception`.
        print("Unexpected error2:", sys.exc_info()[0])
def _detach_process(self):
    """Detach the process via the standard double-fork method with some
    extra magic: the intermediate child lingers briefly to verify the
    daemon did not die immediately, and reports the outcome."""
    # First fork to return control to the shell
    pid = os.fork()
    if pid > 0:
        # Wait for the first child, because it's going to wait and
        # check to make sure the second child is actually running
        # before exiting
        os.waitpid(pid, 0)
        os._exit(0)

    # Become a process group and session group leader
    os.setsid()

    # Fork again so the session group leader can exit and to ensure
    # we can never regain a controlling terminal
    pid = os.fork()
    if pid > 0:
        time.sleep(1)
        # After waiting one second, check to make sure the second
        # child hasn't become a zombie already
        status = os.waitpid(pid, os.WNOHANG)
        if status[0] == pid:
            # The child is already gone for some reason.
            # BUG FIX: `status[1] % 255` mangled the raw wait status; the
            # exit code lives in the high byte -- use WEXITSTATUS.
            exitcode = os.WEXITSTATUS(status[1])
            self._emit_failed()
            self._emit_error('Child exited immediately with non-zero exit '
                             'code {code}'.format(code=exitcode))
            os._exit(exitcode)
        else:
            self._emit_ok()
            os._exit(0)

    self._reset_file_descriptors()
def launch(func):
    """Run *func* in a detached grandchild process and return immediately
    in the caller.  The intermediate child exits after detaching."""
    child = os.fork()
    if child > 0:
        return
    grandchild = os.fork()
    # NOTE(review): both the intermediate child and the grandchild execute
    # the detach steps below before the intermediate child exits --
    # confirm this double setsid/chdir is intended.
    os.chdir(os.path.expanduser("~"))
    os.setsid()
    os.umask(0)
    sys.stdout.flush()
    sys.stderr.flush()
    # Py3 fixes: file() no longer exists and unbuffered (0) text mode is
    # not allowed -- use open() with default buffering.
    stdin = open(os.devnull, "r")
    stdout = open(os.devnull, "a+")
    stderr = open(os.devnull, "a+")
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
    if grandchild > 0:
        sys.exit(0)
    func()
    sys.exit(0)
def fork_daemon():
    """Double-fork into a daemon and rewire stdio to the module-level
    stdin/stdout/stderr paths.

    Adapted from Chad J. Schroeder's recipe,
    @see http://code.activestate.com/recipes/278731/
    Raises PDError if redirecting the standard descriptors fails.
    """
    pid = os.fork()
    if (pid == 0):
        # first child: start a new session, then fork the real daemon
        os.setsid()
        pid = os.fork()
        if (pid == 0):
            # grandchild (the daemon) continues below
            os.chdir('/')
            os.umask(0o22)  # Py3 fix: the literal 022 is a SyntaxError
        else:
            # first child exits
            os._exit(0)
    else:
        # original parent exits
        os._exit(0)
    # daemon: replace stdin/stdout/stderr with the configured files
    try:
        fd_inp = os.open(stdin, os.O_RDONLY)
        os.dup2(fd_inp, 0)
        fd_out = os.open(stdout, os.O_WRONLY | os.O_CREAT, 0o600)
        os.dup2(fd_out, 1)
        fd_err = os.open(stderr, os.O_WRONLY | os.O_CREAT, 0o600)
        os.dup2(fd_err, 2)
    except Exception as e:  # Py3 fix: "except Exception, e" syntax
        err = "Error with duping I/O, e=%s" % str(e)
        raise PDError(err)
def become_daemon(our_home_dir='.', out_log='/dev/null', err_log='/dev/null', umask=0o022):
    "Robustly turn into a UNIX daemon, running in our_home_dir."
    # First fork
    try:
        if os.fork() > 0:
            sys.exit(0)  # kill off parent
    except OSError as e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    os.setsid()
    os.chdir(our_home_dir)
    os.umask(umask)

    # Second fork
    try:
        if os.fork() > 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        os._exit(1)

    # BUG FIX: `buffering` was an undefined name here (NameError at
    # runtime).  Use line buffering so log lines appear promptly;
    # unbuffered (0) is not permitted for text-mode files on Python 3.
    buffering = 1
    si = open('/dev/null', 'r')
    so = open(out_log, 'a+', buffering)
    se = open(err_log, 'a+', buffering)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    # Set custom file descriptors so that they get proper buffering.
    sys.stdout, sys.stderr = so, se
def daemonize(self):
    """ See Advanced Programming in the UNIX, Sec 13.3 """
    try:
        pid = os.fork()
        if pid > 0:
            # Reap the intermediate child immediately (it exits right
            # after the second fork below).
            os.waitpid(pid, 0)
            # parent return instead of exit to give control
            return pid
    except OSError as e:
        raise Exception("%s [%d]" % (e.strerror, e.errno))
    os.setsid()
    """
    fork again to make sure the daemon is not session leader, which
    prevents it from acquiring controlling terminal
    """
    try:
        pid = os.fork()
        if pid > 0:
            # parent (the intermediate child) exits; its pid was reaped
            # above by the original caller.
            os._exit(0)
    except OSError as e:
        raise Exception("%s [%d]" % (e.strerror, e.errno))
    # Daemon: release inherited handles, then terminate this code path.
    self.cleanup_handles()
    os._exit(0)
def daemon():
    """Detach autosubliminal into a background daemon process."""
    # print() calls work as statements on Py2 and as functions on Py3.
    print("INFO: Starting as a daemon")
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError:
        sys.exit(1)
    os.chdir(autosubliminal.PATH)
    os.setsid()
    os.umask(0)
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError:
        sys.exit(1)
    print("INFO: Disabling console output for daemon")
    cherrypy.log.screen = False
    sys.stdin.close()
    sys.stdout.flush()
    sys.stderr.flush()
def __init__(self):
    """Fork immediately: the child returns to the caller while the
    parent stays behind in watch()."""
    self.child = os.fork()
    if self.child != 0:
        # Parent process: supervise the child.
        self.watch()
def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env,
                   universal_newlines, startupinfo, creationflags, shell,
                   to_close, p2cread, p2cwrite, c2pread, c2pwrite,
                   errread, errwrite):
    """Execute program (POSIX version)"""
    # Normalize args to an argv list (Python 2: types.StringTypes covers
    # str and unicode).
    if isinstance(args, types.StringTypes):
        args = [args]
    else:
        args = list(args)

    if shell:
        # Run through the shell; an explicit executable replaces /bin/sh.
        args = ['/bin/sh', '-c'] + args
        if executable:
            args[0] = executable

    if executable is None:
        executable = args[0]

    def _close_in_parent(fd):
        # Close the parent's copy of a pipe end and drop it from the
        # caller-supplied cleanup list.
        os.close(fd)
        to_close.remove(fd)

    # Pipe used to transfer a possible exec failure from child to parent.
    errpipe_read, errpipe_write = self.pipe_cloexec()
    try:
        try:
            # Disable gc around the fork to avoid finalizers running in
            # an inconsistent child (historic CPython precaution).
            gc_was_enabled = gc.isenabled()
            gc.disable()
            try:
                self.pid = os.fork()
            except:
                if gc_was_enabled:
                    gc.enable()
                raise
            self._child_created = True
            if self.pid == 0:
                # --- Child process ---
                try:
                    # Close parent's pipe ends.
                    if p2cwrite is not None:
                        os.close(p2cwrite)
                    if c2pread is not None:
                        os.close(c2pread)
                    if errread is not None:
                        os.close(errread)
                    os.close(errpipe_read)

                    # If one of the target fds is 0 or 1 it could be
                    # clobbered by the dup2 sequence below; move it first.
                    if c2pwrite == 0:
                        c2pwrite = os.dup(c2pwrite)
                    if errwrite == 0 or errwrite == 1:
                        errwrite = os.dup(errwrite)

                    # Dup fds for child; when source == destination only
                    # the close-on-exec flag needs clearing.
                    def _dup2(a, b):
                        if a == b:
                            self._set_cloexec_flag(a, False)
                        elif a is not None:
                            os.dup2(a, b)
                        return
                    _dup2(p2cread, 0)
                    _dup2(c2pwrite, 1)
                    _dup2(errwrite, 2)

                    # Close pipe fds, never the same fd twice and never
                    # the standard descriptors.
                    closed = {None}
                    for fd in [p2cread, c2pwrite, errwrite]:
                        if fd not in closed and fd > 2:
                            os.close(fd)
                            closed.add(fd)

                    if cwd is not None:
                        os.chdir(cwd)

                    if preexec_fn:
                        preexec_fn()

                    # Close all other fds if requested, sparing the error
                    # pipe so exec failures can still be reported.
                    if close_fds:
                        self._close_fds(but=errpipe_write)

                    if env is None:
                        os.execvp(executable, args)
                    else:
                        os.execvpe(executable, args, env)
                except:
                    # Ship the exception (with traceback text attached)
                    # to the parent through the error pipe, then die hard.
                    exc_type, exc_value, tb = sys.exc_info()
                    exc_lines = traceback.format_exception(exc_type,
                                                           exc_value, tb)
                    exc_value.child_traceback = ''.join(exc_lines)
                    os.write(errpipe_write, pickle.dumps(exc_value))
                    os._exit(255)

            # --- Parent process ---
            if gc_was_enabled:
                gc.enable()
        finally:
            # Be sure the child's end of the error pipe is closed here no
            # matter what, so the read below can see EOF.
            os.close(errpipe_write)

        # Blocks until the child execs (EOF) or fails (pickled payload).
        data = _eintr_retry_call(os.read, errpipe_read, 1048576)
    finally:
        # Hand the parent's pipe ends back for cleanup.
        if p2cread is not None and p2cwrite is not None:
            _close_in_parent(p2cread)
        if c2pwrite is not None and c2pread is not None:
            _close_in_parent(c2pwrite)
        if errwrite is not None and errread is not None:
            _close_in_parent(errwrite)
        os.close(errpipe_read)

    if data != '':
        # The exec failed: reap the child, then re-raise its exception
        # here in the parent.
        try:
            _eintr_retry_call(os.waitpid, self.pid, 0)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
        child_exception = pickle.loads(data)
        raise child_exception
    return
def daemonize(self):
    """
    Fork off as a daemon: double fork, optionally write the PID file,
    and redirect all standard streams to the OS null device.
    """
    # pylint: disable=no-member,protected-access

    # Make a non-session-leader child process
    try:
        pid = os.fork()  # only available in UNIX
        if pid != 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write(u"fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    os.setsid()  # only available in UNIX

    # Daemons traditionally run with umask 0 and this should not have repercussions
    os.umask(0)

    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork()
        if pid != 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write(u"fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Write pid
    if self.CREATEPID:
        pid = str(os.getpid())
        logger.log(u"Writing PID: " + pid + " to " + str(self.PIDFILE))
        try:
            # Py3 fix: file() -> open(); `with` also closes the handle,
            # which the original one-liner leaked.
            with open(self.PIDFILE, 'w') as pid_f:
                pid_f.write("%s\n" % pid)
        except IOError as e:
            logger.log_error_and_exit(u"Unable to write PID file: " + self.PIDFILE + " Error: " + str(e.strerror) + " [" + str(e.errno) + "]")

    # Redirect all output
    sys.stdout.flush()
    sys.stderr.flush()

    devnull = getattr(os, 'devnull', '/dev/null')
    # Py3 fix: file() -> open().
    stdin = open(devnull, 'r')
    stdout = open(devnull, 'a+')
    stderr = open(devnull, 'a+')

    os.dup2(stdin.fileno(), getattr(sys.stdin, 'device', sys.stdin).fileno())
    os.dup2(stdout.fileno(), getattr(sys.stdout, 'device', sys.stdout).fileno())
    os.dup2(stderr.fileno(), getattr(sys.stderr, 'device', sys.stderr).fileno())
def daemonize(pidfile):
    """Detach into a daemon: double fork, install signal handlers that
    raise KeyboardInterrupt, write *pidfile* atomically (removed again at
    exit), and point stdio at the null device.

    Raises error.ControlPlaneError when a fork or the pidfile write fails.
    """
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            os._exit(0)
    except OSError as exc:
        raise error.ControlPlaneError('ERROR: fork #1 failed: %s' % exc)

    # decouple from parent environment
    try:
        os.chdir('/')
        os.setsid()
    except OSError:
        pass

    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            os._exit(0)
    except OSError as exc:
        raise error.ControlPlaneError('ERROR: fork #2 failed: %s' % exc)

    def signal_cb(s, f):
        # Translate termination signals into KeyboardInterrupt so the
        # main loop can unwind cleanly.
        raise KeyboardInterrupt
    for s in (signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT):
        signal.signal(s, signal_cb)

    # write pidfile; also remove it again when the daemon exits
    def atexit_cb():
        try:
            if pidfile:
                os.remove(pidfile)
        except OSError:
            pass
    atexit.register(atexit_cb)

    try:
        if pidfile:
            # write to a temp file in the same directory and rename, so
            # the pidfile update is atomic
            fd, nm = tempfile.mkstemp(dir=os.path.dirname(pidfile))
            os.write(fd, ('%d\n' % os.getpid()).encode('utf-8'))
            os.close(fd)
            os.rename(nm, pidfile)
    except Exception as exc:
        raise error.ControlPlaneError(
            'Failed to create PID file %s: %s' % (pidfile, exc))

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'a+')
    se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
# Since abstract namespace sockets are specific to Linux, this program will # only work on Linux. import os import socket import requests_unixsocket def handle_response(): # Listens on an abstract namespace socket and sends one HTTP response sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.bind('\0test_socket') sock.listen(1) client_sock, addr = sock.accept() client_sock.recv(1024) client_sock.sendall(b'HTTP/1.0 200 OK\r\n') client_sock.sendall(b'Content-Type: text/plain\r\n\r\n') client_sock.sendall(b'Hello world!') if os.fork() == 0: # child handle_response() else: # parent try: session = requests_unixsocket.Session() res = session.get('http+unix://\0test_socket/get') print(res.text) finally: os.wait()
def mk_daemon():
    """Begin daemonizing the process."""  # translated from Russian
    # Single fork: the parent exits, the detached child carries on.
    if os.fork():
        sys.exit()
    os.setsid()
    # Point the Python-level stdin at /dev/null, then clone its descriptor
    # over the original OS-level stdin (sys.__stdin__'s fd).
    sys.stdin = open('/dev/null')
    os.dup2(sys.stdin.fileno(), sys.__stdin__.fileno())
def _start_service(self, config, worker_id):
    """Fork a worker for *config*: the parent gets the child pid back;
    the child sets up signals/watchdogs, builds the service object and
    never returns (it sleeps forever while service threads run)."""
    self._slowdown_respawn_if_needed()
    pid = os.fork()
    if pid != 0:
        # Parent: report the new worker's pid to the caller.
        return pid
    # reset parent signals
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGALRM, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGHUP, signal.SIG_DFL)
    # Close write to ensure only parent has it open
    os.close(self.writepipe)
    _spawn(self._watch_parent_process)
    # Reseed random number generator (forked children share parent state)
    random.seed()
    # Create and run a new service
    with _exit_on_exception():
        # Signals arriving while the service object is still being built
        # are parked here and replayed once the real handlers exist.
        catched_signals = {
            signal.SIGHUP: None,
            signal.SIGTERM: None,
        }

        def signal_delayer(sig, frame):
            signal.signal(signal.SIGTERM, signal.SIG_IGN)
            LOG.info('Caught signal (%s) during service initialisation, '
                     'delaying it' % sig)
            catched_signals[sig] = frame

        # Setup temporary signals
        signal.signal(signal.SIGHUP, signal_delayer)
        signal.signal(signal.SIGTERM, signal_delayer)

        # Initialize the service process
        args = tuple() if config.args is None else config.args
        kwargs = dict() if config.kwargs is None else config.kwargs
        self._current_process = config.service(worker_id, *args, **kwargs)

        # Setup final signals; replay any signal delayed during init.
        if catched_signals[signal.SIGTERM] is not None:
            self._current_process._clean_exit(
                signal.SIGTERM, catched_signals[signal.SIGTERM])
        signal.signal(signal.SIGTERM, self._current_process._clean_exit)
        if catched_signals[signal.SIGHUP] is not None:
            self._current_process._reload(signal.SIGHUP,
                                          catched_signals[signal.SIGHUP])
        signal.signal(signal.SIGHUP, self._current_process._reload)

        # Start the main thread
        _spawn(self._current_process._run)

        # Wait forever
        # NOTE(sileht): we cannot use threading.Event().wait() or
        # threading.Thread().join() because of
        # https://bugs.python.org/issue5315
        while True:
            time.sleep(1000000000)
#!/usr/bin/python import os import sys import dbus import functools from io import open from time import sleep from lib.bus import get_bus import signal from lib.config import PULSEAUDIO_PATH from lib.fader import PulseAudioStreamFader signal.signal(signal.SIGUSR1, signal.SIG_IGN) fd_out, fd_in = os.pipe() core_pid = os.getpid() child_pid = os.fork() if not child_pid: # Stream changes monitoring subprocess thread. from dbus.mainloop.glib import DBusGMainLoop from gi.repository import GLib os.close(fd_out) pipe = open(fd_in, 'wb', buffering=0) pipe.write(b'\n') # Wait for main process to get ready. DBusGMainLoop(set_as_default=True) loop = GLib.MainLoop() signal.signal(signal.SIGUSR1, lambda sig, frm: loop.quit())
message = myUnpickler.load() # print the string we received print "Server Received: %s" % message print "------------------Server Starting------------------" # bind server on local address soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) soc.bind(("localhost", 10014)) soc.listen(10) # loop infinitely to handle all incoming connections while True: # wait for client connection clientSoc, addr = soc.accept() print "A connection from %s:%d is here" % (addr[0], addr[1]) # handle in new thread if (os.fork() == 0): # send acceptance message to client clientSoc.send("Accepted connection from %s:%d" % (addr[0], addr[1])) # receive and handle data from client server(clientSoc) # exit the handling thread exit(0) print "--------------------Server Exit--------------------" soc.close()
pid += 1 packet=IPv6(src=SRC_OUT6, dst=DST_IN6)/ \ ICMPv6EchoRequest(id=pid, data=payload) frag = [] frag.append(IPv6ExtHdrFragment(nh=58, id=pid, m=1)/ \ str(packet)[40:48]) frag.append(IPv6ExtHdrFragment(nh=58, id=pid, offset=1, m=1)/ \ str(packet)[48:56]) frag.append(IPv6ExtHdrFragment(nh=58, id=pid, offset=2)/ \ str(packet)[56:64]) eth = [] for i in range(3): pkt = IPv6(src=SRC_OUT6, dst=DST_IN6) / frag[p[i]] eth.append(Ether(src=SRC_MAC, dst=DST_MAC) / pkt) if os.fork() == 0: time.sleep(1) sendp(eth, iface=SRC_IF) os._exit(0) ans = sniff(iface=SRC_IF, timeout=3, filter="ip6 and src " + DST_IN6 + " and dst " + SRC_OUT6 + " and icmp6") for a in ans: if a and a.type == ETH_P_IPV6 and \ ipv6nh[a.payload.nh] == 'ICMPv6' and \ icmp6types[a.payload.payload.type] == 'Echo Reply': id = a.payload.payload.id print "id=%#x" % (id) if id != pid:
def main():  # noqa
    """CLI entry point: fork one worker process per ``--processes``, then act
    as the master — multiplexing worker log pipes to the log file, forwarding
    signals, reaping workers, and optionally watching sources for reload.

    Returns an exit code (RET_OK / RET_PIDFILE / max worker exit status), or
    never returns when re-exec'ing after SIGHUP.
    """
    args = parse_arguments()
    for path in args.path:
        sys.path.append(path)

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        # Pidfile already held by a live process: log and bail out.
        logger = setup_parent_logging(args, stream=args.log_file or sys.stderr)
        logger.critical(e)
        return RET_PIDFILE

    worker_pipes = []
    worker_processes = []
    for worker_id in range(args.processes):
        # One pipe per worker: the worker writes its log output to write_fd,
        # the master reads it from read_fd.
        read_fd, write_fd = os.pipe()
        pid = os.fork()
        if pid != 0:
            # Master: keep only the read end and remember the child pid.
            os.close(write_fd)
            worker_pipes.append(os.fdopen(read_fd))
            worker_processes.append(pid)
            continue

        # Worker child: keep only the write end and never return to this loop.
        os.close(read_fd)
        return worker_process(args, worker_id, write_fd)

    # The master routes its own log output through a pipe as well, so that
    # watch_logs() below is the single writer to the log file.
    parent_read_fd, parent_write_fd = os.pipe()
    parent_read_pipe = os.fdopen(parent_read_fd)
    parent_write_pipe = os.fdopen(parent_write_fd, "w")
    logger = setup_parent_logging(args, stream=parent_write_pipe)
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False
    if HAS_WATCHDOG and args.watch:
        if args.watch_use_polling:
            observer_class = watchdog.observers.polling.PollingObserver
        else:
            observer_class = watchdog_gevent.Observer

        file_event_handler = SourceChangesHandler(patterns=["*.py"])
        file_watcher = observer_class()
        file_watcher.schedule(file_event_handler, args.watch, recursive=True)
        file_watcher.start()

    def watch_logs(worker_pipes):
        """Multiplex the master pipe and all worker pipes into the log file,
        emitting only whole lines so workers' output doesn't interleave."""
        nonlocal running
        log_file = args.log_file or sys.stderr
        selector = selectors.DefaultSelector()
        for pipe in [parent_read_pipe] + worker_pipes:
            selector.register(pipe, selectors.EVENT_READ)

        # Per-fd accumulation buffers for partial lines.
        buffers = defaultdict(str)
        while running:
            # timeout=1 so the loop notices `running = False` promptly.
            events = selector.select(timeout=1)
            for key, mask in events:
                data = os.read(key.fd, BUFSIZE)
                if not data:
                    # EOF: the writer exited; flush whatever is buffered.
                    selector.unregister(key.fileobj)
                    log_file.write(buffers[key.fd])
                    log_file.flush()
                    continue

                buffers[key.fd] += data.decode("utf-8")
                # Drain complete lines; keep any trailing partial line buffered.
                while buffers[key.fd]:
                    index = buffers[key.fd].find("\n")
                    if index == -1:
                        break
                    line = buffers[key.fd][:index + 1]
                    buffers[key.fd] = buffers[key.fd][index + 1:]
                    log_file.write(line)
                    log_file.flush()

        logger.debug("Closing selector...")
        selector.close()

    log_watcher = Thread(target=watch_logs, args=(worker_pipes,), daemon=True)
    log_watcher.start()

    def sighandler(signum, frame):
        """Forward INT/TERM as TERM and HUP as HUP to all workers; a HUP also
        marks the master for re-exec once the workers have exited."""
        nonlocal reload_process, worker_processes
        reload_process = signum == signal.SIGHUP
        signum = {
            signal.SIGINT: signal.SIGTERM,
            signal.SIGTERM: signal.SIGTERM,
            signal.SIGHUP: signal.SIGHUP,
        }[signum]

        logger.info("Sending %r to worker processes...", signum.name)
        for pid in worker_processes:
            try:
                os.kill(pid, signum)
            except OSError:  # pragma: no cover
                logger.warning("Failed to send %r to pid %d.", signum.name, pid)

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    signal.signal(signal.SIGHUP, sighandler)
    for pid in worker_processes:
        pid, rc = os.waitpid(pid, 0)
        # waitpid status: exit code lives in the high byte.
        retcode = max(retcode, rc >> 8)

    # All workers reaped — stop the log thread and tear everything down.
    running = False
    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    log_watcher.join()
    for pipe in [parent_read_pipe, parent_write_pipe, *worker_pipes]:
        pipe.close()

    if reload_process:
        # Re-exec ourselves in place (SIGHUP-triggered reload).
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp("python", ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
p0_fileList = [] p1_fileList = [] p2_fileList = [] p3_fileList = [] for index, file in enumerate(pipe): if index % 4 == 1: p0_fileList.append(file.rstrip()) if index % 4 == 2: p1_fileList.append(file.rstrip()) elif index % 4 == 3: p2_fileList.append(file.rstrip()) else: p3_fileList.append(file.rstrip()) pipe.close() pid = os.fork() if pid: pid2 = os.fork() if pid2: if RUN: solveProblems(p0_fileList) os.waitpid(pid2, 0) else: if RUN: solveProblems(p1_fileList) sys.exit(0) else: pid3 = os.fork() if pid3: if RUN: solveProblems(p2_fileList)
def test_mount_namespace(self): text = """ #include <uapi/linux/ptrace.h> BPF_TABLE("array", int, u64, stats, 1); static void incr(int idx) { u64 *ptr = stats.lookup(&idx); if (ptr) ++(*ptr); } int count(struct pt_regs *ctx) { bpf_trace_printk("count() uprobe fired"); u32 pid = bpf_get_current_pid_tgid(); if (pid == PID) incr(0); return 0; }""" # Need to import libc from ctypes to access unshare(2) libc = ctypes.CDLL("libc.so.6", use_errno=True) # Need to find path to libz.so.1 libz_path = None p = subprocess.Popen(["ldconfig", "-p"], stdout=subprocess.PIPE) for l in p.stdout: n = l.split() if n[0] == "libz.so.1": libz_path = n[-1] p.wait() p = None self.assertIsNotNone(libz_path) # fork a child that we'll place in a separate mount namespace child_pid = os.fork() if child_pid == 0: # Unshare CLONE_NEWNS if libc.unshare(0x00020000) == -1: e = ctypes.get_errno() raise OSError(e, errno.errorcode[e]) # Remount root MS_REC|MS_PRIVATE if libc.mount(None, "/", None, (1<<14)|(1<<18) , None) == -1: e = ctypes.get_errno() raise OSError(e, errno.errorcode[e]) if libc.mount("tmpfs", "/tmp", "tmpfs", 0, None) == -1: e = ctypes.get_errno() raise OSError(e, errno.errorcode[e]) shutil.copy(libz_path, "/tmp") libz = ctypes.CDLL("/tmp/libz.so.1") time.sleep(1) libz.zlibVersion() time.sleep(5) os._exit(0) libname = "/tmp/libz.so.1" symname = "zlibVersion" text = text.replace("PID", "%d" % child_pid) b = bcc.BPF(text=text) b.attach_uprobe(name=libname, sym=symname, fn_name="count", pid=child_pid) b.attach_uretprobe(name=libname, sym=symname, fn_name="count", pid=child_pid) time.sleep(1) self.assertEqual(b["stats"][ctypes.c_int(0)].value, 2) b.detach_uretprobe(name=libname, sym=symname, pid=child_pid) b.detach_uprobe(name=libname, sym=symname, pid=child_pid) os.wait()
def fork(cls):
    """Fork the process, restarting the error-handler thread in the child.

    Threads do not survive ``fork(2)``, so the child re-initializes its
    error handler before returning.  Mirrors ``os.fork()``: returns the
    child's pid in the parent and ``0`` in the child.
    """
    child_pid = os.fork()
    if child_pid == 0:
        # We are the child — bring the errhandler thread back up.
        cls.init_errhandler()
    return child_pid
def graph_draw(g, pos=None, size=(15, 15), pin=False, layout=None, maxiter=None, ratio="fill", overlap=True, sep=None, splines=False, vsize=0.105, penwidth=1.0, elen=None, gprops={}, vprops={}, eprops={}, vcolor="#a40000", ecolor="#2e3436", vcmap=None, vnorm=True, ecmap=None, enorm=True, vorder=None, eorder=None, output="", output_format="auto", fork=False, return_string=False): r"""Draw a graph using graphviz. Parameters ---------- g : :class:`~graph_tool.Graph` Graph to be drawn. pos : :class:`~graph_tool.PropertyMap` or tuple of :class:`~graph_tool.PropertyMap` (optional, default: ``None``) Vertex property maps containing the x and y coordinates of the vertices. size : tuple of scalars (optional, default: ``(15,15)``) Size (in centimeters) of the canvas. pin : bool or :class:`~graph_tool.PropertyMap` (default: ``False``) If ``True``, the vertices are not moved from their initial position. If a :class:`~graph_tool.PropertyMap` is passed, it is used to pin nodes individually. layout : string (default: ``"neato" if g.num_vertices() <= 1000 else "sfdp"``) Layout engine to be used. Possible values are ``"neato"``, ``"fdp"``, ``"dot"``, ``"circo"``, ``"twopi"`` and ``"arf"``. maxiter : int (default: ``None``) If specified, limits the maximum number of iterations. ratio : string or float (default: ``"fill"``) Sets the aspect ratio (drawing height/drawing width) for the drawing. Note that this is adjusted before the ``size`` attribute constraints are enforced. If ``ratio`` is numeric, it is taken as the desired aspect ratio. Then, if the actual aspect ratio is less than the desired ratio, the drawing height is scaled up to achieve the desired ratio; if the actual ratio is greater than that desired ratio, the drawing width is scaled up. If ``ratio == "fill"`` and the size attribute is set, node positions are scaled, separately in both x and y, so that the final drawing exactly fills the specified size. 
If ``ratio == "compress"`` and the size attribute is set, dot attempts to compress the initial layout to fit in the given size. This achieves a tighter packing of nodes but reduces the balance and symmetry. This feature only works in dot. If ``ratio == "expand"``, the size attribute is set, and both the width and the height of the graph are less than the value in size, node positions are scaled uniformly until at least one dimension fits size exactly. Note that this is distinct from using size as the desired size, as here the drawing is expanded before edges are generated and all node and text sizes remain unchanged. If ``ratio == "auto"``, the page attribute is set and the graph cannot be drawn on a single page, then size is set to an "ideal" value. In particular, the size in a given dimension will be the smallest integral multiple of the page size in that dimension which is at least half the current size. The two dimensions are then scaled independently to the new size. This feature only works in dot. overlap : bool or string (default: ``"prism"``) Determines if and how node overlaps should be removed. Nodes are first enlarged using the sep attribute. If ``True``, overlaps are retained. If the value is ``"scale"``, overlaps are removed by uniformly scaling in x and y. If the value is ``False``, node overlaps are removed by a Voronoi-based technique. If the value is ``"scalexy"``, x and y are separately scaled to remove overlaps. If sfdp is available, one can set overlap to ``"prism"`` to use a proximity graph-based algorithm for overlap removal. This is the preferred technique, though ``"scale"`` and ``False`` can work well with small graphs. This technique starts with a small scaling up, controlled by the overlap_scaling attribute, which can remove a significant portion of the overlap. The prism option also accepts an optional non-negative integer suffix. This can be used to control the number of attempts made at overlap removal. 
By default, ``overlap == "prism"`` is equivalent to ``overlap == "prism1000"``. Setting ``overlap == "prism0"`` causes only the scaling phase to be run. If the value is ``"compress"``, the layout will be scaled down as much as possible without introducing any overlaps, obviously assuming there are none to begin with. sep : float (default: ``None``) Specifies margin to leave around nodes when removing node overlap. This guarantees a minimal non-zero distance between nodes. splines : bool (default: ``False``) If ``True``, the edges are drawn as splines and routed around the vertices. vsize : float, :class:`~graph_tool.PropertyMap`, or tuple (default: ``0.105``) Default vertex size (width and height). If a tuple is specified, the first value should be a property map, and the second is a scale factor. penwidth : float, :class:`~graph_tool.PropertyMap` or tuple (default: ``1.0``) Specifies the width of the pen, in points, used to draw lines and curves, including the boundaries of edges and clusters. It has no effect on text. If a tuple is specified, the first value should be a property map, and the second is a scale factor. elen : float or :class:`~graph_tool.PropertyMap` (default: ``None``) Preferred edge length, in inches. gprops : dict (default: ``{}``) Additional graph properties, as a dictionary. The keys are the property names, and the values must be convertible to string. vprops : dict (default: ``{}``) Additional vertex properties, as a dictionary. The keys are the property names, and the values must be convertible to string, or vertex property maps, with values convertible to strings. eprops : dict (default: ``{}``) Additional edge properties, as a dictionary. The keys are the property names, and the values must be convertible to string, or edge property maps, with values convertible to strings. vcolor : string or :class:`~graph_tool.PropertyMap` (default: ``"#a40000"``) Drawing color for vertices. 
If the valued supplied is a property map, the values must be scalar types, whose color values are obtained from the ``vcmap`` argument. ecolor : string or :class:`~graph_tool.PropertyMap` (default: ``"#2e3436"``) Drawing color for edges. If the valued supplied is a property map, the values must be scalar types, whose color values are obtained from the ``ecmap`` argument. vcmap : :class:`matplotlib.colors.Colormap` (default: :class:`matplotlib.cm.jet`) Vertex color map. vnorm : bool (default: ``True``) Normalize vertex color values to the [0,1] range. ecmap : :class:`matplotlib.colors.Colormap` (default: :class:`matplotlib.cm.jet`) Edge color map. enorm : bool (default: ``True``) Normalize edge color values to the [0,1] range. vorder : :class:`~graph_tool.PropertyMap` (default: ``None``) Scalar vertex property map which specifies the order with which vertices are drawn. eorder : :class:`~graph_tool.PropertyMap` (default: ``None``) Scalar edge property map which specifies the order with which edges are drawn. output : string (default: ``""``) Output file name. output_format : string (default: ``"auto"``) Output file format. Possible values are ``"auto"``, ``"xlib"``, ``"ps"``, ``"svg"``, ``"svgz"``, ``"fig"``, ``"mif"``, ``"hpgl"``, ``"pcl"``, ``"png"``, ``"gif"``, ``"dia"``, ``"imap"``, ``"cmapx"``. If the value is ``"auto"``, the format is guessed from the ``output`` parameter, or ``xlib`` if it is empty. If the value is ``None``, no output is produced. fork : bool (default: ``False``) If ``True``, the program is forked before drawing. This is used as a work-around for a bug in graphviz, where the ``exit()`` function is called, which would cause the calling program to end. This is always assumed ``True``, if ``output_format == 'xlib'``. return_string : bool (default: ``False``) If ``True``, a string containing the rendered graph as binary data is returned (defaults to png format). 
Returns ------- pos : :class:`~graph_tool.PropertyMap` Vector vertex property map with the x and y coordinates of the vertices. gv : gv.digraph or gv.graph (optional, only if ``returngv == True``) Internally used graphviz graph. Notes ----- This function is a wrapper for the [graphviz] routines. Extensive additional documentation for the graph, vertex and edge properties is available at: http://www.graphviz.org/doc/info/attrs.html. Examples -------- >>> from numpy import * >>> from numpy.random import seed, zipf >>> seed(42) >>> g = gt.random_graph(1000, lambda: min(zipf(2.4), 40), ... lambda i, j: exp(abs(i - j)), directed=False) >>> # extract largest component >>> g = gt.GraphView(g, vfilt=gt.label_largest_component(g)) >>> deg = g.degree_property_map("out") >>> deg.a = 2 * (sqrt(deg.a) * 0.5 + 0.4) >>> ebet = gt.betweenness(g)[1] >>> ebet.a *= 4000 >>> ebet.a += 10 >>> gt.graph_draw(g, vsize=deg, vcolor=deg, vorder=deg, elen=10, ... ecolor=ebet, eorder=ebet, penwidth=ebet, ... overlap="prism", output="graph-draw.pdf") <...> .. figure:: graph-draw.* :align: center Kamada-Kawai force-directed layout of a graph with a power-law degree distribution, and dissortative degree correlation. The vertex size and color indicate the degree, and the edge color and width the edge betweeness centrality. References ---------- .. 
[graphviz] http://www.graphviz.org """ if output != "" and output is not None: output = os.path.expanduser(output) # check opening file for writing, since graphviz will bork if it is not # possible to open file if os.path.dirname(output) != "" and \ not os.access(os.path.dirname(output), os.W_OK): raise IOError("cannot write to " + os.path.dirname(output)) has_layout = False try: gvg = libgv.agopen("G", 1 if g.is_directed() else 0) if layout is None: if pin == False: layout = "neato" if g.num_vertices() <= 1000 else "sfdp" else: layout = "neato" if layout == "arf": layout = "neato" pos = arf_layout(g, pos=pos) pin = True if pos is not None: # copy user-supplied property if isinstance(pos, PropertyMap): pos = ungroup_vector_property(pos, [0, 1]) else: pos = (g.copy_property(pos[0]), g.copy_property(pos[1])) if type(vsize) == tuple: s = g.new_vertex_property("double") g.copy_property(vsize[0], s) s.a *= vsize[1] vsize = s if type(penwidth) == tuple: s = g.new_edge_property("double") g.copy_property(penwidth[0], s) s.a *= penwidth[1] penwidth = s # main graph properties aset(gvg, "outputorder", "edgesfirst") aset(gvg, "mode", "major") if type(overlap) is bool: overlap = "true" if overlap else "false" else: overlap = str(overlap) aset(gvg, "overlap", overlap) if sep is not None: aset(gvg, "sep", sep) if splines: aset(gvg, "splines", "true") aset(gvg, "ratio", ratio) # size is in centimeters... 
convert to inches aset(gvg, "size", "%f,%f" % (size[0] / 2.54, size[1] / 2.54)) if maxiter is not None: aset(gvg, "maxiter", maxiter) seed = numpy.random.randint(sys.maxint) aset(gvg, "start", "%d" % seed) # apply all user supplied graph properties for k, val in gprops.iteritems(): if isinstance(val, PropertyMap): aset(gvg, k, val[g]) else: aset(gvg, k, val) # normalize color properties if (isinstance(vcolor, PropertyMap) and vcolor.value_type() != "string"): minmax = [float("inf"), -float("inf")] for v in g.vertices(): c = vcolor[v] minmax[0] = min(c, minmax[0]) minmax[1] = max(c, minmax[1]) if minmax[0] == minmax[1]: minmax[1] += 1 if vnorm: vnorm = matplotlib.colors.normalize(vmin=minmax[0], vmax=minmax[1]) else: vnorm = lambda x: x if (isinstance(ecolor, PropertyMap) and ecolor.value_type() != "string"): minmax = [float("inf"), -float("inf")] for e in g.edges(): c = ecolor[e] minmax[0] = min(c, minmax[0]) minmax[1] = max(c, minmax[1]) if minmax[0] == minmax[1]: minmax[1] += 1 if enorm: enorm = matplotlib.colors.normalize(vmin=minmax[0], vmax=minmax[1]) else: enorm = lambda x: x if vcmap is None: vcmap = matplotlib.cm.jet if ecmap is None: ecmap = matplotlib.cm.jet # add nodes if vorder is not None: vertices = sorted(g.vertices(), lambda a, b: cmp(vorder[a], vorder[b])) else: vertices = g.vertices() for v in vertices: n = libgv.agnode(gvg, str(int(v))) if type(vsize) == PropertyMap: vw = vh = vsize[v] else: vw = vh = vsize aset(n, "shape", "circle") aset(n, "width", "%g" % vw) aset(n, "height", "%g" % vh) aset(n, "style", "filled") aset(n, "color", "#2e3436") # apply color if isinstance(vcolor, str): aset(n, "fillcolor", vcolor) else: color = vcolor[v] if isinstance(color, str): aset(n, "fillcolor", color) else: color = tuple( [int(c * 255.0) for c in vcmap(vnorm(color))]) aset(n, "fillcolor", "#%.2x%.2x%.2x%.2x" % color) aset(n, "label", "") # user supplied position if pos is not None: if isinstance(pin, bool): pin_val = pin else: pin_val = pin[v] aset( n, 
"pos", "%f,%f%s" % (pos[0][v], pos[1][v], "!" if pin_val else "")) aset(n, "pin", pin_val) # apply all user supplied properties for k, val in vprops.iteritems(): if isinstance(val, PropertyMap): aset(n, k, val[v]) else: aset(n, k, val) # add edges if eorder is not None: edges = sorted(g.edges(), lambda a, b: cmp(eorder[a], eorder[b])) else: edges = g.edges() for e in edges: ge = libgv.agedge(gvg, libgv.agnode(gvg, str(int(e.source()))), libgv.agnode(gvg, str(int(e.target())))) aset(ge, "arrowsize", "0.3") if g.is_directed(): aset(ge, "arrowhead", "vee") # apply color if isinstance(ecolor, str): aset(ge, "color", ecolor) else: color = ecolor[e] if isinstance(color, str): aset(ge, "color", color) else: color = tuple( [int(c * 255.0) for c in ecmap(enorm(color))]) aset(ge, "color", "#%.2x%.2x%.2x%.2x" % color) # apply edge length if elen is not None: if isinstance(elen, PropertyMap): aset(ge, "len", elen[e]) else: aset(ge, "len", elen) # apply width if penwidth is not None: if isinstance(penwidth, PropertyMap): aset(ge, "penwidth", penwidth[e]) else: aset(ge, "penwidth", penwidth) # apply all user supplied properties for k, v in eprops.iteritems(): if isinstance(v, PropertyMap): aset(ge, k, v[e]) else: aset(ge, k, v) libgv.gvLayout(gvc, gvg, layout) has_layout = True retv = libgv.gvRender(gvc, gvg, "dot", None) # retrieve positions only if pos == None: pos = (g.new_vertex_property("double"), g.new_vertex_property("double")) for v in g.vertices(): n = libgv.agnode(gvg, str(int(v))) p = aget(n, "pos") p = p.split(",") pos[0][v] = float(p[0]) pos[1][v] = float(p[1]) # I don't get this, but it seems necessary pos[0].a /= 100 pos[1].a /= 100 pos = group_vector_property(pos) if return_string: if output_format == "auto": output_format = "png" if hasattr(libc, "open_memstream"): buf = ctypes.c_char_p() buf_len = ctypes.c_size_t() fstream = libc.open_memstream(ctypes.byref(buf), ctypes.byref(buf_len)) libgv.gvRender(gvc, gvg, output_format, fstream) libc.fclose(fstream) data = 
copy.copy(ctypes.string_at(buf, buf_len.value)) libc.free(buf) else: # write to temporary file, if open_memstream is not available output = tempfile.mkstemp()[1] libgv.gvRenderFilename(gvc, gvg, output_format, output) data = open(output).read() os.remove(output) else: if output_format == "auto": if output == "": output_format = "xlib" elif output is not None: output_format = output.split(".")[-1] # if using xlib we need to fork the process, otherwise good ol' # graphviz will call exit() when the window is closed if output_format == "xlib" or fork: pid = os.fork() if pid == 0: libgv.gvRenderFilename(gvc, gvg, output_format, output) os._exit(0) # since we forked, it's good to be sure if output_format != "xlib": os.wait() elif output is not None: libgv.gvRenderFilename(gvc, gvg, output_format, output) ret = [pos] if return_string: ret.append(data) finally: if has_layout: libgv.gvFreeLayout(gvc, gvg) libgv.agclose(gvg) if len(ret) > 1: return tuple(ret) else: return ret[0]
s.bind(ADDR)
s.listen(5)
# Loop forever, waiting for client connection requests.
print("Listen to the port 8888...")
while True:
    try:
        c, addr = s.accept()
    except KeyboardInterrupt:
        sys.exit("退出服务器")
    except Exception as e:
        print("Error:", e)
        continue
    # Create a new process to handle the client request.  A double fork is
    # used: the first-level child exits immediately so the grandchild is
    # adopted by init and never becomes a zombie.
    pid = os.fork()
    if pid < 0:
        pass
    if pid == 0:
        p = os.fork()
        if p == 0:
            # Second-level child (grandchild): serve the client.
            s.close()
            client_hander(c)  # handle the actual request
            sys.exit(0)  # child exits as soon as the request is handled
        else:
            os._exit(0)  # exit the first-level child immediately
    # Parent (or failed fork): reap the first-level child and wait for the
    # next client connection.
    else:
        c.close()
        os.wait()
def main():
    """Shadowsocks(R) server entry point.

    Reads the config, builds one TCP/UDP relay pair per configured port
    (IPv6 first when available, then IPv4), then either runs a single
    event loop or forks ``workers`` child processes that share the
    listening sockets.
    """
    shell.check_python()
    config = shell.get_config(False)
    shell.log_shadowsocks_version()
    daemon.daemon_exec(config)
    try:
        import resource
        logging.info('current process RLIMIT_NOFILE resource: soft %d hard %d' % resource.getrlimit(resource.RLIMIT_NOFILE))
    except ImportError:
        pass
    if config['port_password']:
        pass
    else:
        # No multi-port map configured: synthesize one from server_port and
        # the single password (server_port may itself be a list of ports).
        config['port_password'] = {}
        server_port = config['server_port']
        if type(server_port) == list:
            for a_server_port in server_port:
                config['port_password'][a_server_port] = config['password']
        else:
            config['port_password'][str(server_port)] = config['password']

    if not config.get('dns_ipv6', False):
        asyncdns.IPV6_CONNECTION_SUPPORT = False

    if config.get('manager_address', 0):
        logging.info('entering manager mode')
        manager.run(config)
        return

    tcp_servers = []
    udp_servers = []
    dns_resolver = asyncdns.DNSResolver()
    # Shared stat counters only work single-process; disable when forking.
    if int(config['workers']) > 1:
        stat_counter_dict = None
    else:
        stat_counter_dict = {}

    port_password = config['port_password']
    config_password = config.get('password', 'm')
    del config['port_password']
    for port, password_obfs in port_password.items():
        # Per-port settings start from the global defaults...
        method = config["method"]
        protocol = config.get("protocol", 'origin')
        protocol_param = config.get("protocol_param", '')
        obfs = config.get("obfs", 'plain')
        obfs_param = config.get("obfs_param", '')
        bind = config.get("out_bind", '')
        bindv6 = config.get("out_bindv6", '')
        # ...and may be overridden per entry: a bare password string, a
        # [password, obfs, protocol] list, or a full dict.
        if type(password_obfs) == list:
            password = password_obfs[0]
            obfs = common.to_str(password_obfs[1])
            if len(password_obfs) > 2:
                protocol = common.to_str(password_obfs[2])
        elif type(password_obfs) == dict:
            password = password_obfs.get('password', config_password)
            method = common.to_str(password_obfs.get('method', method))
            protocol = common.to_str(password_obfs.get('protocol', protocol))
            protocol_param = common.to_str(password_obfs.get('protocol_param', protocol_param))
            obfs = common.to_str(password_obfs.get('obfs', obfs))
            obfs_param = common.to_str(password_obfs.get('obfs_param', obfs_param))
            bind = password_obfs.get('out_bind', bind)
            bindv6 = password_obfs.get('out_bindv6', bindv6)
        else:
            password = password_obfs
        a_config = config.copy()
        ipv6_ok = False
        logging.info("server start with protocol[%s] password [%s] method [%s] obfs [%s] obfs_param [%s]" % (protocol, password, method, obfs, obfs_param))
        if 'server_ipv6' in a_config:
            # Try the IPv6 listener first; failures here are logged but do
            # not prevent the IPv4 listener below.
            try:
                if len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][0] == "[" and a_config['server_ipv6'][-1] == "]":
                    # Strip surrounding brackets from "[::]"-style addresses.
                    a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
                a_config['server_port'] = int(port)
                a_config['password'] = password
                a_config['method'] = method
                a_config['protocol'] = protocol
                a_config['protocol_param'] = protocol_param
                a_config['obfs'] = obfs
                a_config['obfs_param'] = obfs_param
                a_config['out_bind'] = bind
                a_config['out_bindv6'] = bindv6
                a_config['server'] = a_config['server_ipv6']
                logging.info("starting server at [%s]:%d" % (a_config['server'], int(port)))
                tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
                udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
                if a_config['server_ipv6'] == b"::":
                    # A wildcard v6 bind usually accepts v4 too, so a later
                    # v4 bind failure is expected and non-fatal.
                    ipv6_ok = True
            except Exception as e:
                shell.print_exception(e)

        try:
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            a_config['method'] = method
            a_config['protocol'] = protocol
            a_config['protocol_param'] = protocol_param
            a_config['obfs'] = obfs
            a_config['obfs_param'] = obfs_param
            a_config['out_bind'] = bind
            a_config['out_bindv6'] = bindv6
            logging.info("starting server at %s:%d" % (a_config['server'], int(port)))
            tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
            udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
        except Exception as e:
            if not ipv6_ok:
                shell.print_exception(e)

    def run_server():
        # Run one event loop driving every relay; installed in each worker
        # child (or the sole process when workers == 1).
        def child_handler(signum, _):
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            list(map(lambda s: s.close(next_tick=True), tcp_servers + udp_servers))
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), child_handler)

        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)

        try:
            loop = eventloop.EventLoop()
            dns_resolver.add_to_loop(loop)
            list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))
            # Drop privileges only after the (possibly privileged) binds.
            daemon.set_user(config.get('user', None))
            loop.run()
        except Exception as e:
            shell.print_exception(e)
            sys.exit(1)

    if int(config['workers']) > 1:
        if os.name == 'posix':
            children = []
            is_child = False
            for i in range(0, int(config['workers'])):
                r = os.fork()
                if r == 0:
                    logging.info('worker started')
                    is_child = True
                    run_server()
                    break
                else:
                    children.append(r)
            if not is_child:
                # Master: forward termination signals to the workers, then
                # close its own copies of the listening sockets and wait.
                def handler(signum, _):
                    for pid in children:
                        try:
                            os.kill(pid, signum)
                            os.waitpid(pid, 0)
                        except OSError:  # child may already exited
                            pass
                    sys.exit()
                signal.signal(signal.SIGTERM, handler)
                signal.signal(signal.SIGQUIT, handler)
                signal.signal(signal.SIGINT, handler)

                # master
                for a_tcp_server in tcp_servers:
                    a_tcp_server.close()
                for a_udp_server in udp_servers:
                    a_udp_server.close()
                dns_resolver.close()
                for child in children:
                    os.waitpid(child, 0)
        else:
            logging.warn('worker is only available on Unix/Linux')
            run_server()
    else:
        run_server()
# coding: utf-8 import os import time res = os.fork() print('res == %d'%res) if res == 0: print('我是子进程,我的pid是:%d,我的父进程id是:%d'%(os.getpid(),os.getppid())) else: print('我是父进程,我的pid是:%d'%os.getpid())
#Manejo de señales def handler(s, f): print("Soy el proceso PID %d recibi la señal %s de mi padre %d" % (os.getpid(), s, os.getppid())) #Registro de señales signal.signal(signal.SIGUSR1, handler) def hijo1(): print("Soy el proceso Hijo %d " % os.getppid()) pid = os.fork() if pid == 0: signal.pause() os._exit(0) pid1 = os.fork() if pid1 == 0: signal.pause() os._exit(0) pid2 = os.fork() if pid2 == 0: signal.pause()
port = 7186 # Communication Port my_host = '192.168.1.15' # Adresse fixe de la machine name = input("Choisir votre nom >> ") # Selection du pseudo socket_io = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) socket_io.bind(('', port)) # Bind toute les adresses possibles du port TSAP = (host, port) mreq = struct.pack("4sl", socket.inet_aton(host), socket.INADDR_ANY) socket_io.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) socket_io.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1) # Option pour envoyer un message en unicast pid = os.fork() # Fork pour lecture ecriture if pid: # Condition de lecture while True: # Boucle infinie (data, (ip, port)) = socket_io.recvfrom( 1000) # Variable reception de message + ip et port if ip == my_host: # Si c'est mon IP je n'affiche pas continue out = data.decode('utf-8') # Decode Byte python3 out = out.replace('\n', '') sender = out.split(':')[0] # Variable du pseudo de l'envoyeur message = out.split(':')[1] # Varialbe du message with open('users.json') as json_file: # Import du fichier JSON list_users = json.load(json_file) try: exist = list_users[sender] # Verification si le pseudo existe except Exception:
class Detacher(object): _daemon = False if sys.platform != 'win32': @classmethod def detach(cls): import signal try: # Fork a child process so the parent can exit. This will return control # to the command line or shell. This is required so that the new process # is guaranteed not to be a process group leader. We have this guarantee # because the process GID of the parent is inherited by the child, but # the child gets a new PID, making it impossible for its PID to equal its # PGID. pid = os.fork() except OSError, e: return e.errno, e.strerror # ERROR (return a tuple) if (pid == 0): # The first child. # Next we call os.setsid() to become the session leader of this new # session. The process also becomes the process group leader of the # new process group. Since a controlling terminal is associated with a # session, and this new session has not yet acquired a controlling # terminal our process now has no controlling terminal. This shouldn't # fail, since we're guaranteed that the child is not a process group # leader. os.setsid() # When the first child terminates, all processes in the second child # are sent a SIGHUP, so it's ignored. signal.signal(signal.SIGHUP, signal.SIG_IGN) try: # Fork a second child to prevent zombies. Since the first child is # a session leader without a controlling terminal, it's possible for # it to acquire one by opening a terminal in the future. This second # fork guarantees that the child is no longer a session leader, thus # preventing the daemon from ever acquiring a controlling terminal. pid = os.fork() # Fork a second child. except OSError, e: return e.errno, e.strerror # ERROR (return a tuple) if (pid == 0): # The second child. if cls._daemon: # Ensure that the daemon doesn't keep any directory in use. Failure # to do this could make a filesystem unmountable. os.chdir("/") # Give the child complete control over permissions. 
os.umask(0) else: sys.stdout.flush() sys.stdout.write(str(pid)) sys.stdout.flush() os._exit( 0 ) # Exit parent (the first child) of the second child. else: os._exit(0) # Exit parent of the first child. # Close all open files. Try the system configuration variable, SC_OPEN_MAX, # for the maximum number of open files to close. If it doesn't exist, use # the default value (configurable). try: maxfd = os.sysconf("SC_OPEN_MAX") except (AttributeError, ValueError): maxfd = 256 # default maximum maxfd = 4 for fd in range(0, maxfd): try: os.close(fd) except OSError: # ERROR (ignore) pass # Redirect the standard file descriptors to /dev/null. null = '/dev/null' os.open(null, os.O_RDONLY) # standard input (0) os.open(null, os.O_RDWR) # standard output (1) os.open(null, os.O_RDWR) # standard error (2) return 0
#!/usr/bin/python3
# coding=utf-8
# Fix vs. previous revision: the shebang pointed at the nonexistent path
# "/user/bin/python3", so the script could not be executed directly.
#
# os.fork() demonstration: after the fork BOTH processes continue from the
# call site, so the two branches below run concurrently — one per process.
import os
import time

# Returns 0 in the child and the child's pid in the parent.
ret = os.fork()
if ret == 0:
    # Child process: prints its progress over ~3 seconds.
    print("process--1--01---")
    time.sleep(1)
    print("process--1--02---")
    time.sleep(1)
    print("process--1--03---")
    time.sleep(1)
    print("process--1-Over....")
else:
    # Parent process: same cadence, interleaving with the child's output.
    print("process--2--01---")
    time.sleep(1)
    print("process--2--02---")
    time.sleep(1)
    print("process--2--03---")
    time.sleep(1)
    print("process--2-Over....")
def daemonize(stdout=os.devnull, stderr=None, stdin=os.devnull,
              pidfile=None, startmsg='started with pid %s'):
    """
    This forks the current process into a daemon.

    The stdin, stdout, and stderr arguments are file names that will
    be opened and be used to replace the standard file descriptors
    in sys.stdin, sys.stdout, and sys.stderr.
    These arguments are optional and default to /dev/null.
    Note that stderr is opened unbuffered, so
    if it shares a file with stdout then interleaved output
    may not appear in the order that you expect.

    :param pidfile: optional path; the daemon's pid is written there.
    :param startmsg: message written to stderr once daemonized; must
        contain a ``%s`` placeholder for the pid.

    Fix vs. previous revision: the pidfile was opened in binary mode
    (``'w+b'``) and written a ``str`` — a TypeError on Python 3 — and the
    handle was never closed.  It is now written in text mode via ``with``.
    """
    # Do first fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # Exit first parent.
    except OSError as e:
        sys.stderr.write("fork #1 failed: (%d) %s%s" % (e.errno, e.strerror, os.linesep))
        sys.exit(1)

    # Decouple from parent environment.
    os.chdir("/")
    os.umask(0)
    os.setsid()

    # interestingly enough, we MUST open STDOUT explicitly before we
    # fork the second time.  Otherwise, the duping of sys.stdout won't
    # work, and we will not be able to capture stdout.
    sys.stdout.write('')

    # Do second fork: the daemon can no longer acquire a controlling tty.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # Exit second parent.
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s%s" % (e.errno, e.strerror, os.linesep))
        sys.exit(1)

    # Open file descriptors and print start message.
    if not stderr:
        stderr = stdout
    si = open(stdin, 'rb')
    so = open(stdout, 'w+b')
    se = open(stderr, 'w+b', 0)  # unbuffered
    pid = str(os.getpid())
    # startmsg contains %s, so this double-format substitutes the pid.
    sys.stderr.write("%s%s" % (startmsg, os.linesep) % pid)
    sys.stderr.flush()
    if pidfile:
        with open(pidfile, 'w') as pf:
            pf.write("%s%s" % (pid, os.linesep))

    # Redirect standard file descriptors.
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def spawn_gtkwave_interactive(dump_path, save_path, quiet=False):  # pragma: no cover
    """Spawn gtkwave process in interactive mode.

    A process pipeline is constructed such that the contents of the VCD
    dump file at *dump_path* are displayed interactively as the dump file
    is being written (i.e. with :class:`~vcd.writer.VCDWriter`.

    The process pipeline built is approximately equivalent to::

        $ tail -f dump_path | shmidcat | gtkwave -vI save_path

    The ``tail``, ``shmidcat``, and ``gtkwave`` executables must be found
    in ``$PATH``.

    .. Warning::

        This function does not work on Windows.

    .. Note::

        A child python process of the caller will remain running until the
        GTKWave window is closed. This process ensures that the various
        other child processes are properly reaped.

    :param str dump_path: path to VCD dump file. The dump file must exist,
                          but be empty.
    :param str save_path: path to GTKWave save file. The save file will be
                          read immediately by GTKWave and thus must be
                          completely written.
    :param bool quiet: quiet GTKWave's output by closing its `stdout` and
                       `stderr` file descriptors.
    """
    import signal  # local import: only the supervisor child needs it

    # Well-known descriptor numbers for the standard streams.
    stdin_fd, stdout_fd, stderr_fd = 0, 1, 2
    # Fork a supervisor child; the caller returns immediately.
    if not os.fork():
        # Pipe carrying tail's stdout into shmidcat's stdin.
        shmidcat_rd_fd, tail_wr_fd = os.pipe()
        tail_pid = os.fork()
        if not tail_pid:
            # tail child: stream the growing dump file into the pipe.
            os.close(shmidcat_rd_fd)
            os.dup2(tail_wr_fd, stdout_fd)
            os.execlp('tail', 'tail', '-n', '+0', '-f', dump_path)
        os.close(tail_wr_fd)
        # Pipe carrying shmidcat's stdout into gtkwave's stdin.
        gtkwave_rd_fd, shmidcat_wr_fd = os.pipe()
        shmidcat_pid = os.fork()
        if not shmidcat_pid:
            # shmidcat child: bridge tail's output to gtkwave.
            os.close(gtkwave_rd_fd)
            os.dup2(shmidcat_rd_fd, stdin_fd)
            os.dup2(shmidcat_wr_fd, stdout_fd)
            os.execlp('shmidcat', 'shmidcat')
        # Supervisor no longer needs its copies of the middle pipe ends.
        os.close(shmidcat_rd_fd)
        os.close(shmidcat_wr_fd)
        gtkwave_pid = os.fork()
        if not gtkwave_pid:
            # gtkwave child: read the piped VCD stream interactively.
            os.dup2(gtkwave_rd_fd, stdin_fd)
            if quiet:
                devnull = open(os.devnull, 'w')
                os.dup2(devnull.fileno(), stdout_fd)
                os.dup2(devnull.fileno(), stderr_fd)
            os.execlp('gtkwave', 'gtkwave', '--vcd', '--interactive', save_path)
        # The first forked process exists to do this cleanup...
        # Block until the GTKWave window is closed, then tear down the
        # rest of the pipeline and exit without running the caller's
        # atexit handlers.
        os.waitpid(gtkwave_pid, 0)
        os.kill(tail_pid, signal.SIGTERM)
        os.kill(shmidcat_pid, signal.SIGTERM)
        os._exit(0)
def start(service, warnIfAlreadyStarted=True, sendEmail=True, maxWaitTime=30):
    '''Starts a service or the keeper itself.

    'all' starts every registered service (recursively). Forks a child
    that daemonizes and exec's/runs the service with its output piped
    through rotatelogs; the parent waits for startup, then maintains a
    stable hard link to the newest rotated log file.
    '''
    if service == 'all':
        # Recurse over the registered services (note: `services` and most
        # helpers below are defined elsewhere in this module).
        for service in services:
            start(service, warnIfAlreadyStarted=warnIfAlreadyStarted, sendEmail=sendEmail, maxWaitTime=maxWaitTime)
        return

    if service != 'keeper':
        checkRegistered(service)

    pids = getPIDs(service)

    # The service is running
    if len(pids) > 0:
        if warnIfAlreadyStarted:
            logging.warning(
                'Tried to start a service (%s) which is already running: %s',
                service, ','.join(pids))
        return

    # Before starting, try to get the latest log file
    previousLatestLogFile = _getLatestLogFile(service)

    logging.info('Starting %s.', service)

    # Unset LC_CTYPE in case it is still there (e.g. in OS X or, worse, when
    # ssh'ing from OS X to Linux using the default ssh_config) since some
    # CMSSW code crashes if the locale name is not valid.
    # NOTE(review): bare except also hides unrelated errors; a KeyError
    # guard would be enough — left untouched here.
    try:
        del os.environ['LC_CTYPE']
    except:
        pass

    # The service is not running, start it
    pid = os.fork()
    if pid == 0:
        # Child: detach into a daemon rooted at the service's directory.
        # NOTE(review): `0077` is Python 2 octal syntax (0o077 on Python 3).
        daemon.DaemonContext(
            working_directory=getPath(service),
            umask=0077,
        ).open()

        # Run the service's starting script piping its output to rotatelogs
        # FIXME: Fix the services so that they do proper logging themselves
        extraCommandLine = '2>&1 | LD_LIBRARY_PATH=/lib64:/usr/lib64 /usr/sbin/rotatelogs %s %s' % (
            getLogPath(service), config.logsSize)

        if service == 'keeper':
            os.execlp('bash', 'bash', '-c', './keeper.py keep ' + extraCommandLine)
        else:
            run(service, config.servicesConfiguration[service]['filename'],
                extraCommandLine=extraCommandLine)

    # Wait until the service has started
    wait(service, maxWaitTime=maxWaitTime, forStart=True)

    # Clean up the process table
    os.wait()

    # Alert users
    if sendEmail and config.getProductionLevel() != 'private':
        subject = '[keeper@' + socket.gethostname(
        ) + '] Started ' + service + ' service.'
        body = subject
        try:
            _sendEmail('*****@*****.**', config.startedServiceEmailAddresses,
                       [], subject, body)
        except Exception:
            logging.error('The email "' + subject + '"could not be sent.')

    # Try to remove the old hard link to the previous latest log file
    logHardLink = getLogPath(service)
    try:
        os.remove(logHardLink)
    except Exception:
        pass

    # Wait until the service creates some output (i.e. until rotatelogs has created a new file)
    startTime = time.time()
    # NOTE(review): this rebinds the `maxWaitTime` parameter; from here on
    # the caller-supplied value no longer applies.
    maxWaitTime = 20
    while True:
        if time.time() - startTime > maxWaitTime:
            raise Exception(
                'Service %s did not create any output after %s seconds.' %
                (service, maxWaitTime))
        latestLogFile = _getLatestLogFile(service)
        # If there is a log file
        if latestLogFile is not None:
            # If there was not a previous log file, latestLogFile is the new one.
            # If there was a previous log file, latestLogFile should be different than the old one.
            if previousLatestLogFile is None or previousLatestLogFile != latestLogFile:
                break
        time.sleep(1)

    # Create the new hard link
    try:
        os.link(latestLogFile, logHardLink)
    except Exception as e:
        logging.warning('Could not create hard link from %s to %s: %s',
                        latestLogFile, logHardLink, e)

    logging.info('Started %s: %s', service, ','.join(getPIDs(service)))
def daemonize(pidfile):
    """
    Run temboard as a background daemon.

    Double-forks, detaches from the terminal, redirects the standard
    streams to /dev/null, and records the daemon PID in *pidfile*
    (removed at exit via remove_pidfile).

    Inspired by:
    http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
    """
    # Try to read pidfile
    try:
        with open(pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    # BUG FIX: also catch ValueError — a stale pidfile still holding the
    # "\0" placeholder (or any garbage) would crash int() with an
    # uncaught exception instead of being treated as "not running".
    except (IOError, ValueError):
        pid = None

    # If pidfile exists, yet another temboard is probably running.
    if pid:
        sys.stderr.write("FATAL: pidfile %s already exist.\n" % pidfile)
        sys.exit(1)

    # Try to write pidfile (placeholder; the real PID is written after the
    # forks, since it is only known then).
    try:
        with open(pidfile, 'w+') as pf:
            pf.write("\0")
    except IOError:
        sys.stderr.write("FATAL: can't write pidfile %s.\n" % pidfile)
        sys.exit(1)

    # First fork.
    try:
        pid = os.fork()
        if pid > 0:
            # Exit first parent.
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("FATAL: fork failed: %d (%s)\n"
                         % (e.errno, e.strerror))
        sys.exit(1)

    # Decouple from parent environment.
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # Do second fork.
    try:
        pid = os.fork()
        if pid > 0:
            # Exit from second parent.
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("FATAL: fork failed: %d (%s)\n"
                         % (e.errno, e.strerror))
        sys.exit(1)

    # Redirect standard file descriptors.
    # BUG FIX: the Python 2-only file() builtin is gone in Python 3, and
    # unbuffered text mode ('a+', 0) raises ValueError there. Only the
    # underlying descriptors matter for dup2, so plain open() suffices.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open('/dev/null', 'r')
    so = open('/dev/null', 'a+')
    se = open('/dev/null', 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # write pidfile
    atexit.register(remove_pidfile, pidfile)
    pid = str(os.getpid())
    with open(pidfile, 'w+') as pf:
        pf.write("%s\n" % pid)
    global PIDFILE
    PIDFILE = pidfile
# NOTE(review): this is a fragment of a daemonize-style function whose
# `def` line and trailing statements are outside this chunk; it reads
# `stdin`/`stdout`/`stderr`/`startmsg`/`pidfile` presumably bound by the
# enclosing function — confirm against the full file. Python 2 syntax
# (`except OSError, e`, `file(...)`) throughout.
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
    pid = os.fork()
    if pid > 0:
        os._exit(0)  # Exit first parent.
except OSError, e:
    sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
    sys.exit(1)
# Decouple from parent environment.
#os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
    pid = os.fork()
    if pid > 0:
        os._exit(0)  # Exit second parent.
except OSError, e:
    sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
    sys.exit(1)
# Open file descriptors and print start message
if not stderr:
    stderr = stdout
si = file(stdin, 'r')
so = file(stdout, 'a+')
se = file(stderr, 'a+', 0)  #unbuffered
pid = str(os.getpid())
# Announce the daemon PID with a green "[ OK ]" marker on stderr.
sys.stderr.write("%s\t\t\t [\033[32m OK \033[0m]\n" % startmsg % pid)
sys.stderr.flush()
# NOTE(review): the body of this `if` continues beyond this chunk.
if pidfile:
def main(argv):
    """Entry point: `run` a program under virtual-stb (optionally in the
    background) or `stop` a previously backgrounded instance."""
    parser = argparse.ArgumentParser(
        description="Configure stb-tester to use a local X11 program as "
                    "input/output.",
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(dest='subcommand')

    run_parser = subparsers.add_parser('run')
    run_parser.add_argument('-b', '--background', action="store_true",
                            help="Run virtual-stb in background")
    run_parser.add_argument('-v', '--verbose', action="store_true",
                            help="Print xorg logs to console")
    run_parser.add_argument(
        '--x-keymap', help="Filename of file mapping key names to X keysyms")
    run_parser.add_argument('command', nargs=1)
    run_parser.add_argument('args', nargs=argparse.REMAINDER)

    stop_parser = subparsers.add_parser('stop')
    stop_parser.add_argument('-f', '--force', action="store_true",
                             help="Ignore errors")

    args = parser.parse_args(argv[1:])

    if args.subcommand == 'run':
        # Do run our `finally` teardown blocks on SIGTERM
        signal.signal(signal.SIGTERM, lambda _signo, _frame: sys.exit(0))

        write_end = None
        if args.background:
            # One-way pipe used as a readiness handshake from child to parent.
            read_end, write_end = multiprocessing.Pipe(duplex=False)
            pid = os.fork()
            if pid:
                # Parent - wait for child to be ready
                write_end.close()
                read_end.recv()
                return 0
            else:
                # Child
                read_end.close()

        # Launch the program under the virtual stb and publish its config.
        with virtual_stb(args.command + args.args, verbose=args.verbose,
                         x_keymap=args.x_keymap) as (child, config):
            for k, v in config.items():
                set_config('global', k, v)
            try:
                if write_end is not None:
                    # Signal the waiting parent that setup is complete.
                    write_end.send(True)
                    write_end.close()
                child.wait()
            finally:
                # Always clear the published config keys on the way out.
                for k in config.keys():
                    set_config('global', k, None)
    elif args.subcommand == 'stop':
        try:
            pid = get_config('global', 'vstb_pid', None)
            set_config('global', 'vstb_pid', None)
            os.kill(int(pid), signal.SIGTERM)
            # Poll with signal 0 until the process is actually gone.
            while True:
                try:
                    os.kill(int(pid), 0)
                    time.sleep(0.1)
                except OSError as e:
                    if e.errno == errno.ESRCH:
                        return 0
                    else:
                        raise
        except Exception:  # pylint: disable=broad-except
            # With --force, swallow any failure (e.g. no recorded pid).
            if not args.force:
                raise
class Daemon(object):
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method, or
    call with Daemon(pidfile, cmd=main). The resulting daemon can be
    immediately invoked.
    """

    def __init__(self, pidfile, cmd=None, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
        # Paths used to replace the standard streams once daemonized.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        # File that records the daemon's PID.
        self.pidfile = pidfile
        # Optional callable to run instead of an overridden run() method.
        self.cmd = cmd

    def invoke(self):
        """
        Assume the command is "daemon start|stop|restart|debug [args]"
        and invoke the daemon.
        """
        command = sys.argv[1]
        # Drop the subcommand so the remaining argv belongs to the daemon.
        del sys.argv[1]
        # NOTE(review): start/stop/restart/run are expected on the
        # subclass or elsewhere in this class; not visible in this chunk.
        if command == 'start':
            self.start()
        elif command == 'stop':
            self.stop()
        elif command == 'restart':
            self.restart()
        elif command == 'debug':
            # Run in the foreground without daemonizing.
            self.run()
        else:
            print "command should be start, stop, restart or debug"
            sys.exit(1)
        sys.exit(0)

    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # NOTE(review): daemonize() ends here in this chunk; stream
        # redirection and pidfile writing presumably follow elsewhere —
        # confirm against the full file.
def handleUSR2(num, frame):
    """SIGUSR2 handler: fork a child that replaces itself with xclock.

    :param num: signal number (unused, required by the handler signature).
    :param frame: current stack frame (unused, required by the handler
        signature).
    """
    forking = os.fork()
    if forking == 0:
        # Child process: replace the image with xclock.
        try:
            os.execlp("/usr/bin/xclock", "xclock")
        finally:
            # BUG FIX: execlp only returns if the exec failed (raising
            # OSError). Without this guard the child would fall back into
            # the parent's code path, leaving two copies of the program
            # running. The exec'd image never reaches this line.
            os._exit(127)
    # NOTE(review): the parent never waits for the child; unless SIGCHLD
    # is configured elsewhere, exited xclock processes remain as zombies
    # until this process exits — confirm against the full file.
def main():
    """Entry point: parse options, optionally daemonize, then build the
    effective coil configuration."""
    # Parse CLI options first so logging is configured before anything else.
    options, method = parse_options()
    log.init(options.logfile, options.loglevel)

    # Double-fork daemonization, skipped when dumping or when not requested.
    if not options.dump and options.daemonize:
        if os.fork() > 0:
            os._exit(0)  # first parent exits
        os.chdir("/")
        os.setsid()
        if os.fork() > 0:
            os._exit(0)  # second parent exits
        # Re-point stdio now that we are detached.
        log.init_stdio()

    # Build the effective config: built-in defaults, then the method's own
    # defaults, then the user-supplied config file (highest precedence).
    try:
        config = coil.parse(DEFAULT_CONFIG)

        if method.defaults:
            if isinstance(method.defaults, str):
                config.merge(coil.parse(method.defaults))
            else:
                config.merge(coil.struct.Struct(method.defaults))

        if options.config:
            config.merge(coil.parse_file(options.config))
    except coil.errors.CoilError, ex:
        log.error("Error parsing config: %s" % ex)
        sys.exit(1)