def daemonize(): """ Do the UNIX double-fork magic for properly detaching process """ try: pid = os.fork() if pid > 0: # Exit first parent sys.exit(0) except OSError as e: print("First fork failed: %d (%s)\n" % (e.errno, e.strerror), file=sys.stderr) sys.exit(1) # Decouple from parent environment os.setsid() os.umask(0o007) # Do second fork try: pid = os.fork() if pid > 0: # Exit from second parent sys.exit(0) except OSError as e: print("Second fork failed: %d (%s)\n" % (e.errno, e.strerror), file=sys.stderr) sys.exit(1)
def onConnect():
    # if keyAgent and options['agent']:
    #     cc = protocol.ClientCreator(reactor, SSHAgentForwardingLocal, conn)
    #     cc.connectUNIX(os.environ['SSH_AUTH_SOCK'])
    if hasattr(conn.transport, 'sendIgnore'):
        _KeepAlive(conn)
    if options.localForwards:
        for localPort, hostport in options.localForwards:
            s = reactor.listenTCP(localPort,
                                  forwarding.SSHListenForwardingFactory(
                                      conn, hostport,
                                      SSHListenClientForwardingChannel))
            conn.localForwards.append(s)
    if options.remoteForwards:
        for remotePort, hostport in options.remoteForwards:
            log.msg('asking for remote forwarding for %s:%s' %
                    (remotePort, hostport))
            conn.requestRemoteForwarding(remotePort, hostport)
        reactor.addSystemEventTrigger('before', 'shutdown', beforeShutdown)
    if not options['noshell'] or options['agent']:
        conn.openChannel(SSHSession())
    if options['fork']:
        if os.fork():
            os._exit(0)
        os.setsid()
        for i in range(3):
            try:
                os.close(i)
            except OSError as e:
                import errno
                if e.errno != errno.EBADF:
                    raise
def main():
    try:
        server = None
        for arg in sys.argv:
            if arg == '-d' or arg == '--daemon-mode':
                GitAutoDeploy.daemon = True
                GitAutoDeploy.quiet = True
            if arg == '-q' or arg == '--quiet':
                GitAutoDeploy.quiet = True

        if GitAutoDeploy.daemon:
            pid = os.fork()
            if pid != 0:
                sys.exit()
            os.setsid()

        if not GitAutoDeploy.quiet:
            print('Github Autodeploy Service v0.2 started')
        else:
            print('Github Autodeploy Service v0.2 started in daemon mode')

        server = HTTPServer(('', GitAutoDeploy.getConfig()['port']),
                            GitAutoDeploy)
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit) as e:
        if e:
            print(e, file=sys.stderr)
        if server is not None:
            server.socket.close()
        if not GitAutoDeploy.quiet:
            print('Goodbye')
def preexec():
    # Set session id.
    os.setsid()
    # Set umask to default to safe file permissions for root.
    os.umask(0o27)
    # Switch to a "safe" directory.
    os.chdir("/")
def preexec_fn():
    streams = [sys.stdin]
    if self.close_child_stdout:
        streams.append(sys.stdout)
    if self.close_child_stderr:
        streams.append(sys.stderr)
    self._null_streams(streams)
    os.setsid()
    for limit, value in self.rlimits.items():
        res = getattr(resource, 'RLIMIT_%s' % limit.upper(), None)
        if res is None:
            raise ValueError('unknown rlimit "%s"' % limit)
        # TODO(petef): support hard/soft limits
        resource.setrlimit(res, (value, value))
    if self.gid:
        try:
            os.setgid(self.gid)
        except OverflowError:
            if not ctypes:
                raise
            # versions of python < 2.6.2 don't manage unsigned int for
            # groups like on osx or fedora
            os.setgid(-ctypes.c_int(-self.gid).value)
    if self.uid:
        os.setuid(self.uid)
def daemonize():
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as e:
        print(e)
        sys.exit(1)
    os.chdir('/')
    os.umask(0)
    os.setsid()
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError as e:
        print(e)
        sys.exit(1)
    for f in sys.stdout, sys.stderr:
        f.flush()
    # Reopen the standard streams on /dev/null; unbuffered stderr
    # requires binary mode in Python 3.
    si = open('/dev/null', 'r')
    so = open('/dev/null', 'a+')
    se = open('/dev/null', 'ab', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def _daemonize(self):
    if not self.config.NODETACH:
        # fork so the parent can exit
        if os.fork():
            return -1
        # disconnect from tty and create a new session
        os.setsid()
        # fork again so the parent (the session group leader) can exit.
        # As a non-session group leader, we can never regain a controlling
        # terminal.
        if os.fork():
            return -1
        # move to the root to avoid mount problems
        os.chdir("/")
        # set paranoid umask
        os.umask(0o77)
        # write pid in a file
        f = open(self._pid_file, "w")
        f.write(str(os.getpid()))
        f.close()
        # close standard descriptors
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        # install signal handlers
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGHUP, self.signal_handler)
def daemonize(prog_name, pid_file_path="/var/run", dont_exit=False, main=None):
    """
    This will do the fork dance to daemonize the Python script.

    You have a couple options in using this.

    1) Call it with just a prog_name and the current script forks like
       normal then continues running.
    2) Add dont_exit=True and it will both fork a new process *and* keep
       the parent.
    3) Set main to a function and that function will become the new main
       method of the process, and the process will exit when that function
       ends.
    """
    if os.fork() == 0:
        os.setsid()
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        pid = os.fork()
        if pid != 0:
            os._exit(0)
        else:
            pid_remove_dead(prog_name, pid_file_path=pid_file_path)
            pid_store(prog_name, pid_file_path=pid_file_path)
            if main:
                main()
                os._exit(0)
    elif dont_exit:
        return True
    else:
        os._exit(0)
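# Usage sketch (not from the original source) for daemonize() above, showing
# the three modes its docstring describes. "worker" and the log path are
# hypothetical, and pid_remove_dead/pid_store are assumed to be the pidfile
# helpers the implementation references.
import os

def worker_main():
    # Becomes the daemon's new main; the process exits when it returns.
    with open("/tmp/worker.log", "a") as log:  # hypothetical path
        log.write("daemon pid %d\n" % os.getpid())

# 1) Fork a daemon; the calling script continues running:
#        daemonize("worker")
# 2) Fork a daemon *and* keep the parent alive:
#        daemonize("worker", dont_exit=True)
# 3) Run worker_main() as the daemon's entry point:
#        daemonize("worker", main=worker_main)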
def detach():
    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError as error:
        print("fork failed: %s" % error, file=sys.stderr)
        sys.exit(1)
    os.setsid()
    os.umask(0)
    try:
        if os.fork() != 0:
            # Exit from parent process.
            sys.exit(0)
    except OSError as error:
        print("fork failed: %s" % error, file=sys.stderr)
        sys.exit(1)
    sys.stdout.flush()
    sys.stderr.flush()
    stdin = open("/dev/null", "r")
    stdout = open("/dev/null", "a+")
    stderr = open("/dev/null", "a+")
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
def _daemonize():
    pid = os.fork()
    if pid > 0:
        # exit first parent
        sys.exit(0)

    # decouple from parent environment
    os.chdir(WORKDIR)
    os.setsid()
    os.umask(0)

    # do second fork
    pid = os.fork()
    if pid > 0:
        # exit from second parent
        sys.exit(0)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(LOG_FILE, 'r')
    so = open(LOG_FILE, 'a+')
    se = open(LOG_FILE, 'ab', 0)  # unbuffered requires binary mode in Python 3
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # write pidfile
    pid = str(os.getpid())
    f = open(PID_FILE, 'w')
    f.write("%s\n" % pid)
    f.close()
    atexit.register(lambda: os.remove(PID_FILE))
def daemonize(self): """ Fork off as a daemon """ # pylint: disable=protected-access # An object is accessed for a non-existent member. # Access to a protected member of a client class # Make a non-session-leader child process try: pid = os.fork() # @UndefinedVariable - only available in UNIX if pid != 0: os._exit(0) except OSError as error: sys.stderr.write('fork #1 failed: {error_num}: {error_message}\n'.format (error_num=error.errno, error_message=error.strerror)) sys.exit(1) os.setsid() # @UndefinedVariable - only available in UNIX # https://github.com/SickRage/SickRage/issues/2969 # http://www.microhowto.info/howto/cause_a_process_to_become_a_daemon_in_c.html#idp23920 # https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch06s08.html # Previous code simply set the umask to whatever it was because it was ANDing instead of OR-ing # Daemons traditionally run with umask 0 anyways and this should not have repercussions os.umask(0) # Make the child a session-leader by detaching from the terminal try: pid = os.fork() # @UndefinedVariable - only available in UNIX if pid != 0: os._exit(0) except OSError as error: sys.stderr.write('fork #2 failed: Error {error_num}: {error_message}\n'.format (error_num=error.errno, error_message=error.strerror)) sys.exit(1) # Write pid if self.create_pid: pid = os.getpid() logger.log('Writing PID: {pid} to {filename}'.format(pid=pid, filename=self.pid_file)) try: with io.open(self.pid_file, 'w') as f_pid: f_pid.write('{0}\n'.format(pid)) except EnvironmentError as error: logger.log_error_and_exit('Unable to write PID file: {filename} Error {error_num}: {error_message}'.format (filename=self.pid_file, error_num=error.errno, error_message=error.strerror)) # Redirect all output sys.stdout.flush() sys.stderr.flush() devnull = getattr(os, 'devnull', '/dev/null') stdin = file(devnull) stdout = file(devnull, 'a+') stderr = file(devnull, 'a+') os.dup2(stdin.fileno(), getattr(sys.stdin, 'device', sys.stdin).fileno()) os.dup2(stdout.fileno(), getattr(sys.stdout, 'device', sys.stdout).fileno()) os.dup2(stderr.fileno(), getattr(sys.stderr, 'device', sys.stderr).fileno())
def daemonize(): """\ Standard daemonization of a process. http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16 """ if not 'GUNICORN_FD' in os.environ: if os.fork(): os._exit(0) os.setsid() if os.fork(): os._exit(0) os.umask(0) maxfd = get_maxfd() # Iterate through and close all file descriptors. for fd in range(0, maxfd): try: os.close(fd) except OSError: # ERROR, fd wasn't open to begin with (ignored) pass os.open(REDIRECT_TO, os.O_RDWR) os.dup2(0, 1) os.dup2(0, 2)
def daemon(working_directory='/', pidfile=None,
           stdin=None, stdout=None, stderr=None):
    stdin = stdin or '/dev/null'
    stdout = stdout or '/dev/null'
    stderr = stderr or '/dev/null'

    pid = os.fork()
    if pid != 0:
        sys.exit(0)

    os.chdir(working_directory)
    os.setsid()  # Create new session and sets process group.
    os.umask(2)

    pid = os.fork()  # Will have INIT (pid 1) as parent process...
    if pid != 0:  # if pid is not child...
        sys.exit(0)

    sys.stdout.flush()
    sys.stderr.flush()
    si = open(stdin, "r")
    so = open(stdout, "a+")
    se = open(stderr, "ab", 0)  # unbuffered requires binary mode in Python 3
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    if pidfile:
        writeto(pidfile, str(os.getpid()))
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None,
                 post_fork_child_opts=None):
    """Perform a single-fork to run a subprocess and write the child pid file.

    Use this if your post_fork_child block invokes a subprocess via
    subprocess.Popen(). In this case, a second fork such as used in
    daemonize() is extraneous given that Popen() also forks. Using this
    daemonization method vs daemonize() leaves the responsibility of writing
    the pid to the caller to allow for library-agnostic flexibility in
    subprocess execution.
    """
    self.purge_metadata()
    self.pre_fork(**pre_fork_opts or {})
    pid = os.fork()
    if pid == 0:
        try:
            os.setsid()
            os.chdir(self._buildroot)
            self.post_fork_child(**post_fork_child_opts or {})
        except Exception:
            logger.critical(traceback.format_exc())
        os._exit(0)
    else:
        try:
            self.post_fork_parent(**post_fork_parent_opts or {})
        except Exception:
            logger.critical(traceback.format_exc())
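# A hedged sketch of a post_fork_child hook that could pair with
# daemon_spawn() above (it would be a method on the same class). The command
# and pid-file path are assumptions. The point of the docstring holds here:
# Popen() itself performs the "second fork", and recording the subprocess
# pid is left to this caller-supplied code.
import subprocess

def post_fork_child(self, cmd=("sleep", "60"), pid_path="/tmp/spawned.pid"):
    proc = subprocess.Popen(list(cmd))  # Popen forks the real subprocess
    with open(pid_path, "w") as f:      # caller writes the child pid file
        f.write(str(proc.pid))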
def daemonize(errfile):
    """
    Detach process and become a daemon.
    """
    pid = os.fork()
    if pid:
        os._exit(0)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)
    os.umask(0)

    pid = os.fork()
    if pid:
        os._exit(0)

    os.chdir("/")
    for fd in range(0, 20):
        try:
            os.close(fd)
        except OSError:
            pass

    sys.stdin = open("/dev/null", "r")
    sys.stdout = open("/dev/null", "w")
    sys.stderr = ErrorLog(errfile)
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""

    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                pass
        return (pid, fd)

    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        os.setsid()
        os.close(master_fd)
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if slave_fd > STDERR_FILENO:
            os.close(slave_fd)
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        os.close(slave_fd)
    return (pid, master_fd)
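# A small usage sketch for the pty-style fork() above (mirroring how the
# standard library's pty module is used): the child runs on the slave end of
# the pseudo-terminal as a session leader, and the parent reads the child's
# terminal output through master_fd.
import os

pid, master_fd = fork()
if pid == 0:  # CHILD == 0: exec something that writes to its tty
    os.execlp("echo", "echo", "hello from the pty")
else:
    data = os.read(master_fd, 1024)  # parent sees the child's output
    os.close(master_fd)
    os.waitpid(pid, 0)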
def main():
    try:
        server = None
        for arg in sys.argv:
            if arg == '-d' or arg == '--daemon-mode':
                GitLabAutoDeploy.daemon = True
                GitLabAutoDeploy.quiet = True
            if arg == '-q' or arg == '--quiet':
                GitLabAutoDeploy.quiet = True

        if GitLabAutoDeploy.daemon:
            pid = os.fork()
            if pid != 0:
                sys.exit()
            os.setsid()

        if not GitLabAutoDeploy.quiet:
            print('Github Autodeploy Service v0.2 started')
        else:
            print('Github Autodeploy Service v0.2 started in daemon mode')

        server = HTTPServer(('', GitLabAutoDeploy.getConfig()['port']),
                            GitLabAutoDeploy)
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit) as e:
        if e:
            print(e, file=sys.stderr)
        if server is not None:
            server.socket.close()
        if not GitLabAutoDeploy.quiet:
            print('Goodbye')
def _daemonize():
    # Fork once.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return

    # Set some options to detach from the terminal.
    os.chdir('/')
    os.setsid()
    os.umask(0)

    # Fork again.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)
    except OSError:
        return

    # Find the OS /dev/null equivalent.
    nullfile = getattr(os, 'devnull', '/dev/null')

    # Redirect all standard I/O to /dev/null.
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(nullfile, 'r')
    so = open(nullfile, 'a+')
    se = open(nullfile, 'ab', 0)  # unbuffered requires binary mode in Python 3
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def preUpdate(self):
    """
    Make adjustments before executing the command in a child process.
    """
    os.setsid()
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
def create_daemon(_pidfile):
    global pidfile
    pidfile = _pidfile
    if os.path.isfile(pidfile):
        print('pid file [' + pidfile + '] still exists; please check your system.')
        os._exit(1)
    if not os.path.isdir(os.path.dirname(pidfile)):
        os.mkdir(os.path.dirname(pidfile))

    pid = os.fork()
    if pid == 0:
        os.setsid()
        with open(pidfile, 'w') as f:
            f.write(str(os.getpid()))
        os.chdir('/')
        os.umask(0)
    else:
        # parent goes bye bye
        os._exit(0)

    si = os.open('/dev/null', os.O_RDONLY)
    so = os.open('/dev/null', os.O_RDWR)
    se = os.open('/dev/null', os.O_RDWR)
    os.dup2(si, sys.stdin.fileno())
    os.dup2(so, sys.stdout.fileno())
    os.dup2(se, sys.stderr.fileno())
    os.close(si)
    os.close(so)
    os.close(se)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
def commit(self, now_playing=None):
    for config in scrobbler_config:
        pidfile = config.get('pidfile')
        password = config.get('password')
        scrobbler_url = config.get('scrobbler_url')
        username = config.get('username')
        cachefile = config.get('cachefile')

        if ((pidfile is None) or (password is None) or
                (scrobbler_url is None) or (username is None) or
                (cachefile is None)):
            raise Exception('Broken config! Something is missing.')

        if os.path.exists(pidfile):
            # A commit is already running, maybe waiting for a network
            # timeout or something; do nothing.
            logger.info('Commit already running. Not committing. (%s)' % pidfile)
            continue

        logger.debug('Forking')
        if not os.fork():
            os.setsid()
            pid = os.fork()
            if pid:
                with open(pidfile, 'w') as fo:
                    fo.write(str(pid))
                logger.debug('Wrote pidfile')
                sys.exit(0)
            else:
                try:
                    self._real_commit(now_playing, cachefile, username,
                                      password, scrobbler_url)
                finally:
                    if os.path.exists(pidfile):
                        os.remove(pidfile)
                        logger.debug('Deleted pidfile')
def _has_sudo(self, result):
    _master, slave = pty.openpty()
    os.setsid()
    fcntl.ioctl(slave, termios.TIOCSCTTY, 0)

    out, err, exit = run_command(['sudo', '-l', '-U', self.user[USER_NAME], 'sudo'])
    if exit == 0:
        debug("User %s is allowed to run sudo" % self.user[USER_NAME])
        # sudo allows a wide range of configurations, such as controlling
        # which binaries the user can execute with sudo.
        # For now, we will just check whether the user is allowed to run
        # any command with sudo.
        out, err, exit = run_command(['sudo', '-l', '-U', self.user[USER_NAME]])
        for line in out.split('\n'):
            if line and re.search("(ALL)", line):
                result.value = 1
                debug("User %s can run any command with sudo" % self.user[USER_NAME])
                return
        debug("User %s can only run some commands with sudo" % self.user[USER_NAME])
    else:
        debug("User %s is not allowed to run sudo" % self.user[USER_NAME])
def daemonize(): """\ Standard daemonization of a process. Code is basd on the ActiveState recipe at: http://code.activestate.com/recipes/278731/ """ if not 'GUNICORN_FD' in os.environ: if os.fork() == 0: os.setsid() if os.fork() != 0: os.umask(0) else: os._exit(0) else: os._exit(0) maxfd = get_maxfd() # Iterate through and close all file descriptors. for fd in range(0, maxfd): try: os.close(fd) except OSError: # ERROR, fd wasn't open to begin with (ignored) pass os.open(REDIRECT_TO, os.O_RDWR) os.dup2(0, 1) os.dup2(0, 2)
def main():
    try:
        server = None
        for arg in sys.argv:
            if arg == '-d' or arg == '--daemon-mode':
                GitAutoDeploy.daemon = True
                GitAutoDeploy.quiet = True
            if arg == '-q' or arg == '--quiet':
                GitAutoDeploy.quiet = True

        if GitAutoDeploy.daemon:
            pid = os.fork()
            if pid != 0:
                sys.exit()
            os.setsid()

        port = GitAutoDeploy.getConfig()['port']
        server = HTTPServer(('', port), GitAutoDeploy)
        GitAutoDeploy.log('Github Autodeploy Service start listen: %i' % port)
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit) as e:
        GitAutoDeploy.log('stop')
        if e:
            GitAutoDeploy.log(e)
        if server is not None:
            server.socket.close()
def daemonize(self):
    import sys
    try:
        pid = os.fork()
        if pid > 0:
            # Exit first parent
            sys.exit(0)
    except OSError as e:
        print("fork #1 failed: %d (%s)" % (e.errno, e.strerror), file=sys.stderr)
        sys.exit(1)

    # Decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # Do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # Exit from second parent; print eventual PID before exiting
            print("Daemon PID %d" % pid)
            sys.exit(0)
    except OSError as e:
        print("fork #2 failed: %d (%s)" % (e.errno, e.strerror), file=sys.stderr)
        sys.exit(1)

    self.args.command()
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    LOG.debug("Daemonize")
    # first fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)

    os.chdir('/')
    os.setsid()
    os.umask(0)

    # second fork
    pid = os.fork()
    if pid > 0:
        sys.exit(0)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(stdin, 'r')
    so = open(stdout, "a+")
    se = open(stderr, "ab", 0)  # unbuffered requires binary mode in Python 3
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
def daemonize(self):
    threadCnt = threading.activeCount()
    if threadCnt > 1:
        raise Exception("Can't daemonize since there are %i running threads "
                        "which wouldn't be forked." % threadCnt)

    ## Fork
    pid = os.fork()
    if pid > 0:
        if self.pidbefore:
            self.writePid(pid)
        sys.exit(0)
    if not self.pidbefore:
        self.writePid(os.getpid())
    os.setsid()

    ## Change CWD
    self.setCwd(self.cwd)
    self.setGid(self.group)
    self.setUid(self.user)

    ## Redirect STDIO
    self.redirectStdIo(sys.stdin, self.stdin)
    self.redirectStdIo(sys.stdout, self.stdout)
    self.redirectStdIo(sys.stderr, self.stderr)
def minion_async_run(retriever, method, args):
    """
    This is a simpler invocation for minion side async usage.
    """
    # to avoid confusion of job id's (we use the same job database)
    # minion jobs contain the string "minion".
    job_id = "%s-minion" % pprint.pformat(time.time())
    __update_status(job_id, JOB_ID_RUNNING, -1)
    pid = os.fork()
    if pid != 0:
        os.waitpid(pid, 0)
        return job_id
    else:
        # daemonize!
        os.umask(0o77)
        os.chdir('/')
        os.setsid()
        if os.fork():
            os._exit(0)

        try:
            function_ref = retriever(method)
            rc = function_ref(*args)
        except Exception as e:
            (t, v, tb) = sys.exc_info()
            rc = cm_utils.nice_exception(t, v, tb)

        __update_status(job_id, JOB_ID_FINISHED, rc)
        os._exit(0)
def detach_process_context():
    """ Detach the process context from parent and session.

        Detach from the parent process and session group, allowing the
        parent to exit while this process continues running.

        Reference: “Advanced Programming in the Unix Environment”,
        section 13.3, by W. Richard Stevens, published 1993 by
        Addison-Wesley.
        """

    def fork_then_exit_parent(error_message):
        """ Fork a child process, then exit the parent process.

            If the fork fails, raise a ``DaemonProcessDetachError``
            with ``error_message``.
            """
        try:
            pid = os.fork()
            if pid > 0:
                # pylint: disable=W0212
                os._exit(0)
        except OSError as exc:
            error = DaemonProcessDetachError(
                "%(error_message)s: [%(exc_errno)d] %(exc_strerror)s" % {
                    'error_message': error_message,
                    'exc_errno': exc.errno,
                    'exc_strerror': exc.strerror})
            raise error

    fork_then_exit_parent(error_message="Failed first fork")
    os.setsid()
    fork_then_exit_parent(error_message="Failed second fork")
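# A minimal usage sketch, assuming detach_process_context() above: after the
# setsid-between-two-forks dance the surviving process is detached and not a
# session leader, so it is safe to record a pid and begin daemon work. The
# pidfile path is hypothetical.
import os

detach_process_context()
with open("/tmp/mydaemon.pid", "w") as f:  # hypothetical path
    f.write("%d\n" % os.getpid())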
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--interfaces', nargs='+', required=True,
                        help='Relay between these interfaces (minimum 2).')
    parser.add_argument('--noTransmitInterfaces', nargs='+',
                        help='Do not relay packets via these interfaces, listen only.')
    parser.add_argument('--ifFilter',
                        help='JSON file specifying which interface(s) a particular source IP can relay to.')
    parser.add_argument('--ssdpUnicastAddr',
                        help='IP address to listen to SSDP unicast replies, which will be'
                             ' relayed to the IP that sent the SSDP multicast query.')
    parser.add_argument('--oneInterface', action='store_true',
                        help='Slightly dangerous: only one interface exists, connected to two networks.')
    parser.add_argument('--relay', nargs='*',
                        help='Relay additional multicast address(es).')
    parser.add_argument('--noMDNS', action='store_true',
                        help='Do not relay mDNS packets.')
    parser.add_argument('--mdnsForceUnicast', action='store_true',
                        help='Force mDNS packets to have the UNICAST-RESPONSE bit set.')
    parser.add_argument('--noSSDP', action='store_true',
                        help='Do not relay SSDP packets.')
    parser.add_argument('--noSonosDiscovery', action='store_true',
                        help='Do not relay broadcast Sonos discovery packets.')
    parser.add_argument('--homebrewNetifaces', action='store_true',
                        help='Use self-contained netifaces-like package.')
    parser.add_argument('--ifNameStructLen', type=int, default=40,
                        help='Help the self-contained netifaces work out its ifName struct length.')
    parser.add_argument('--allowNonEther', action='store_true',
                        help='Allow non-ethernet interfaces to be configured.')
    parser.add_argument('--masquerade', nargs='+',
                        help='Masquerade outbound packets from these interface(s).')
    parser.add_argument('--wait', action='store_true',
                        help='Wait for IPv4 address assignment.')
    parser.add_argument('--ttl', type=int,
                        help='Set TTL on outbound packets.')
    parser.add_argument('--listen', nargs='+',
                        help='Listen for a remote connection from one or more remote addresses A.B.C.D.')
    parser.add_argument('--remote', nargs='+',
                        help='Relay packets to remote multicast-relay(s) on A.B.C.D.')
    parser.add_argument('--remotePort', type=int, default=1900,
                        help='Use this port to listen/connect to.')
    parser.add_argument('--remoteRetry', type=int, default=5,
                        help='If the remote connection is terminated, retry at least N seconds later.')
    parser.add_argument('--noRemoteRelay', action='store_true',
                        help='Only relay packets on local interfaces: don\'t relay packets out of --remote connected relays.')
    parser.add_argument('--aes',
                        help='Encryption key for the connection to the remote multicast-relay.')
    parser.add_argument('--foreground', action='store_true',
                        help='Do not background.')
    parser.add_argument('--logfile',
                        help='Save logs to this file.')
    parser.add_argument('--verbose', action='store_true',
                        help='Enable verbose output.')
    args = parser.parse_args()

    if len(args.interfaces) < 2 and not args.oneInterface \
            and not args.listen and not args.remote:
        print('You should specify at least two interfaces to relay between')
        return 1

    if args.remote and args.listen:
        print('Relay role should be either --listen or --remote (or neither) but not both')
        return 1

    if args.ttl and (args.ttl < 0 or args.ttl > 255):
        print('Invalid TTL (must be between 0 and 255)')
        return 1

    if not args.foreground:
        pid = os.fork()
        if pid != 0:
            return 0
        os.setsid()
        os.close(sys.stdin.fileno())

    logger = Logger(args.foreground, args.logfile, args.verbose)

    relays = set()
    if not args.noMDNS:
        relays.add(('%s:%d' % (PacketRelay.MDNS_MCAST_ADDR,
                               PacketRelay.MDNS_MCAST_PORT), 'mDNS'))
    if not args.noSSDP:
        relays.add(('%s:%d' % (PacketRelay.SSDP_MCAST_ADDR,
                               PacketRelay.SSDP_MCAST_PORT), 'SSDP'))
    if not args.noSonosDiscovery:
        relays.add((PacketRelay.BROADCAST + ':6969', 'Sonos Setup Discovery'))
    if args.ssdpUnicastAddr:
        relays.add(('%s:%d' % (args.ssdpUnicastAddr,
                               PacketRelay.SSDP_UNICAST_PORT), 'SSDP Unicast'))
    if args.relay:
        for relay in args.relay:
            relays.add((relay, None))

    packetRelay = PacketRelay(interfaces=args.interfaces,
                              noTransmitInterfaces=args.noTransmitInterfaces,
                              ifFilter=args.ifFilter,
                              waitForIP=args.wait,
                              ttl=args.ttl,
                              oneInterface=args.oneInterface,
                              homebrewNetifaces=args.homebrewNetifaces,
                              ifNameStructLen=args.ifNameStructLen,
                              allowNonEther=args.allowNonEther,
                              ssdpUnicastAddr=args.ssdpUnicastAddr,
                              mdnsForceUnicast=args.mdnsForceUnicast,
                              masquerade=args.masquerade,
                              listen=args.listen,
                              remote=args.remote,
                              remotePort=args.remotePort,
                              remoteRetry=args.remoteRetry,
                              noRemoteRelay=args.noRemoteRelay,
                              aes=args.aes,
                              logger=logger)

    for relay in relays:
        try:
            (addr, port) = relay[0].split(':')
            _ = PacketRelay.ip2long(addr)
            port = int(port)
        except Exception:
            errorMessage = ('%s:%s: Expecting --relay A.B.C.D:P, where A.B.C.D '
                            'is a multicast or broadcast IP address and P is a '
                            'valid port number' % relay)
            if args.foreground:
                print(errorMessage)
            else:
                logger.warning(errorMessage)
            return 1

        if PacketRelay.isMulticast(addr):
            relayType = 'multicast'
        elif PacketRelay.isBroadcast(addr):
            relayType = 'broadcast'
        elif args.ssdpUnicastAddr:
            relayType = 'unicast'
        else:
            errorMessage = ('IP address %s is neither a multicast nor a '
                            'broadcast address' % addr)
            if args.foreground:
                print(errorMessage)
            else:
                logger.warning(errorMessage)
            return 1

        if port < 0 or port > 65535:
            errorMessage = 'UDP port %s out of range' % port
            if args.foreground:
                print(errorMessage)
            else:
                logger.warning(errorMessage)
            return 1

        logger.info('Adding %s relay for %s:%s%s' %
                    (relayType, addr, port,
                     relay[1] and ' (%s)' % relay[1] or ''))
        packetRelay.addListener(addr, port, relay[1])

    packetRelay.loop()
def daemonize(pidfile, *, stdin='/dev/null',
              stdout='/dev/null',
              stderr='/dev/null'):
    """The code below is adapted from:
    https://github.com/dabeaz/python-cookbook/blob/master/
    src/12/launching_a_daemon_process_on_unix/daemon.py

    It uses Unix double-fork magic based on Stevens's book
    "Advanced Programming in the UNIX Environment".

    Creates a daemon that is disassociated from the terminal and has no
    root privileges. Once double-forking is successful it writes its pid
    to a designated pidfile. The pidfile is later used to kill the daemon.
    """
    # If pidfile exists, there is a server program that is currently running
    if os.path.exists(pidfile):
        raise RuntimeError('Already running')

    # First fork (detaches from parent)
    try:
        if os.fork() > 0:
            # Parent exit
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError(f'fork #1 failed: {e}')

    # Decouple from parent environment
    os.chdir('/tmp')
    os.umask(0)
    os.setsid()
    dropPrivileges()
    logger.info("fork #1 successful")

    # Second fork (relinquish session leadership)
    try:
        if os.fork() > 0:
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError(f'fork #2 failed: {e}')

    # Flush I/O buffers
    sys.stdout.flush()
    sys.stderr.flush()

    # Replace file descriptors for stdin, stdout, and stderr
    with open(stdin, 'rb', 0) as f:
        os.dup2(f.fileno(), sys.stdin.fileno())
    with open(stdout, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stdout.fileno())
    with open(stderr, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stderr.fileno())

    # PID of the double-forked daemon
    fork2DaemonPID = os.getpid()
    logger.info("Writing pidfile")

    # Write the PID file
    with open(pidfile, 'w') as f:
        print(fork2DaemonPID, file=f)
    logger.info(f"fork #2 successful pid[{fork2DaemonPID}]")

    # Arrange to have the PID file removed on exit/signal
    atexit.register(lambda: os.remove(pidfile))
    atexit.register(lambda: removePidProcess())

    # Signal handler for termination (required)
    def sigterm_handler(signo, frame):
        raise SystemExit(1)

    signal.signal(signal.SIGTERM, sigterm_handler)
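# A hedged usage sketch for the pidfile-based daemonize() above: start the
# daemon, then stop it later by signalling the pid recorded in the pidfile,
# which is what the docstring says the pidfile is for. Paths are
# hypothetical; dropPrivileges() and removePidProcess() are assumed to be
# helpers defined alongside the original function.
import os
import signal
import time

PIDFILE = "/tmp/mydaemon.pid"  # hypothetical

def run():
    daemonize(PIDFILE, stdout="/tmp/mydaemon.log", stderr="/tmp/mydaemon.log")
    while True:  # the daemon's actual work loop
        time.sleep(60)

def stop():
    with open(PIDFILE) as f:
        os.kill(int(f.read().strip()), signal.SIGTERM)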
if __name__ == '__main__':
    os_info = [pp.get_os_name(), pp.get_os_version()]
    if "centos" in os_info[0] and "6." in os_info[1]:
        # Turn the current process into a daemon by forking twice.
        pid = os.fork()
        if pid > 0:
            exit(0)
        else:
            os.chdir('/usr/local/Geni/Genian')
            os.setsid()
            os.umask(0)
            pid = os.fork()
            if pid > 0:
                exit(0)
            else:
                os.chdir('/usr/local/Geni/Genian')
                sys.stdout.flush()
                sys.stderr.flush()
                # Redirect the stdin, stdout and stderr fds to /dev/null.
                si = open(os.devnull, 'r')
                so = open(os.devnull, 'a+')
                se = open(os.devnull, 'a+')
def show_setting_sid():
    print('Calling os.setsid() from %s' % os.getpid())
    sys.stdout.flush()
    os.setsid()
def mainloop(self):
    """the method used to actually fire off the server"""
    self.init()
    if self.numProcs == 1:
        self.info('running in non-daemon mode with single process')
        self._run_as()
        sys.exit()
    if not self.foreground:
        # become daemon and process group leader
        self.info("daemonizing...")
        if os.fork():
            os._exit(0)
        if os.fork():
            os._exit(0)
        os.setsid()
    else:
        # suppress going to background (for running under daemontools)
        self.info('not going to background')
    os.chdir('/')
    os.umask(0)
    # find number of possible file descriptors
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 1024
    # close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:
            # wasn't open, that's OK
            pass
    # send the standard streams to the bit bucket
    os.open(_devnull, os.O_RDWR)
    os.dup2(0, 1)
    os.dup2(0, 2)
    # write the pid file
    open(self.pidFile, 'w').write('%s' % os.getpid())
    while 1:
        try:
            # do any periodic tasks
            self.periodic()
            # reap any process corpses
            self._reap()
            # if we are short of procs, spawn some new ones
            if len(self._children) < self.numProcs:
                self._spawn()
            # wait; we'll be interrupted by SIGCHLD if anything
            # important happens
            try:
                time.sleep(self.pollPeriod)
            except Exception:
                pass
            # deal with signals
            if self._sig == 'SIGTERM':
                raise TermSignal
            if self._sig == 'SIGHUP':
                raise HupSignal
        except (TermSignal, KeyboardInterrupt):
            # server shutdown
            self.info('shutting down')
            self._die()
            self.stop()
            try:
                os.unlink(self.pidFile)
            except OSError:
                self.exception('could not unlink pid file!')
            self.info('server shut down')
            sys.exit()
        except HupSignal:
            # nice restart
            self.info('restarting')
            # kill off children
            self._die()
            # reload modules
            self.reload()
            # reload configuration
            self._initAttrs()
            self._sig = None
            self.init()
        global agent_debug
        agent_debug = True
    elif opt in ("-V", "--version"):
        print("PeerDisk v.", agent_version)
        print("(C)opyright 2011, C.J. Steele, all rights reserved.")
    elif opt in ("-D", "--daemon"):
        # daemonization bits for Linux and MacOSX
        if agent_os == 'linux' or agent_os == 'darwin':
            try:
                pid = fork()
                if pid > 0:
                    exit(0)
            except OSError as e:
                exit(1)
            chdir("/")
            setsid()
            umask(0)
            try:
                pid = fork()
                if pid > 0:
                    exit(0)
            except OSError as e:
                exit(1)
        # NT doesn't use daemonization,
        # TODO: http://islascruz.org/html/index.php?gadget=StaticPage&action=Page&id=6
        elif agent_os == 'win32':
            # TODO: check to see if we were called by the service control process?
            print("E: NT doesn't support daemonizing, you must start the "
                  "service with the 'net start cpemagent' command")
            exit(1)
    # initialization has been finished, lets do this!
def daemonize(enable_stdio_inheritance=False):
    """\
    Standard daemonization of a process.
    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork():
            os._exit(0)
        os.setsid()

        if os.fork():
            os._exit(0)

        os.umask(0o22)

        # In both the following any file descriptors above stdin
        # stdout and stderr are left untouched. The inheritance
        # option simply allows one to have output go to a file
        # specified by way of shell redirection when not wanting
        # to use --error-log option.

        if not enable_stdio_inheritance:
            # Remap all of stdin, stdout and stderr on to
            # /dev/null. The expectation is that users have
            # specified the --error-log option.

            closerange(0, 3)

            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            if fd_null != 0:
                os.dup2(fd_null, 0)

            os.dup2(fd_null, 1)
            os.dup2(fd_null, 2)
        else:
            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            # Always redirect stdin to /dev/null as we would
            # never expect to need to read interactive input.

            if fd_null != 0:
                os.close(0)
                os.dup2(fd_null, 0)

            # If stdout and stderr are still connected to
            # their original file descriptors we check to see
            # if they are associated with terminal devices.
            # When they are we map them to /dev/null so that
            # they are still detached from any controlling
            # terminal properly. If not we preserve them as they are.
            #
            # If stdin and stdout were not hooked up to the
            # original file descriptors, then all bets are
            # off and all we can really do is leave them as
            # they were.
            #
            # This will allow 'gunicorn ... > output.log 2>&1'
            # to work with stdout/stderr going to the file
            # as expected.
            #
            # Note that if using --error-log option, the log
            # file specified through shell redirection will
            # only be used up until the log file specified
            # by the option takes over. As it replaces stdout
            # and stderr at the file descriptor level, then
            # anything using stdout or stderr, including having
            # cached a reference to them, will still work.

            def redirect(stream, fd_expect):
                try:
                    fd = stream.fileno()
                    if fd == fd_expect and stream.isatty():
                        os.close(fd)
                        os.dup2(fd_null, fd)
                except AttributeError:
                    pass

            redirect(sys.stdout, 1)
            redirect(sys.stderr, 2)
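# A small illustration of the enable_stdio_inheritance flag discussed in the
# comments above, assuming the daemonize() implementation it belongs to:
# without the flag every standard stream lands on /dev/null after
# daemonizing; with it, stdout/stderr that a shell redirected to a file (as
# in "python app.py > output.log 2>&1") keep going to that file.
import sys

daemonize(enable_stdio_inheritance=True)
print("still reaches output.log when stdout was shell-redirected to a file")
sys.stdout.flush()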
def detach(self, pid_file):
    self.pid_file = pid_file

    # Check for old pid file.
    old_pid = ''
    if os.path.isfile(pid_file):
        with open(pid_file, 'r') as pid_handle:
            old_pid = pid_handle.read()

    # Create lock file.
    try:
        lock_file = open(pid_file, 'w')
    except IOError:
        logging.error('Can\'t create PID file!')
        sys.exit(1)

    # Try to get lock on the file.
    try:
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        logging.error('Can\'t create PID file! Can\'t lock on file!')
        # We need to overwrite the pidfile if we got here.
        with open(pid_file, 'w') as pid_handle:
            pid_handle.write(old_pid)

    # Fork to background.
    try:
        pid = os.fork()
    except OSError as e:
        logging.error('Unable to fork, error: {} ({})'.format(
            str(e), e.errno))
        sys.exit(1)

    if pid != 0:
        # This is the parent process. Exit.
        os._exit(0)

    # Start a new session, detaching from the controlling terminal.
    os.setsid()
    self.pid = os.getpid()

    # Determine /dev/null on this machine.
    dev_null = '/dev/null'
    if hasattr(os, 'devnull'):
        dev_null = os.devnull

    # Redirect output to /dev/null.
    dev_null_fd = os.open(dev_null, os.O_RDWR)
    os.dup2(dev_null_fd, 0)
    os.dup2(dev_null_fd, 1)
    os.dup2(dev_null_fd, 2)
    os.close(dev_null_fd)

    # Write PID file.
    lock_file.write(str(os.getpid()))
    lock_file.flush()
    self.detached = True

    # Register shutdown methods.
    signal.signal(signal.SIGTERM, self.sigterm)
    signal.signal(signal.SIGHUP, self.sigterm)
    atexit.register(self.exit)
def base_launch_kernel(code, fname, stdin=None, stdout=None, stderr=None,
                       executable=None, independent=False, extra_arguments=[],
                       cwd=None):
    """ Launches a localhost kernel, binding to the specified ports.

    Parameters
    ----------
    code : str,
        A string of Python code that imports and executes a kernel entry
        point.

    stdin, stdout, stderr : optional (default None)
        Standards streams, as defined in subprocess.Popen.

    fname : unicode, optional
        The JSON connector file, containing ip/port/hmac key information.

    executable : str, optional (default sys.executable)
        The Python executable to use for the kernel process.

    independent : bool, optional (default False)
        If set, the kernel process is guaranteed to survive if this process
        dies. If not set, an effort is made to ensure that the kernel is
        killed when this process dies. Note that in this case it is still
        good practice to kill kernels manually before exiting.

    extra_arguments : list, optional
        A list of extra arguments to pass when executing the launch code.

    cwd : path, optional
        The working dir of the kernel process (default: cwd of this process).

    Returns
    -------
    A tuple of form:
        (kernel_process, shell_port, iopub_port, stdin_port, hb_port)
    where kernel_process is a Popen object and the ports are integers.
    """
    # Build the kernel launch command.
    if executable is None:
        executable = sys.executable
    arguments = [executable, '-c', code, '-f', fname]
    arguments.extend(extra_arguments)

    # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
    # are invalid. Unfortunately, there is in general no way to detect whether
    # they are valid. The following two blocks redirect them to (temporary)
    # pipes in certain important cases.

    # If this process has been backgrounded, our stdin is invalid. Since there
    # is no compelling reason for the kernel to inherit our stdin anyway, we'll
    # play it safe and always redirect.
    redirect_in = True
    _stdin = PIPE if stdin is None else stdin

    # If this process is running on pythonw, we know that stdin, stdout, and
    # stderr are all invalid.
    redirect_out = sys.executable.endswith('pythonw.exe')
    if redirect_out:
        _stdout = PIPE if stdout is None else stdout
        _stderr = PIPE if stderr is None else stderr
    else:
        _stdout, _stderr = stdout, stderr

    # Spawn a kernel.
    if sys.platform == 'win32':
        # Create a Win32 event for interrupting the kernel.
        interrupt_event = ParentPollerWindows.create_interrupt_event()
        arguments += ['--interrupt=%i' % interrupt_event]

        # If the kernel is running on pythonw and stdout/stderr have not been
        # redirected, it will crash when more than 4KB of data is written to
        # stdout or stderr. This is a bug that has been with Python for a very
        # long time; see http://bugs.python.org/issue706263.
        # A cleaner solution to this problem would be to pass os.devnull to
        # Popen directly. Unfortunately, that does not work.
        if executable.endswith('pythonw.exe'):
            if stdout is None:
                arguments.append('--no-stdout')
            if stderr is None:
                arguments.append('--no-stderr')

        # Launch the kernel process.
        if independent:
            proc = Popen(arguments,
                         creationflags=512,  # CREATE_NEW_PROCESS_GROUP
                         stdin=_stdin, stdout=_stdout, stderr=_stderr)
        else:
            from _subprocess import DuplicateHandle, GetCurrentProcess, \
                DUPLICATE_SAME_ACCESS
            pid = GetCurrentProcess()
            handle = DuplicateHandle(pid, pid, pid, 0,
                                     True,  # Inheritable by new processes.
                                     DUPLICATE_SAME_ACCESS)
            proc = Popen(arguments + ['--parent=%i' % int(handle)],
                         stdin=_stdin, stdout=_stdout, stderr=_stderr)

        # Attach the interrupt event to the Popen object so it can be used
        # later.
        proc.win32_interrupt_event = interrupt_event

    else:
        if independent:
            proc = Popen(arguments, preexec_fn=lambda: os.setsid(),
                         stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd)
        else:
            proc = Popen(arguments + ['--parent=1'],
                         stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd)

    # Clean up pipes created to work around Popen bug.
    if redirect_in:
        if stdin is None:
            proc.stdin.close()
    if redirect_out:
        if stdout is None:
            proc.stdout.close()
        if stderr is None:
            proc.stderr.close()

    return proc
def start(self, timeout=60, step=0.25):
    if self.check_pid():
        logger.error("The application process is already started!")
        return False

    env = self._config.get_java_env()
    cmd = self._config.get_java_cmd()

    try:
        logger.trace("[%s] Forking now..." % os.getpid())
        pid = os.fork()
        if pid > 0:
            self._pid = None
            logger.trace("[%s] Waiting for intermediate process to "
                         "exit..." % os.getpid())
            # prevent zombie process
            (waitpid, result) = os.waitpid(pid, 0)
            if result == 0:
                logger.debug("The JVM process has been started.")
                return True
            logger.error("Starting the JVM process did not succeed...")
            return False
    except OSError as e:
        logger.error("Forking subprocess failed: %d (%s)\n" %
                     (e.errno, e.strerror))
        return

    logger.trace("[%s] Now in intermediate forked process..." % os.getpid())

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0o022)

    logger.debug("Environment to be used when starting the JVM: %s" %
                 ' '.join(["%s='%s'" % (k, v) for k, v in env.items()]))
    logger.debug("Command line to be used when starting the JVM: %s" %
                 ' '.join(cmd))

    # start java subprocess (second fork)
    logger.trace("[%s] Starting the JVM..." % os.getpid())
    try:
        proc = subprocess.Popen(
            cmd,
            close_fds=True,
            cwd='/',
            env=env,
        )
    except OSError as ose:
        if ose.errno == errno.ENOENT:
            logger.error("The java binary cannot be found in the default "
                         "search path!")
            logger.error("By default, when starting the JVM, the "
                         "environment is not preserved. If you don't set "
                         "preserve_environment to true or specify PATH in "
                         "preserve_environment or custom_environment in "
                         "the m2ee section of your m2ee.yaml "
                         "configuration file, the search path is likely a "
                         "very basic default list like '/bin:/usr/bin'")
        os._exit(1)

    # always write pid asap, so that monitoring can detect apps that should
    # be started but fail to do so
    self._pid = proc.pid
    logger.trace("[%s] Writing JVM pid to pidfile: %s" %
                 (os.getpid(), self._pid))
    self._write_pidfile()

    # wait for m2ee to become available
    t = 0
    while t < timeout:
        sleep(step)
        dead = proc.poll()
        if dead is not None:
            logger.error("Java subprocess terminated with errorcode %s" % dead)
            logger.debug("[%s] Doing unclean exit from intermediate "
                         "process now." % os.getpid())
            os._exit(1)
        if self.check_pid(proc.pid) and self._client.ping():
            break
        t += step

    if t >= timeout:
        logger.error("Timeout: Java subprocess takes too long to start.")
        logger.trace("[%s] Doing unclean exit from intermediate process "
                     "now." % os.getpid())
        os._exit(1)

    logger.trace("[%s] Exiting intermediate process..." % os.getpid())
    os._exit(0)
def init(rewrites={}, use_setsid=True):
    """Initialize signal handling, leave the parent process to take care of
    signal propagation and return as a forked child process.
    """
    if use_setsid:
        for signum in [signal.SIGTSTP, signal.SIGTTOU, signal.SIGTTIN]:
            _signal_rewrites.set(signum, signal.SIGSTOP)
    _signal_rewrites.update(rewrites)

    # Block all signals and store the original state
    originally_blocked = signal.pthread_sigmask(signal.SIG_BLOCK, all_signals)

    # A dummy signal handler used for signals we care about.
    # On the FreeBSD kernel, ignored signals cannot be waited on by `sigwait`
    # (but they can be on Linux). We must provide a dummy handler.
    # https://lists.freebsd.org/pipermail/freebsd-ports/2009-October/057340.html
    for sig in all_signals - {signal.SIGKILL, signal.SIGSTOP}:
        signal.signal(sig, lambda sig, frame: None)

    # Detach from controlling tty, so that the child's session can
    # attach to it instead.
    # We want the child to be able to be the session leader of the TTY so that
    # it can do normal job control.
    isatty = False
    if use_setsid:
        isatty = sys.stdout.isatty()
        if isatty:
            try:
                fcntl.ioctl(sys.stdin.fileno(), termios.TIOCNOTTY)
                # When the session leader detaches from its controlling tty
                # via TIOCNOTTY, the kernel sends SIGHUP and SIGCONT to the
                # process group. We need to be careful not to forward these
                # on to the child so that it doesn't receive a SIGHUP and
                # terminate itself.
                if os.getsid(0) == os.getpid():
                    log.debug("Detached from controlling tty, ignoring "
                              "the first SIGHUP and SIGCONT we receive.")
                    _temporary_ignores.ignore_next(signal.SIGHUP)
                    _temporary_ignores.ignore_next(signal.SIGCONT)
                else:
                    log.debug("Detached from controlling tty, "
                              "but was not session leader.")
            except Exception as exc:
                log.debug("Unable to detach from controlling tty: %s", str(exc))

    pid = os.fork()
    if pid < 0:
        raise RuntimeError("Unable to fork.")
    elif pid == 0:
        # child process
        # Reset signal blocking
        signal.pthread_sigmask(signal.SIG_SETMASK, originally_blocked)
        if use_setsid:
            try:
                os.setsid()
                log.debug("System call setsid opened a new session")
            except Exception as exc:
                log.error("Unable to setsid: %s. Exiting.", str(exc))
                sys.exit(1)
            try:
                if isatty:
                    # sys.stdout was attached to a terminal, reattach
                    fcntl.ioctl(0, termios.TIOCSCTTY, 0)
            except Exception as exc:
                log.debug("Unable to attach to controlling tty: %s", str(exc))
        return  # child initialization is ready
    else:
        # parent process
        log.debug("Child spawned with PID %d", pid)
        signal_handler_loop(child_pid=pid, session_leader=use_setsid)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--force', '-f', action='store_true',
                        help='Do not check existing disk reservations')
    parser.add_argument('--foreground', '-F', action='store_true',
                        help='Run in foreground mode')
    parser.add_argument('--no-panic', '-np', action='store_true',
                        help='Do not panic in case of a fatal error')
    parser.add_argument('--interval', '-i', default=5, type=int,
                        help='Time in seconds between each SCSI reservation set/check')
    args = parser.parse_args()

    setup_logging(args.foreground)

    if is_running():
        logger.error('fenced already running.')
        sys.exit(ExitCode.ALREADY_RUNNING.value)

    fence = Fence(args.interval)
    newkey = fence.init(args.force)

    if not args.foreground:
        logger.info('Entering in daemon mode.')
        if os.fork() != 0:
            sys.exit(0)
        os.setsid()
        if os.fork() != 0:
            sys.exit(0)
        os.closerange(0, 3)
    else:
        logger.info('Running in foreground mode.')

    signal.signal(signal.SIGHUP, fence.sighup_handler)

    try:
        fence.loop(newkey)
    except PanicExit as e:
        if args.no_panic:
            logger.info('Fatal error: %s', e)
            sys.exit(ExitCode.UNKNOWN.value)
        else:
            logger.info('Panic %s', e)
            panic(e)
    except Exception:
        logger.error('Unexpected exception', exc_info=True)
        sys.exit(ExitCode.UNKNOWN.value)
def _display():
    os.setsid()
    plt.show()
def daemonize(self): """ Fork off as a daemon """ # pylint: disable=E1101 # Make a non-session-leader child process try: pid = os.fork() # @UndefinedVariable - only available in UNIX if pid != 0: os._exit(0) except OSError, e: sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) os.setsid() # @UndefinedVariable - only available in UNIX # Make sure I can read my own files and shut out others prev = os.umask(0) os.umask(prev and int('077', 8)) # Make the child a session-leader by detaching from the terminal try: pid = os.fork() # @UndefinedVariable - only available in UNIX if pid != 0: os._exit(0) except OSError, e: sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1)
def setsid():
    # run in a new session
    os.setsid()
def result():
    os.setgid(user_gid)
    os.setuid(user_uid)
    os.setsid()
def pre_exec():
    # Restore default signal disposition and invoke setsid
    for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
        if hasattr(signal, sig):
            signal.signal(getattr(signal, sig), signal.SIG_DFL)
    os.setsid()
def setsid(): os.setsid()
def daemonize(stdout=os.devnull, stderr=None, stdin=os.devnull,
              pidfile=None, startmsg='started with pid %s'):
    """
    This forks the current process into a daemon.
    The stdin, stdout, and stderr arguments are file names that
    will be opened and be used to replace the standard file descriptors
    in sys.stdin, sys.stdout, and sys.stderr.
    These arguments are optional and default to /dev/null.
    Note that stderr is opened unbuffered, so
    if it shares a file with stdout then interleaved output
    may not appear in the order that you expect.
    """
    # Do first fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # Exit first parent.
    except OSError as e:
        sys.stderr.write("fork #1 failed: (%d) %s%s" %
                         (e.errno, e.strerror, os.linesep))
        sys.exit(1)

    # Decouple from parent environment.
    os.chdir("/")
    os.umask(0)
    os.setsid()

    # interestingly enough, we MUST open STDOUT explicitly before we
    # fork the second time.
    # Otherwise, the duping of sys.stdout won't work,
    # and we will not be able to capture stdout
    sys.stdout.write('')

    # Do second fork.
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)  # Exit second parent.
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s%s" %
                         (e.errno, e.strerror, os.linesep))
        sys.exit(1)

    # Open file descriptors and print start message
    if not stderr:
        stderr = stdout
    si = open(stdin, 'rb')
    so = open(stdout, 'w+b')
    se = open(stderr, 'w+b', 0)
    pid = str(os.getpid())
    sys.stderr.write("%s%s" % (startmsg, os.linesep) % pid)
    sys.stderr.flush()
    if pidfile:
        with open(pidfile, 'w+') as f:
            f.write("%s%s" % (pid, os.linesep))

    # Redirect standard file descriptors.
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
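# A hedged usage example for daemonize() above; the paths are hypothetical.
# Note that startmsg must contain a %s placeholder, since the function
# formats the daemon's pid into it before the standard streams are replaced.
daemonize(stdout="/tmp/app.log",
          pidfile="/tmp/app.pid",
          startmsg="app daemon started with pid %s")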
def child_action(self, pipe_w):
    # This avoids some SIGTSTP signals from grandchildren
    # getting propagated up to the master process
    os.setsid()

    # First set back to the default signal handlers for the signals
    # we handle, and then clear their blocked state.
    #
    signal_list = [signal.SIGTSTP, signal.SIGTERM]
    for sig in signal_list:
        signal.signal(sig, signal.SIG_DFL)
    signal.pthread_sigmask(signal.SIG_UNBLOCK, signal_list)

    # Assign the pipe we passed across the process boundaries
    #
    # Set the global message handler in this child
    # process to forward messages to the parent process
    self._pipe_w = pipe_w
    self._messenger.set_message_handler(self._child_message_handler)

    starttime = datetime.datetime.now()
    stopped_time = None

    def stop_time():
        nonlocal stopped_time
        stopped_time = datetime.datetime.now()

    def resume_time():
        nonlocal stopped_time
        nonlocal starttime
        starttime += datetime.datetime.now() - stopped_time

    # Gracefully handle sigterms.
    def handle_sigterm():
        self._child_shutdown(_ReturnCode.TERMINATED)

    # Time, log and run the action function
    #
    with _signals.terminator(handle_sigterm), _signals.suspendable(
        stop_time, resume_time
    ), self._messenger.recorded_messages(self._logfile, self._logdir) as filename:

        self.message(MessageType.START, self.action_name, logfile=filename)

        try:
            # Try the task action
            result = self.child_process()  # pylint: disable=assignment-from-no-return
        except SkipJob as e:
            elapsed = datetime.datetime.now() - starttime
            self.message(MessageType.SKIPPED, str(e),
                         elapsed=elapsed, logfile=filename)

            # Alert parent of skip by return code
            self._child_shutdown(_ReturnCode.SKIPPED)
        except BstError as e:
            elapsed = datetime.datetime.now() - starttime
            retry_flag = e.temporary

            if retry_flag and (self._tries <= self._max_retries):
                self.message(
                    MessageType.FAIL,
                    "Try #{} failed, retrying".format(self._tries),
                    elapsed=elapsed,
                    logfile=filename,
                )
            else:
                self.message(
                    MessageType.FAIL, str(e), elapsed=elapsed,
                    detail=e.detail, logfile=filename, sandbox=e.sandbox
                )

            self._send_message(_MessageType.CHILD_DATA, self.child_process_data())

            # Report the exception to the parent (for internal testing purposes)
            self._child_send_error(e)

            # Set return code based on whether or not the error was temporary.
            #
            self._child_shutdown(_ReturnCode.FAIL if retry_flag else _ReturnCode.PERM_FAIL)
        except Exception:  # pylint: disable=broad-except

            # If an unhandled exception (not normalized to BstError) occurs,
            # that's a bug; send the traceback and formatted exception back
            # to the frontend and print it to the log file.
            #
            elapsed = datetime.datetime.now() - starttime
            detail = "An unhandled exception occurred:\n\n{}".format(traceback.format_exc())

            self.message(MessageType.BUG, self.action_name,
                         elapsed=elapsed, detail=detail, logfile=filename)
            # Unhandled exceptions should permanently fail
            self._child_shutdown(_ReturnCode.PERM_FAIL)
        else:
            # No exception occurred in the action
            self._send_message(_MessageType.CHILD_DATA, self.child_process_data())
            self._child_send_result(result)

            elapsed = datetime.datetime.now() - starttime
            self.message(MessageType.SUCCESS, self.action_name,
                         elapsed=elapsed, logfile=filename)

    # Shutdown needs to stay outside of the above context manager,
    # make sure we don't try to handle SIGTERM while the process
    # is already busy in sys.exit()
    self._child_shutdown(_ReturnCode.OK)
def start():
    log("Starting placement service")
    '''
    Starts the placement service
    '''
    # For DEBUGGING locally
    if ctx._local:
        client = CloudifyClient(host='10.239.2.83',
                                username='******',
                                password='******',
                                tenant='default_tenant')
    else:
        client = manager.get_rest_client()

    r, w = os.pipe()
    pid = os.fork()
    if pid > 0:
        # wait for the child pid on the pipe
        os.close(w)
        data = b""
        for i in range(10):
            data = os.read(r, 10)
            if data == b"":
                time.sleep(1)
                log("waiting for pid")
                continue
            else:
                ctx.instance.runtime_properties["pid"] = data.decode()
                break
        if data == b"":
            log("ERROR: Failed to get child PID")
        os.close(r)
        return

    os.close(r)
    os.chdir("/tmp")
    os.setsid()
    os.umask(0)
    close_fds([w])

    pid = os.fork()
    if pid > 0:
        log("INFO: child pid = " + str(pid))
        os.write(w, str(pid).encode())  # the pipe carries bytes in Python 3
        os.close(w)
        os._exit(0)

    os.close(w)

    # Needed by Flask
    os.open("/dev/null", os.O_RDONLY)
    os.open("/dev/null", os.O_WRONLY)

    # Start REST server
    app = Flask(__name__)
    auto = Autodoc(app)

    # init stats
    stats = {}
    stats['errcnt'] = 0
    stats['actions'] = []

    # init config
    config = {}
    config['log_location'] = '/tmp/log'

    try:
        set_routes(app, auto, ctx.node.properties, stats, config, client)
        rest = Thread(target=app.run,
                      kwargs={
                          "host": "0.0.0.0",
                          "port": ctx.node.properties['port'],
                          "debug": False
                      })
        rest.start()
    except Exception as e:
        log(str(e))
        os._exit(0)

    rest.join()
    os._exit(0)
class daemon:
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile, daemonize=True, root=False,
                 root_chk_argv=True, stdin="/dev/null",
                 stdout="/dev/null", stderr="/dev/null"):
        """
        Make our daemon instance.

        pidfile: the file we're going to store the process id in. ex: /tmp/matt-daemon.pid
        root: does this script require root? True if it does, False if it doesn't. Will be enforced.
        root_chk_argv: does the script require '--requires-root' in sys.argv to run as root? (usage is good)
        stdin: where the script gets stdin from. "/dev/null", "/dev/stdin", etc.
        stdout: where the script writes stdout. "/dev/null", "/dev/stdout", etc.
        stderr: where the script writes stderr. "/dev/null", "/dev/stderr", etc.
        """
        # Enforce root usage or non-usage.
        RootCheck.check(root, check_argv=root_chk_argv)

        self.pidfile = pidfile
        self.should_daemonize = daemonize
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr

    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #1 failed: {0} ({1})\n".format(
                e.errno, e.strerror))
            sys.exit(1)

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: {0} ({1})\n".format(
                e.errno, e.strerror))
            sys.exit(1)
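# A sketch of the subclassing pattern the docstring above describes. The
# run() override and the start() entry point are assumptions about the rest
# of the class, which this excerpt does not show.
import time

class MyDaemon(daemon):
    def run(self):
        while True:
            time.sleep(10)

# MyDaemon("/tmp/matt-daemon.pid", root=False).start()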
def daemonize(stdout='/dev/null', stderr=None, stdin='/dev/null',
              workdir=None, startmsg='started with pid %s',
              keepParent=False):
    '''
    This forks the current process into a daemon.
    The stdin, stdout, and stderr arguments are file names that
    will be opened and be used to replace the standard file descriptors
    in sys.stdin, sys.stdout, and sys.stderr.
    These arguments are optional and default to /dev/null.
    Note that stderr is opened unbuffered, so
    if it shares a file with stdout then interleaved output
    may not appear in the order that you expect.
    '''
    # Do first fork.
    try:
        pid = os.fork()
        if pid > 0:
            if not keepParent:
                os._exit(0)  # Exit first parent.
            return pid
    except OSError as e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        print("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Decouple from parent environment.
    os.chdir("/")
    os.umask(UMASK)
    os.setsid()

    # Do second fork.
    try:
        pid = os.fork()
        if pid > 0:
            os._exit(0)  # Exit second parent.
    except OSError as e:
        sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        print("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Open file descriptors and print start message
    if not stderr:
        stderr = stdout
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    se = open(stderr, 'ab', 0)  # unbuffered requires binary mode in Python 3
    pid = str(os.getpid())
    sys.stderr.write("\n%s\n" % startmsg % pid)
    sys.stderr.flush()

    if workdir:
        # file(pidfile,'w+').write("%s\n" % pid)
        # Since the current working directory may be a mounted filesystem, we
        # avoid the issue of not being able to unmount the filesystem at
        # shutdown time by changing it to the root directory.
        os.chdir(workdir)
        # We probably don't want the file mode creation mask inherited from
        # the parent, so we give the child complete control over permissions.
        os.umask(UMASK)

    daemon = Element("Daemon")
    processId = Element("ProcessID")
    processId.setAttribute("Value", str(os.getpid()))
    daemon.appendChild(processId)
    parentProcessId = Element("ParentProcessID")
    parentProcessId.setAttribute("Value", str(os.getppid()))
    daemon.appendChild(parentProcessId)
    processGroupId = Element("ProcessGroupID")
    processGroupId.setAttribute("Value", str(os.getpgrp()))
    daemon.appendChild(processGroupId)
    userId = Element("UserID")
    userId.setAttribute("Value", str(os.getuid()))
    daemon.appendChild(userId)
    effectiveUserId = Element("EffectiveUserID")
    effectiveUserId.setAttribute("Value", str(os.geteuid()))
    daemon.appendChild(effectiveUserId)
    groupId = Element("GroupID")
    groupId.setAttribute("Value", str(os.getgid()))
    daemon.appendChild(groupId)
    effectiveGroupId = Element("EffectiveGroupID")
    effectiveGroupId.setAttribute("Value", str(os.getegid()))
    daemon.appendChild(effectiveGroupId)

    dom = Document()
    dom.appendChild(daemon)
    props = open("Daemon.xml", "w")
    props.write(daemon.toprettyxml())
    props.close()

    # Redirect standard file descriptors.
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
    return 0
def create_file_after_timeout_and_setsid(path, timeout):
    os.setsid()
    create_file_after_timeout(path, timeout)
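# Sketch of running the helper above in a separate process so that
# os.setsid() detaches only the child; using multiprocessing here is an
# assumption, since the original caller is not shown, and
# create_file_after_timeout must be importable.
from multiprocessing import Process

p = Process(target=create_file_after_timeout_and_setsid,
            args=("/tmp/ready.flag", 5))
p.start()
p.join()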
class Daemon(object):
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the run() method.
    """
    def __init__(self, pidfile, logfile=None, name=None, uid=None, gid=None,
                 stdin=os.devnull, stdout=os.devnull, stderr=os.devnull):
        self.name = name
        self.uid = uid
        self.gid = gid
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.pidfile = pidfile

    def change_proc_name(self):
        """
        Change the name of the process.
        """
        try:
            from setproctitle import setproctitle
            setproctitle(self.name)
        except ImportError:
            pass

    def daemonize(self):
        """
        Do the UNIX double-fork magic; see Stevens' "Advanced Programming
        in the UNIX Environment" for details (ISBN 0201563177).
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # Exit first parent.
                sys.exit(0)
            self.change_proc_name()
        except OSError as e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment.
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # Do second fork.
        try:
            pid = os.fork()
            if pid > 0:
                # Exit from second parent.
                sys.exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
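# A sketch of the stream-redirection step that typically completes a
# daemonize() like the one above, reusing the stdin/stdout/stderr paths
# stored in __init__. The original class's version is not shown, so this is
# an assumption modeled on the other snippets in this file; it is meant to
# be added as a method of the Daemon class.
def redirect_streams(self):
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(self.stdin, 'r')
    so = open(self.stdout, 'a+')
    se = open(self.stderr, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())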
def setsid_preexec_fn():
    os.setsid()
    if real_preexec_fn:
        real_preexec_fn()
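# Sketch of how a wrapper like the one above is typically handed to
# subprocess.Popen; the command is illustrative, and real_preexec_fn is
# assumed to come from the enclosing scope (stubbed here so the sketch
# runs). On Python 3, Popen(..., start_new_session=True) achieves the
# setsid() half without a custom preexec_fn.
import subprocess

real_preexec_fn = None  # assumed hook; its origin is not shown in the original

proc = subprocess.Popen(["sleep", "60"], preexec_fn=setsid_preexec_fn)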
def start_mtr(host):
    options = [mtr_prog, '--report', '--report-wide']
    pingtype = config.get(host, "type")
    count = config.getint(host, "count")
    ipv4 = config.getboolean(host, "force_ipv4")
    ipv6 = config.getboolean(host, "force_ipv6")
    size = config.getint(host, "size")
    lasttime = config.getint(host, "time")
    dns = config.getboolean(host, "dns")
    port = config.get(host, "port")
    address = config.get(host, "address")
    interval = config.get(host, "interval")
    timeout = config.get(host, "timeout")

    if "running" in status[host]:
        if debug:
            sys.stdout.write(
                "MTR for host still running, not restarting MTR!\n")
        return
    if time.time() - status[host]["lasttime"] < lasttime:
        if debug:
            sys.stdout.write(
                "%s - %s = %s is smaller than %s => mtr run not needed yet.\n"
                % (time.time(), status[host]["lasttime"],
                   time.time() - status[host]["lasttime"], lasttime))
        return

    pid = os.fork()
    if pid > 0:
        # Parent process: return and keep running.
        return
    os.chdir("/")
    os.umask(0)
    os.setsid()
    # Close all fds except stdin, stdout, and stderr.
    for fd in range(3, 256):
        try:
            os.close(fd)
        except OSError:
            pass

    if pingtype == 'tcp':
        options.append("--tcp")
    if pingtype == 'udp':
        options.append("--udp")
    if port is not None:
        options.append("--port")
        options.append(str(port))
    if ipv4:
        options.append("-4")
    if ipv6:
        options.append("-6")
    options.append("-s")
    options.append(str(size))
    options.append("-c")
    options.append(str(count))
    if not dns:
        options.append("--no-dns")
    if address is not None:
        options.append("--address")
        options.append(str(address))
    if interval is not None:
        options.append("-i")
        options.append(str(interval))
    if timeout is not None:
        options.append("--timeout")
        options.append(str(timeout))
    options.append(str(host))
    if debug:
        sys.stdout.write("Starting MTR: %s\n" % (" ".join(options)))

    reportfile = report_filepre + host_to_filename(host)
    if os.path.exists(reportfile):
        os.unlink(reportfile)
    report = open(reportfile, 'a+')
    report.write(str(int(time.time())) + "\n")
    report.flush()
    process = subprocess.Popen(options, stdout=report, stderr=report)
    # Write the child's pid to report.pid.
    pidfile = open(reportfile + ".pid", 'w')
    pidfile.write("%d\n" % process.pid)
    pidfile.flush()
    pidfile.close()
    os._exit(os.EX_OK)
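# Hedged sketch of the per-host config section start_mtr() reads. The
# section name and values are assumptions inferred from the
# config.get*/getboolean calls above; the original may use a wrapper that
# returns None for missing keys, which plain ConfigParser does not.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[example.com]
type = icmp
count = 10
force_ipv4 = false
force_ipv6 = false
size = 64
time = 300
dns = true
port = 443
address = 192.0.2.1
interval = 1
timeout = 5
""")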
def execute(self):
    """Perform final initialization and launch the target process
    commandline in a subprocess."""
    user, _ = self._getpwuid()
    username, homedir = user.pw_name, user.pw_dir

    # TODO(wickman) reconsider setsid now that we're invoking in a subshell
    os.setsid()
    if self._use_chroot:
        self._chroot()

    # If the mesos containerizer path is set, then this process will be
    # launched from within an isolated filesystem image by the
    # mesos-containerizer executable. That executable needs to run as root
    # so that it can properly set up the filesystem; as such, we'll skip
    # calling setuid at this point. We'll instead setuid after the process
    # has been forked (mesos-containerizer itself ensures the forked process
    # is run as the correct user).
    taskfs_isolated = self._mesos_containerizer_path is not None
    if not taskfs_isolated:
        self._setuid()

    # Start the process.
    start_time = self._platform.clock().time()

    if not self._sandbox:
        cwd = subprocess_cwd = sandbox = os.getcwd()
    else:
        if self._use_chroot:
            cwd = subprocess_cwd = sandbox = '/'
        elif taskfs_isolated:
            cwd = homedir = sandbox = self._container_sandbox
            subprocess_cwd = self._sandbox
        else:
            cwd = subprocess_cwd = homedir = sandbox = self._sandbox

    thermos_profile = os.path.join(sandbox, self.RCFILE)

    if self._preserve_env:
        env = deepcopy(os.environ)
    else:
        env = {}

    env.update({
        'HOME': homedir,
        'LOGNAME': username,
        'USER': username,
        'PATH': os.environ['PATH']
    })

    wrapped_cmdline = self.wrapped_cmdline(cwd)
    log.debug('Wrapped cmdline: %s' % wrapped_cmdline)

    real_thermos_profile_path = os.path.join(
        os.environ['MESOS_DIRECTORY'],
        TASK_FILESYSTEM_MOUNT_POINT,
        thermos_profile.lstrip('/')) if taskfs_isolated else thermos_profile

    if os.path.exists(real_thermos_profile_path):
        env.update(BASH_ENV=thermos_profile)

    log.debug('ENV is: %s' % env)
    subprocess_args = {
        'args': wrapped_cmdline,
        'close_fds': self.FD_CLOEXEC,
        'cwd': subprocess_cwd,
        'env': env,
        'pathspec': self._pathspec
    }

    log_destination_resolver = LogDestinationResolver(
        self._pathspec,
        destination=self._logger_destination,
        mode=self._logger_mode,
        rotate_log_size=self._rotate_log_size,
        rotate_log_backups=self._rotate_log_backups)
    stdout, stderr, handlers_are_files = log_destination_resolver.get_handlers()
    if handlers_are_files:
        executor = SubprocessExecutor(stdout=stdout, stderr=stderr,
                                      **subprocess_args)
    else:
        executor = PipedSubprocessExecutor(stdout=stdout, stderr=stderr,
                                           **subprocess_args)
    pid = executor.start()

    # Now that we've forked the process, if the task's filesystem is isolated
    # it's safe to setuid.
    if taskfs_isolated:
        self._setuid()

    self._write_process_update(state=ProcessState.RUNNING,
                               pid=pid,
                               start_time=start_time)

    rc = executor.wait()

    # Indicate that we have finished/failed.
    if rc < 0:
        state = ProcessState.KILLED
    elif rc == 0:
        state = ProcessState.SUCCESS
    else:
        state = ProcessState.FAILED

    self._write_process_update(state=state,
                               return_code=rc,
                               stop_time=self._platform.clock().time())
    self._rc = rc
def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run server job with given options.

    @param pid_file_manager: PidFileManager used to monitor the autoserv
            process.
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: Url to server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    if parser.options.warn_no_ssp:
        # Post a warning in the log.
        logging.warn('Autoserv is required to run with server-side packaging. '
                     'However, no drone is found to support server-side '
                     'packaging. The test will be executed in a drone without '
                     'server-side packaging supported.')

    # Send stdin to /dev/null.
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create a separate process group if the process is not already a process
    # group leader. This allows the autoserv process to keep running after
    # the caller process (drone manager call) exits.
    if os.getpid() != os.getpgid(0):
        os.setsid()

    # Container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
        parser.options.results)
    container_id = lxc.ContainerId(job_or_task_id, time.time(), os.getpid())
    job_folder = job_directories.get_job_folder_name(parser.options.results)

    # Implement SIGTERM handler.
    def handle_sigterm(signum, frame):
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug('Destroy container %s before aborting the autoserv '
                          'process.', container_id)
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get_container(container_id)
                if container:
                    container.destroy()
                else:
                    logging.debug('Container %s is not found.', container_id)
            except:
                # Handle any exception so the autoserv process can be aborted.
                logging.exception('Failed to destroy container %s.',
                                  container_id)
            # Try to correct the result file permission again after the
            # container is destroyed, as the container might have created some
            # new files in the result folder.
            if results:
                correct_results_folder_permission(results)

        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler.
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not available to
    # be imported in the chroot as part of VMTest, so try/except it.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        sys.exc_clear()

    # Ignore SIGTTOUs generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we received a SIGALRM, let's be loud about it.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server side tests that call shell scripts often depend on $USER being
    # set, but depending on how you launch your autotest scheduler it may
    # not be set.
    os.environ['USER'] = getpass.getuser()

    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    install_before = parser.options.install_before
    install_after = parser.options.install_after
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    no_tee = parser.options.no_tee
    parse_job = parser.options.parse_job
    execution_tag = parser.options.execution_tag
    if not execution_tag:
        execution_tag = parse_job
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    test_retry = parser.options.test_retry
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging
    host_attributes = parser.options.host_attributes
    in_lab = bool(parser.options.lab)

    # A test can't be both a client- and a server-side test.
    if client and server:
        parser.parser.error(
            "Cannot specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

    is_special_task = (verify or repair or cleanup or collect_crashinfo or
                       provision or reset)
    if len(parser.args) < 1 and not is_special_task:
        parser.parser.error("Missing argument: control file")

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive.
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    else:
        ssh_verbosity_flag = ''

    # We have a control file unless it's just a verify/repair/cleanup job.
    if len(parser.args) > 0:
        control = parser.args[0]
    else:
        control = None

    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

    kwargs = {
        'group_name': group_name,
        'tag': execution_tag,
        'disable_sysinfo': parser.options.disable_sysinfo
    }
    if parser.options.parent_job_id:
        kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    if control_filename:
        kwargs['control_filename'] = control_filename
    if host_attributes:
        kwargs['host_attributes'] = host_attributes
    kwargs['in_lab'] = in_lab
    job = server_job.server_job(control, parser.args[1:], results, label,
                                user, machines, client, parse_job, ssh_user,
                                ssh_port, ssh_pass, ssh_verbosity_flag,
                                ssh_options, test_retry, **kwargs)

    job.logging.start_logging()
    job.init_parser()

    # Perform checks.
    job.precheck()

    # Run the job.
    exit_code = 0
    auto_start_servod = _CONFIG.get_config_value('AUTOSERV',
                                                 'auto_start_servod',
                                                 type=bool, default=False)

    site_utils.SetupTsMonGlobalState('autoserv', indirect=False,
                                     auto_flush=False, short_lived=True)
    try:
        try:
            if repair:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                job.repair(job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                if use_ssp:
                    try:
                        _run_with_ssp(job, container_id, job_or_task_id,
                                      results, parser, ssp_url, job_folder,
                                      machines)
                    finally:
                        # Update the ownership of files in result folder.
                        correct_results_folder_permission(results)
                else:
                    if collect_crashinfo:
                        # Update the ownership of files in the result folder.
                        # If the job to collect crashinfo was running inside a
                        # container (SSP) and crashed before correcting folder
                        # permission, the result folder might have the wrong
                        # permission setting.
                        try:
                            correct_results_folder_permission(results)
                        except:
                            # Ignore any error as the user may not have root
                            # permission to run the sudo command.
                            pass
                    metric_name = ('chromeos/autotest/experimental/'
                                   'autoserv_job_run_duration')
                    f = {'in_container': utils.is_in_container(),
                         'success': False}
                    with metrics.SecondsTimer(metric_name, fields=f) as c:
                        job.run(install_before, install_after,
                                verify_job_repo_url=verify_job_repo_url,
                                only_collect_crashinfo=collect_crashinfo,
                                skip_crash_collection=skip_crash_collection,
                                job_labels=job_labels,
                                use_packaging=(not no_use_packaging))
                        c['success'] = True
        finally:
            job.close()
            # A special task doesn't run parse, so the result summary needs
            # to be built here.
            if results and (repair or verify or reset or cleanup or provision):
                # Throttle the result on the server side.
                try:
                    result_utils.execute(
                        results, control_data.DEFAULT_MAX_RESULT_SIZE_KB)
                except:
                    logging.exception(
                        'Non-critical failure: Failed to throttle results '
                        'in directory %s.', results)
                # Build the result view and report metrics for result sizes.
                site_utils.collect_result_sizes(results)
    except:
        exit_code = 1
        traceback.print_exc()
    finally:
        metrics.Flush()

    if pid_file_manager:
        pid_file_manager.num_tests_failed = job.num_tests_failed
        pid_file_manager.close_file(exit_code)
    job.cleanup_parser()

    sys.exit(exit_code)
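# The "only create a new session if we are not already the process group
# leader" check used at the top of run_autoserv(), as a standalone sketch:
import os

def ensure_own_session():
    # setsid() fails with EPERM when called by a process group leader,
    # so guard the call the same way run_autoserv() does.
    if os.getpid() != os.getpgid(0):
        os.setsid()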
class MysqlStatsd(object):
    """Main program class"""
    opt = None
    config = None

    def __init__(self):
        """Program entry point"""
        op = argparse.ArgumentParser()
        op.add_argument("-c", "--config", dest="file",
                        default="/etc/mysql-statsd.conf",
                        help="Configuration file")
        op.add_argument("-d", "--debug", dest="debug", help="Debug mode",
                        default=False, action="store_true")
        # TODO: switch the default to True, and make it fork by default in
        # the init script.
        op.add_argument("-f", "--foreground", dest="foreground",
                        help="Don't fork the main program",
                        default=False, action="store_true")

        opt = op.parse_args()
        self.get_config(opt.file)

        logfile = self.config.get('daemon').get('logfile', '/tmp/daemon.log')
        if not opt.foreground:
            self.daemonize(stdin='/dev/null', stdout=logfile, stderr=logfile)

        # Set up the queue shared by the polling and flushing threads.
        self.queue = Queue.Queue()

        # Split off the config for each thread.
        mysql_config = dict(mysql=self.config['mysql'])
        mysql_config['metrics'] = self.config['metrics']
        statsd_config = self.config['statsd']

        # Spawn the MySQL polling thread.
        mysql_thread = ThreadMySQL(queue=self.queue, **mysql_config)
        # t1 = ThreadMySQL(config=self.config, queue=self.queue)

        # Spawn the statsd flushing thread.
        statsd_thread = ThreadStatsd(queue=self.queue, **statsd_config)

        # Get the thread manager.
        tm = ThreadManager(threads=[mysql_thread, statsd_thread])
        tm.run()

    def get_config(self, config_file):
        cnf = ConfigParser()
        cnf.read(config_file)
        self.config = {}
        for section in cnf.sections():
            self.config[section] = {}
            for key, value in cnf.items(section):
                self.config[section][key] = value
        return self.config

    def daemonize(self, stdin='/dev/null', stdout='/dev/null',
                  stderr='/dev/null'):
        '''
        This forks the current process into a daemon.

        The stdin, stdout, and stderr arguments are file names that will be
        opened and used to replace the standard file descriptors in
        sys.stdin, sys.stdout, and sys.stderr. These arguments are optional
        and default to /dev/null. Note that stderr is opened unbuffered, so
        if it shares a file with stdout then interleaved output may not
        appear in the order that you expect.
        '''
        # Do first fork.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)  # Exit first parent.
        except OSError as e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)

        # Decouple from parent environment.
        # TODO: do we need to change to '/' or can we chdir to wherever
        # __file__ is?
        os.chdir("/")
        os.umask(0)
        os.setsid()

        # Do second fork.
        try:
            pid = os.fork()
            if pid > 0:
                f = open(self.config.get('daemon').get(
                    'pidfile', '/var/run/mysql_statsd.pid'), 'w')
                f.write(str(pid))
                f.close()
                sys.exit(0)  # Exit second parent.
        except OSError as e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
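# Minimal entry-point sketch for the class above; __init__ parses arguments
# and starts the threads itself, so constructing the object is enough.
if __name__ == "__main__":
    MysqlStatsd()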
def main():
    options, method = parse_options()
    log.init(options.logfile, options.loglevel)

    if not options.dump and options.daemonize:
        if os.fork() > 0:
            os._exit(0)
        os.chdir("/")
        os.setsid()
        if os.fork() > 0:
            os._exit(0)
        log.init_stdio()

    try:
        config = coil.parse(DEFAULT_CONFIG)
        if method.defaults:
            if isinstance(method.defaults, str):
                config.merge(coil.parse(method.defaults))
            else:
                config.merge(coil.struct.Struct(method.defaults))
        if options.config:
            config.merge(coil.parse_file(options.config))
    except coil.errors.CoilError as ex:
        log.error("Error parsing config: %s" % ex)
        sys.exit(1)
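# The detach sequence used in main() above, extracted as a standalone
# sketch (error handling omitted, as in the original):
import os

def detach():
    if os.fork() > 0:
        os._exit(0)   # first parent exits
    os.chdir("/")
    os.setsid()       # become session leader, drop the controlling tty
    if os.fork() > 0:
        os._exit(0)   # session leader exits; the child can't reacquire a tty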