def run_bjail(args: Namespace) -> None:
    """Launch a bubblejail instance.

    On failure, if stderr is not an interactive terminal (i.e. we were most
    likely started from a desktop launcher), surface the error through a
    critical desktop notification before re-raising.
    """
    instance_name = args.instance_name
    try:
        run_instace(
            instance_name,
            args.args_to_instance,
            args.wait,
            args.dry_run,
            args.debug_bwrap_args,
            args.debug_shell,
            args.debug_log_dbus,
            args.debug_helper_script,
        )
    except Exception:
        from os import isatty
        from sys import stderr

        if not isatty(stderr.fileno()):
            from subprocess import run as subprocess_run
            from traceback import format_exc

            notify_cmd = (
                'notify-send',
                '--urgency', 'critical',
                '--icon', 'bubblejail-config',
                f"Failed to run instance: {instance_name}",
                f"Exception: {format_exc(0)}",
            )
            try:
                subprocess_run(notify_cmd)
            except FileNotFoundError:
                # Make notify-send optional
                pass

        raise
def __call__(self, path):
    """Open *path* according to ``self.mode`` and return the file object.

    Special cases:
      * ``'-'`` maps to stdin for read modes; for write modes it returns a
        duplicate of stdout and then points the real stdout at stderr so
        later prints cannot corrupt the data stream.
      * paths ending in ``.gz`` are opened through gzip (text variants for
        plain ``'r'``/``'a'`` modes).

    The returned object gains an ``appending_to_file`` attribute telling
    whether the target already existed with content.
    """
    path = expanduser(path)
    # /dev/stdout cannot meaningfully be appended to; degrade to write mode.
    if path == '/dev/stdout' and 'a' in self.mode:
        self.mode = self.mode.replace('a', 'w')
    if path == '-':
        if 'r' in self.mode:
            file_obj = stdin.buffer if 'b' in self.mode else stdin
        else:
            # Duplicate stdout's descriptor for data output, then redirect
            # the real stdout to stderr.
            file_obj = fdopen(dup(stdout.fileno()),
                              'wb' if 'b' in self.mode else 'w')
            dup2(stderr.fileno(), stdout.fileno())
        return file_obj
    elif path[-3:] != '.gz':
        file_obj = open(path, self.mode)
    else:
        file_obj = gzip.open(path, {
            'r': 'rt',
            'a': 'at'
        }.get(self.mode, self.mode))
    # True when the target file already exists and is non-empty.
    file_obj.appending_to_file = bool(exists(path) and getsize(path))
    return file_obj
def daemonize(self):
    """
    Forks the process(es) from the controlling terminal
    and redirects I/O streams for logging.
    """
    self.fork()
    chdir(getcwd())
    setsid()
    umask(0)
    self.fork()
    stdout.flush()
    stderr.flush()
    # `file()` only exists on Python 2; `open()` works on both 2 and 3.
    # Python 3 also forbids unbuffered (0) text streams, so the stderr
    # target is opened line-buffered instead.
    si = open(self.stdin, 'w+')
    so = open(self.stdout, 'a+')
    se = open(self.stderr, 'a+', 1)
    # Point the process's standard descriptors at the configured files.
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())
    register(self.del_pid)
    self.set_pid()
def run(self):
    """Run as a daemon: detach from the terminal, then heartbeat forever.

    A SIGALRM-driven timer invokes ``self.heartbeat`` (first fire after
    2 s, then every 5 s) while the main loop idles.
    """
    pid = os.fork()
    if pid > 0:
        # Parent exits; the child continues in the background.
        exit(0)
    os.setsid()  # child becomes leader of a new session
    os.chdir("/home/dnt")  # switch the working directory
    os.umask(0)  # clear the file-mode creation mask
    # 5. Close the inherited standard descriptors.
    os.close(stdin.fileno())
    os.close(stdout.fileno())
    os.close(stderr.fileno())
    # [required] 6. The daemon's own logic.
    # Route the timer signal to the heartbeat handler.
    signal.signal(signal.SIGALRM, self.heartbeat)
    # First fire after 2 seconds, then every 5 seconds.
    signal.setitimer(signal.ITIMER_REAL, 2, 5)
    self.write_log("[%s]daeman running" % time.strftime("%Y-%m-%d %X"))
    self.write_log("p_name:%s,p_script:%s" % (self.p_name, self.p_script))
    while True:
        # The sleep does not delay signal delivery (signals take priority).
        time.sleep(5)
def __init__(self, device, baudrate=115200, logfile=None, debug=False,
             twefirm=None, no_color=False, no_term=False):
    """Open the serial device and initialize terminal/format state.

    :param device: serial device to open
    :param baudrate: initial baud rate
    :param logfile: optional log file path passed to the port opener
    :param debug: enable debug behavior
    :param twefirm: optional firmware image; when given, the port is
        switched to 38400 baud and the firmware is programmed
    :param no_color: disable colored logging
    :param no_term: disable terminal handling
    """
    self._termstates = []
    if platform != 'win32' and stdout.isatty():
        # Remember terminal attributes so they can be restored on exit.
        self._termstates = [(fd, termios.tcgetattr(fd)) for fd in
                            (stdin.fileno(), stdout.fileno(), stderr.fileno())]
    self._device = device
    self._baudrate = baudrate
    self._logfile = logfile
    self._port = self._open_port(self._device, self._baudrate,
                                 self._logfile, debug)
    self._resume = False
    self._debug = debug
    self._twefirm = twefirm
    self._twecmd = False
    self._tweformat = TWEDict.format_none
    self._twefmt_console = FmtAscii()
    self._twefmt_serail = None
    self._no_term = no_term
    TWELogger.__init__(self, no_color=no_color)
    # Original used `twefirm is not ''` (identity test against a literal),
    # which is also True for the default None and would try to program a
    # nonexistent firmware image.  Only program when a value was supplied.
    if twefirm:
        self._port.udev.set_baudrate(38400)
        self.tweprogram(twefirm)
def _daemonize(self):
    """Fork into the background, detach stdio to /dev/null, and switch
    logging from the console to ``self.logfile``."""
    try:
        pid = os.fork()
        if pid > 0:
            # Parent: the child carries on in the background.
            exit()
    except OSError as e:
        error(_('Error entering daemon mode: %s') % e.strerror)
        exit()
    # Decouple from the parent environment.
    os.chdir('/')
    os.setsid()
    os.umask(0)
    # Flush before swapping descriptors so no buffered output is lost.
    stdout.flush()
    stderr.flush()
    si = open(os.devnull, 'r')
    so = open(os.devnull, 'a+')
    se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), stdin.fileno())
    os.dup2(so.fileno(), stdout.fileno())
    os.dup2(se.fileno(), stderr.fileno())
    on_exit(self._quit)
    # Drop any handlers installed before daemonizing, then log to a file.
    old_log = getLogger()
    if old_log.handlers:
        for handler in old_log.handlers:
            old_log.removeHandler(handler)
    log(filename=self.logfile, level=self.loglevel,
        format='%(asctime)s %(levelname)-8s %(message)s')
    self._set_pid()
def get_terminal_size_posix():
    """Return the terminal size as ``(columns, rows)``.

    Tries, in order: TIOCGWINSZ on stdin/stdout/stderr, the controlling
    terminal, then the LINES/COLUMNS environment variables.

    :raises TerminalError: when no method yields a size.
    """
    def ioctl_GWINSZ(fd):
        # Returns (rows, cols) or None when fd is not a terminal.
        try:
            import fcntl
            import termios
            return struct.unpack('hh',
                                 fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except (ImportError, OSError, ValueError):
            return None

    cr = (ioctl_GWINSZ(stdin.fileno()) or
          ioctl_GWINSZ(stdout.fileno()) or
          ioctl_GWINSZ(stderr.fileno()))
    if not cr:
        # os.open() returns a raw integer descriptor, not a context
        # manager; the original `with os.open(...)` raised TypeError.
        # Close the descriptor explicitly instead.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_GWINSZ(fd)
            finally:
                os.close(fd)
        except OSError:
            pass
    if not cr:
        try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except KeyError:
            pass
    if not cr:
        raise TerminalError('cannot determine terminal size from POSIX')
    return int(cr[1]), int(cr[0])
def _pipeline(commands, env, fd_in, fd_out, fd_err):
    """Run a series of commands connected by their stdout/stdin.

    :param commands: sequence of argument lists, one per command
    :param env: environment mapping passed to each command
    :param fd_in: descriptor feeding the first command's stdin
    :param fd_out: descriptor receiving the last command's stdout
    :param fd_err: descriptor receiving every command's stderr
    :return: list of child process ids, in pipeline order
    """
    pids = []
    first = True
    for i, command in enumerate(commands):
        last = i == len(commands) - 1
        # If there are more commands upcoming then we need to set up a pipe.
        if not last:
            fd_in_new, fd_out_new = pipe2(O_CLOEXEC)
        pids += [fork()]
        child = pids[-1] == 0
        if child:
            if not first:
                # Establish communication channel with previous process.
                dup2(fd_in_old, stdin_.fileno())
                close_(fd_in_old)
                close_(fd_out_old)
            else:
                dup2(fd_in, stdin_.fileno())
            if not last:
                # Establish communication channel with next process.
                close_(fd_in_new)
                dup2(fd_out_new, stdout_.fileno())
                close_(fd_out_new)
            else:
                dup2(fd_out, stdout_.fileno())
            # Stderr is redirected for all commands in the pipeline because
            # each process' output should be rerouted and stderr is not
            # affected by the pipe between the processes in any way.
            dup2(fd_err, stderr_.fileno())
            _exec(*command, env=env)
            # This statement should never be reached: either exec fails in
            # which case a Python exception should be raised or the program
            # is started in which case this process' image is overwritten
            # anyway. Keep it to be absolutely safe.
            _exit(-1)
        else:
            # Parent: drop the previous pipe's descriptors once the child
            # that uses them has been spawned.
            if not first:
                close_(fd_in_old)
                close_(fd_out_old)
            else:
                first = False
            # If there are further commands then update the "old" pipe file
            # descriptors for future reference.
            if not last:
                fd_in_old = fd_in_new
                fd_out_old = fd_out_new
    return pids
def daemonize(self):
    """Classic double-fork daemonization.

    Detaches from the controlling terminal, closes every inherited
    descriptor, re-points stdio at the configured files, then fires the
    ``writepid`` and ``daemonized`` events.
    """
    try:
        pid = fork()
        if pid > 0:
            # exit first parent
            _exit(0)
    except OSError as e:
        stderr.write(
            "fork #1 failed: {0:d} ({1:s})\n".format(
                e.errno, str(e)
            )
        )
        raise SystemExit(1)

    # decouple from parent environment
    chdir(self.path)
    setsid()
    umask(0)

    # do second fork
    try:
        pid = fork()
        if pid > 0:
            # exit from second parent
            _exit(0)
    except OSError as e:
        stderr.write(
            "fork #2 failed: {0:d} ({1:s})\n".format(
                e.errno, str(e)
            )
        )
        raise SystemExit(1)

    # redirect standard file descriptors
    stdout.flush()
    stderr.flush()

    # Close every inherited descriptor, falling back to a fixed ceiling
    # when the limit is reported as unlimited.
    maxfd = getrlimit(RLIMIT_NOFILE)[1]
    if maxfd == RLIM_INFINITY:
        maxfd = 2048
    closerange(0, maxfd)

    # With 0-2 now closed, these opens repopulate the lowest descriptors
    # before they are dup2'd over the standard streams.
    si = open(self.stdin, "r")
    so = open(self.stdout, "a+")
    se = open(self.stderr, "a+")
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())

    self.fire(writepid())
    self.fire(daemonized(self))
def __init__(self, device, baudrate=115200, logfile=None, debug=False):
    """Open the serial port and capture terminal state for later restore.

    :param device: serial device to open
    :param baudrate: baud rate for the port
    :param logfile: optional log file path passed to the port opener
    :param debug: enable debug behavior
    """
    # Only snapshot termios state when attached to a real terminal: the
    # original called tcgetattr unconditionally and crashed with
    # `termios.error` whenever the streams were redirected.  The sibling
    # constructors in this codebase already guard with isatty().
    self._termstates = []
    if stdout.isatty():
        self._termstates = [(fd, termios.tcgetattr(fd)) for fd in
                            (stdin.fileno(), stdout.fileno(), stderr.fileno())]
    self._device = device
    self._baudrate = baudrate
    self._logfile = logfile
    self._port = self._open_port(self._device, self._baudrate,
                                 self._logfile, debug)
    self._resume = False
    self._debug = debug
def __enter__(self):
    """Start redirecting the process's stderr to the destination file.

    A duplicate of the original stderr descriptor is kept in
    ``self._memento_fd`` so the redirection can be undone later
    (presumably by ``__exit__`` — defined elsewhere).
    """
    self._stderr_fd = stderr.fileno()
    self._destination_file = open(self._destination, "w")
    self._destination_fd = self._destination_file.fileno()
    # Create a second file descriptor that also points to stderr
    self._memento_fd = dup(self._stderr_fd)
    # Redirect stderr so it points towards the given destination
    dup2(self._destination_fd, self._stderr_fd)
    return self
def redirect_file_descriptors(
    daemon_stdin='/dev/null',
    daemon_stdout='/dev/null',
    daemon_stderr='/dev/null',
):
    """Point the process's standard streams at the given files.

    Pending output is flushed first so nothing buffered is lost when the
    underlying descriptors are replaced.
    """
    stdout.flush()
    stderr.flush()
    replacements = (
        (open(daemon_stdin, 'r'), stdin),
        (open(daemon_stdout, 'a+'), stdout),
        (open(daemon_stderr, 'a+'), stderr),
    )
    for replacement, stream in replacements:
        dup2(replacement.fileno(), stream.fileno())
def daemonize(self):
    """Standard double-fork daemonization, detaching stdio to /dev/null."""
    if fork():
        exit(0)
    umask(0)
    setsid()
    if fork():
        exit(0)
    stdout.flush()
    stderr.flush()
    # `file()` was removed in Python 3 and unbuffered (0) text streams are
    # not allowed there either; use open() with default buffering.
    si = open('/dev/null', 'r')
    so = open('/dev/null', 'a+')
    se = open('/dev/null', 'a+')
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())
def __init__(self, device, baudrate=None, parity=None, rtscts=False,
             debug=False):
    """Open the serial port and remember terminal state for cleanup.

    :param device: serial device to open
    :param baudrate: baud rate; falls back to ``DEFAULT_BAUDRATE``
    :param parity: parity setting forwarded to the port opener
    :param rtscts: enable hardware flow control
    :param debug: enable debug behavior
    """
    self._termstates = []
    if not MSWIN and stdout.isatty():
        std_fds = (stdin.fileno(), stdout.fileno(), stderr.fileno())
        self._termstates = [(fd, tcgetattr(fd)) for fd in std_fds]
    self._device = device
    self._baudrate = baudrate if baudrate else self.DEFAULT_BAUDRATE
    self._port = self._open_port(self._device, self._baudrate, parity,
                                 rtscts, debug)
    self._resume = False
    self._silent = False
    self._rxq = deque()
    self._rxe = Event()
    self._debug = debug
    # Restore the terminal on interpreter exit.
    register(self._cleanup)
def __init__(self, device, baudrate=None, parity=None, rtscts=False,
             debug=False):
    """Open the serial port, saving the tty state so it can be restored.

    :param device: serial device to open
    :param baudrate: baud rate; ``DEFAULT_BAUDRATE`` when falsy
    :param parity: parity setting forwarded to the port opener
    :param rtscts: enable hardware flow control
    :param debug: enable debug behavior
    """
    interactive = not mswin and stdout.isatty()
    self._termstates = ([(fd, tcgetattr(fd)) for fd in
                         (stdin.fileno(), stdout.fileno(), stderr.fileno())]
                        if interactive else [])
    self._device = device
    self._baudrate = baudrate or self.DEFAULT_BAUDRATE
    self._port = self._open_port(self._device, self._baudrate, parity,
                                 rtscts, debug)
    self._resume = False
    self._silent = False
    self._rxq = deque()
    self._rxe = Event()
    self._debug = debug
    # Make sure the saved terminal state is reinstated at exit.
    register(self._cleanup)
def daemonize():
    """Double-fork into the background, detach stdio, and return the PID."""
    from os import fork, chdir, setsid, umask, getpid, dup2
    from sys import stdout, stderr, stdin, exit

    # First fork: the original parent returns to the shell.
    if fork():
        exit(0)
    chdir("/")
    setsid()
    umask(0)
    # Second fork: ensure we can never reacquire a controlling terminal.
    if fork():
        exit(0)

    for stream in (stdout, stderr):
        stream.flush()
    devnull_r = open('/dev/null', 'r')
    devnull_w = open('/dev/null', 'w')
    dup2(devnull_r.fileno(), stdin.fileno())
    for stream in (stdout, stderr):
        dup2(devnull_w.fileno(), stream.fileno())
    return getpid()
def connect_over_serial(url, baudrate):
    """Interactive serial console: forward keystrokes to the port until
    interrupted, restoring the terminal state on exit.

    :param url: pyftdi serial URL to connect to
    :param baudrate: baud rate for the connection
    """
    from term import getkey
    from sys import platform, stdin, stdout, stderr
    MSWIN = platform == 'win32'
    if not MSWIN:
        from termios import TCSANOW, tcgetattr, tcsetattr
    # Always bind termstates: the finally-block below iterates it, and the
    # original raised NameError whenever stdout was not a tty (or on
    # Windows, where it was never assigned at all).
    termstates = []
    if not MSWIN and stdout.isatty():
        termstates = [(fd, tcgetattr(fd)) for fd in
                      (stdin.fileno(), stdout.fileno(), stderr.fileno())]
    from pyftdi.serialext import serial_for_url
    try:
        port = serial_for_url(url, baudrate=baudrate)
    except SerialException as e:
        print("Uh-oh:", e)
        from pyftdi.ftdi import Ftdi
        # List candidate FTDI devices to help the user pick a valid URL.
        Ftdi().open_from_url('ftdi:///?')
        # The original called sys.exit() without importing sys.
        raise SystemExit(1)
    print("Connected.")
    try:
        while True:
            try:
                c = getkey(False)
                if MSWIN and ord(c) == 3:
                    raise KeyboardInterrupt()
                stdout.write(c.decode('utf8', errors='replace'))
                stdout.flush()
                port.write(c)
            except KeyboardInterrupt:
                port.close()
                print("kbai")
                break
    finally:
        # Restore whatever terminal attributes we changed.
        for fd, att in termstates:
            tcsetattr(fd, TCSANOW, att)
def init(self, fullterm: bool) -> None:
    """Internal terminal initialization function

    Puts the POSIX terminal into a raw-ish mode (no line buffering, no
    echo); with *fullterm* the interrupt/suspend keys are also disabled so
    Ctrl-C / Ctrl-Z pass through to the attached device.
    """
    if not self.IS_MSWIN:
        if stdout.isatty():
            # Snapshot all three standard streams so they can be restored.
            self._termstates = [(fd, tcgetattr(fd)) for fd in
                                (stdin.fileno(), stdout.fileno(),
                                 stderr.fileno())]
        tfd = stdin.fileno()
        new = tcgetattr(tfd)
        # Disable canonical (line-buffered) input and local echo.
        new[3] = new[3] & ~ICANON & ~ECHO
        new[6][VMIN] = 1   # read() returns as soon as one byte arrives
        new[6][VTIME] = 0  # ...with no inter-byte timeout
        if fullterm:
            # Disable the INTR/SUSP control characters entirely.
            new[6][VINTR] = 0
            new[6][VSUSP] = 0
        tcsetattr(tfd, TCSANOW, new)
    else:
        # Windows black magic
        # https://stackoverflow.com/questions/12492810
        call('', shell=True)
def doFork():
    """Daemonize via double fork and detach stdio to /dev/null."""
    if os.fork():
        exit(0)
    os.umask(0)
    os.setsid()
    if os.fork():
        exit(0)
    stdout.flush()
    stderr.flush()
    # os.open() requires integer flags (the original passed no flags /
    # a mode string, raising TypeError) and returns a raw fd with no
    # .fileno(); the builtin open() returns proper file objects.
    si = open('/dev/null')
    so = open('/dev/null', 'w')
    se = open('/dev/null', 'w')
    os.dup2(si.fileno(), stdin.fileno())
    os.dup2(so.fileno(), stdout.fileno())
    os.dup2(se.fileno(), stderr.fileno())
    # drop privs
    # setgid(getgrnam(group).gr_gid)
    # setuid(getpwnam(user).pw_uid)
    os.chdir('/')
def doFork():
    """Daemonize via double fork and detach stdio to /dev/null."""
    if os.fork():
        exit(0)
    os.umask(0)
    os.setsid()
    if os.fork():
        exit(0)
    stdout.flush()
    stderr.flush()
    # os.open() takes integer flags (not a mode string, and not zero
    # arguments) and returns a bare fd without .fileno(); use the builtin
    # open() so the dup2 calls below work.
    si = open('/dev/null')
    so = open('/dev/null', 'w')
    se = open('/dev/null', 'w')
    os.dup2(si.fileno(), stdin.fileno())
    os.dup2(so.fileno(), stdout.fileno())
    os.dup2(se.fileno(), stderr.fileno())
    # drop privs
    # setgid(getgrnam(group).gr_gid)
    # setuid(getpwnam(user).pw_uid)
    os.chdir('/')
async def main():
    """Download election data (cached in a local JSON-seq file), then
    collect types over a shuffled sample."""
    global w
    # The original getenv('COLUMNS', get_terminal_size(...)) evaluated the
    # fallback eagerly — raising OSError whenever stderr is not a terminal
    # even if COLUMNS is set — and produced a str from the environment but
    # an int from get_terminal_size.  Evaluate lazily and normalize to int.
    columns = getenv('COLUMNS')
    w = int(columns) if columns else get_terminal_size(stderr.fileno()).columns
    params = {
        # 'start': Date(2003,1,1),
        # 'end': Date(2004,1,1),
        # 'scope': Scope.COUNTRY,
    }
    filename = 'elections.jsonseq'
    async with Session(connections=100) as session:
        # Populate the cache file only on first run.
        if not exists(filename):
            with open(filename, 'w', encoding='utf-8') as fp:
                async for e in Election.search(session, **params):
                    dump([e.tojson()], fp, flush=True, ensure_ascii=False,
                         indent=2)
        with open(filename, 'r') as fp:
            els = list(Election.fromjson(obj) for obj in load(fp))
        # Deterministic shuffle so runs are reproducible.
        seed(57)
        shuffle(els)
        await collect_types(session, els)
def daemonize(self):
    '''
    Forks then sets up the I/O stream for the daemon
    '''
    self.fork()
    chdir(getcwd())
    setsid()
    umask(0)
    self.fork()
    stdout.flush()
    stderr.flush()
    # `file()` only exists on Python 2, and Python 3 forbids unbuffered
    # (0) text streams — use open(), line-buffered for the stderr target.
    si = open(self.stdin, 'w+')
    so = open(self.stdout, 'a+')
    se = open(self.stderr, 'a+', 1)
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())
    register(self.delPID)
    self.setPID()
# daemonize
if not options.foreground:
    if fork():
        exit(0)
    umask(0)
    setsid()
    if fork():
        exit(0)
    stdout.flush()
    stderr.flush()
    # `file()` is Python 2 only, and Python 3 does not allow unbuffered
    # text streams; open() with default buffering works on both.
    si = open("/dev/null", "r")
    so = open("/dev/null", "a+")
    se = open("/dev/null", "a+")
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())

# our list of codes
codes = []
script_path = path.abspath(path.dirname(__file__))

# open codes file: one code per line, '#' starts a comment
try:
    with open(script_path + "/rfid_codes.txt", "r") as f:
        for line in f:
            codes.append(line.split("#")[0].strip())
except OSError:
    # Original printed the garbled "Could Find Code File" and swallowed
    # every exception with a bare except.
    print("Could not find code file")
    exit()
def unix_startup(config, user=None, debug=False):
    """ Unix specific startup actions

    Optionally drops privileges to *user*, checks the pidfile lock,
    daemonizes (unless *debug*), redirects stdio to the LOG stream, and
    writes the new pid to the pidfile.  Returns ``(user, mypid)``.
    """
    global pidfile
    if user:
        try:
            userpw = getpwnam(user)
            setegid(userpw[3])
            seteuid(userpw[2])
        except:
            t, val, tb = exc_info()
            del t, tb
            print "Cannot swith to user", user, str(val)
            sys_exit(-2)
    else:
        # No user requested: report the effective user we run as.
        user = getpwuid(getuid())[0]

    try:
        pidfile = config.get("global", "pidfile")
    except:
        LOG(E_ALWAYS, "[Main] Missing pidfile in config")
        do_shutdown(-4)

    # Probe the pidfile: signal 0 tests process existence without killing.
    locked = 1
    try:
        pid = int(open(pidfile).read().strip())
        LOG(E_TRACE, "[Main] Lock: Sending signal to the process")
        try:
            kill(pid, 0)
            LOG(E_ERR, "[Main] Stale Lockfile: Process is alive")
        except:
            LOG(E_ERR, "[Main] Stale Lockfile: Old process is not alive")
            locked = 0
    except:
        locked = 0

    if locked:
        LOG(E_ALWAYS, "[Main] Unable to start Netfarm Archiver, another instance is running")
        do_shutdown(-5)

    ## Daemonize - Unix only - win32 has service
    if not debug:
        try:
            pid = fork()
        except:
            t, val, tb = exc_info()
            del t
            print "Cannot go in background mode", str(val)

        if pid:
            sys_exit(0)

        chdir("/")
        # Re-point fd 0 at /dev/null and fds 1/2 at the LOG stream; dup()
        # reuses the lowest closed descriptor each time.
        null = open("/dev/null", "r")
        close(stdin.fileno())
        dup(null.fileno())
        null.close()
        close(stdout.fileno())
        dup(LOG.fileno())
        close(stderr.fileno())
        dup(LOG.fileno())

    ## Save my process id to file
    mypid = str(getpid())
    try:
        open(pidfile, "w").write(mypid)
    except:
        LOG(E_ALWAYS, "[Main] Pidfile is not writable")
        do_shutdown(-6)

    return user, mypid
def run_job(req, path, fn, singleton=None):
    """Fork a worker process that registers a Job record and runs *fn*.

    The parent waits on a pipe for a 3-byte 'ACK'/'ERR' handshake from the
    child and returns ``(pid, ok)``.  The child never returns: it exits
    with 0 on success and 1 on failure.
    """
    pipe_out, pipe_in = os.pipe()
    pid = os.fork()
    if pid > 0:
        # parent process
        os.close(pipe_in)  # don't want write to pipe
        _pids.append(pid)
        req.log_error("Job crated with pid %d" % pid, state.LOG_NOTICE)
        with os.fdopen(pipe_out, 'r') as pipe:
            status = pipe.read(3)  # wait for job init
        return pid, (status == 'ACK')
        # end of parent process

    # close all descriptor insead of out, err, and pipe_in
    out, err = (stdout.fileno(), stderr.fileno())
    for i in xrange(0, 500):
        if i == out or i == err or i == pipe_in:
            continue
        try:
            os.close(i)
        except OSError:
            pass
    # endfor

    # reset log_error and logger function
    # TODO: copy level setting from poorwsgi
    def log_error(message, level):
        # Severe levels (numeric value < 5) go to stderr, the rest to stdout.
        if level[0] < 5:
            stderr.write("[job: %d] <%s> %s\n"
                         % (os.getpid(), level[1], message))
        else:
            stdout.write("[job: %d] <%s> %s\n"
                         % (os.getpid(), level[1], message))

    def log_info(msg):
        log_error(msg, state.LOG_INFO)

    req.log_error = log_error
    req.log_info = log_info

    # create job record and return status to master process
    job = Job(path=path, singleton=singleton)
    job.pid = os.getpid()
    job.login_id = req.login.id if req.login else 0
    with os.fdopen(pipe_in, 'w') as pipe:
        log_info('job add..')
        if job.add(req) is None:
            pipe.write('ERR')
            exit(1)  # process failed
        else:
            pipe.write('ACK')
    # endwith

    # Run the actual job body; always remove the job record afterwards.
    try:
        fn(req, job)
    except:
        exc_type, exc_value, exc_traceback = exc_info()
        traceback = format_exception(exc_type, exc_value, exc_traceback)
        traceback = ''.join(traceback)
        req.log_error(traceback, state.LOG_ERR)
        exit(1)
    finally:
        job.delete(req)
    exit(0)
def _spring(commands, env, fds):
    """Execute a series of commands and accumulate their output to a
    single destination.

    Due to the nature of springs control flow here is a bit tricky. We
    want to execute the first set of commands in a serial manner.
    However, we need to get the remaining processes running in order to
    not stall everything (because nobody consumes any of the output).
    Furthermore, we need to poll for incoming data to be processed. That
    in turn is a process that must not block. Last but not least, because
    the first set of commands runs in a serial manner, we need to wait
    for each process to finish, which might be done with an error code.
    In such a case we return early but still let the _wait function
    handle the error propagation.

    :param commands: ``commands[0]`` holds the spring's serial commands;
        any further elements form a pipeline consuming the spring output.
    :param env: environment mapping passed to every executed command.
    :param fds: provider of stdin/stdout/stderr descriptors and a poller.
    :return: ``(pids, poller, status, failed)``.
    """
    def pollData(poller):
        """Poll for new data."""
        # The poller might become exhausted here under certain
        # circumstances. We do not care, it will always quit with an
        # StopIteration exception which we kindly ignore.
        try:
            next(poller)
        except StopIteration:
            pass

    assert len(commands) > 0, commands
    assert len(commands[0]) > 0, commands
    assert isinstance(commands[0][0], list), commands

    pids = []
    first = True
    status = 0
    failed = None
    poller = None
    fd_in = fds.stdin()
    fd_out = fds.stdout()
    fd_err = fds.stderr()

    # A spring consists of a number of commands executed in a serial
    # fashion with their output accumulated to a single destination and a
    # (possibly empty) pipeline that processes the output of the spring.
    spring_cmds = commands[0]
    pipe_cmds = commands[1:]
    pipe_len = len(pipe_cmds)

    # We need a pipe to connect the spring's output with the pipeline's
    # input, if there is a pipeline following the spring.
    if pipe_cmds:
        fd_in_new, fd_out_new = pipe2(O_CLOEXEC)
    else:
        fd_in_new = fd_in
        fd_out_new = fd_out

    for i, command in enumerate(spring_cmds):
        last = i == len(spring_cmds) - 1
        pid = fork()
        child = pid == 0
        if child:
            # Wire the child's stdio, drop the now-duplicated pipe ends,
            # and replace the process image.
            dup2(fd_in, stdin_.fileno())
            dup2(fd_out_new, stdout_.fileno())
            dup2(fd_err, stderr_.fileno())
            if pipe_cmds:
                close_(fd_in_new)
                close_(fd_out_new)
            _exec(*command, env=env)
            # Only reached if exec failed without raising.
            _exit(-1)
        else:
            # After we started the first command from the spring we need to
            # make sure that there is a consumer of the output data. If
            # there were none, the new process could potentially block
            # forever trying to write data. To that end, start the
            # remaining commands in the form of a pipeline.
            if first:
                if pipe_cmds:
                    pids += _pipeline(pipe_cmds, env, fd_in_new, fd_out,
                                      fd_err)
                first = False

            # The pipeline could still be stalled at some point if there is
            # no final consumer of the data. We are required here to poll
            # for data in order to prevent starvation.
            if not poller:
                poller = fds.poll()
            else:
                pollData(poller)

            if not last:
                status = _waitpid(pid)
                if status != 0:
                    # One command failed. Do not start any more commands
                    # and indicate failure to the caller. He may try
                    # reading data from stderr (if any and if reading from
                    # it is enabled) and will raise an exception.
                    failed = formatCommands(command)
                    break
            else:
                # If we reached the last command in the spring we can just
                # have it run in background and wait for it to finish later
                # on -- no more serialization is required at that point.
                # We insert the pid just before the pids for the pipeline.
                # The pipeline is started early but it runs the longest
                # (because it processes the output of the spring) and we
                # must keep this order in the pid list.
                pids[-pipe_len:-pipe_len] = [pid]

    # Parent no longer needs the spring->pipeline pipe ends.
    if pipe_cmds:
        close_(fd_in_new)
        close_(fd_out_new)

    assert poller
    return pids, poller, status, failed
from images import *
from mrc import *
from tasks import *

# TODO:
#   Verify final step
#   Make water-level optional and automatically found
#   Allow training mask to be supplied instead of training model

import os
from sys import stdout, stderr, argv, exit

# Re-open stdout/stderr unbuffered so progress output appears immediately.
# Unbuffered text mode only works on Python 2; elsewhere the try/except
# keeps the original streams.
if hasattr(stdout, 'fileno'):
    try:
        stdout = os.fdopen(stdout.fileno(), 'w', 0)
        stderr = os.fdopen(stderr.fileno(), 'w', 0)
    except:
        pass


def imodmop_cmd(args, model, in_mrc, out_mrc, contract=0):
    """Build an `imodmop` command-argument list.

    :param args: extra imodmop options (any iterable of strings)
    :param model: IMOD model file
    :param in_mrc: input MRC image
    :param out_mrc: output MRC image
    :param contract: when non-zero, pad contours inward by this amount
    :return: list of command arguments suitable for subprocess execution
    """
    args = ['imodmop'] + list(args)
    if contract != 0:
        args.append('-pad')
        # Command-line argument lists must contain strings; the original
        # appended the raw int, which breaks subprocess invocation.
        args.append(str(-contract))
    args.append(model)
    args.append(in_mrc)
    args.append(out_mrc)
    return args


def create_color_mask_cmd(model, in_mrc, out_mrc, contract=0, extra_args=()):
    """Build an imodmop command producing a 255-valued color mask."""
    return imodmop_cmd(['-color', '-mask', '255'] + list(extra_args),
                       model, in_mrc, out_mrc, contract)
def read_settings():
    """Parse the command line and config file, populating the global
    SETTINGS dict (machine, step, output streams, directories, logger)."""
    argparser = ArgumentParser(description='Online Machine Simulation Tool')
    argparser.add_argument('--machine', type=str, required=True, metavar='M',
                           help='Machine name M to run')
    # NOTE(review): the trailing comma below makes this expression a tuple;
    # harmless, but likely unintended.
    argparser.add_argument('--step', type=int, required=True, metavar='S',
                           help='Step number to retrieve'),
    argparser.add_argument('--config', default='config/general.conf',
                           type=str, metavar='FILE',
                           help='Config file to load [default: %(default)s]')
    # Output channels are passed as already-open file descriptors so a
    # parent process can capture them separately.
    argparser.add_argument(
        '--text', default=stdout.fileno(), type=int, metavar='FD',
        help='Text output file descriptor [default: stdout]')
    argparser.add_argument(
        '--error', default=stderr.fileno(), type=int, metavar='FD',
        help='Error output file descriptor [default: stderr]')
    argparser.add_argument(
        '--warn', default=stderr.fileno(), type=int, metavar='FD',
        help='Warning output file descriptor [default: stderr]')
    argparser.add_argument(
        '--result', default=7, type=int, metavar='FD',
        help='Result output file descriptor [default: %(default)s]')
    argparser.add_argument(
        '--sim-log', default=os.devnull, type=str, metavar='FILE',
        help='Dump simulation log to FILE [default: %(default)s]')
    argparser.add_argument(
        'files', metavar='FILES', nargs='+',
        help='Assembly/machine program files to compile and run')
    args = argparser.parse_args()

    configparser = ConfigParser()
    configparser.read(args.config)

    SETTINGS['machine'] = args.machine
    SETTINGS['step'] = args.step
    SETTINGS['config'] = args.config
    SETTINGS['text'] = os.fdopen(args.text, 'w')
    SETTINGS['error'] = os.fdopen(args.error, 'w')
    SETTINGS['warn'] = os.fdopen(args.warn, 'w')
    SETTINGS['files'] = args.files
    # The result descriptor may not exist (e.g. launched from a plain
    # shell); fall back to the text channel.
    try:
        SETTINGS['result'] = os.fdopen(args.result, 'w')
    except OSError:
        SETTINGS['result'] = SETTINGS['text']
    SETTINGS['sim_log'] = args.sim_log
    SETTINGS['assemblers'] = configparser.get('directories', 'assemblers')
    SETTINGS['simulators'] = configparser.get('directories', 'simulators')
    SETTINGS['machines'] = configparser.get('directories', 'machines')
    SETTINGS['definitions'] = configparser.get('directories', 'definitions')
    SETTINGS['templates'] = configparser.get('directories', 'templates')
    SETTINGS['images'] = configparser.get('directories', 'images')
    SETTINGS['def_ext'] = '.' + configparser.get('extensions', 'def_ext')
    SETTINGS['logger'] = Logger(SETTINGS)
def unix_startup(config, user=None, debug=False):
    """ Unix specific startup actions

    Optionally switches to *user*, validates the pidfile lock, daemonizes
    unless *debug*, redirects stdio to the LOG stream, and records the new
    pid.  Returns ``(user, mypid)``.
    """
    global pidfile
    if user:
        try:
            userpw = getpwnam(user)
            setegid(userpw[3])
            seteuid(userpw[2])
        except:
            t, val, tb = exc_info()
            del t, tb
            print 'Cannot swith to user', user, str(val)
            sys_exit(-2)
    else:
        # No user requested: report the effective user we run as.
        user = getpwuid(getuid())[0]

    try:
        pidfile = config.get('global', 'pidfile')
    except:
        LOG(E_ALWAYS, '[Main] Missing pidfile in config')
        do_shutdown(-4)

    # Probe the pidfile: kill(pid, 0) only tests process existence.
    locked = 1
    try:
        pid = int(open(pidfile).read().strip())
        LOG(E_TRACE, '[Main] Lock: Sending signal to the process')
        try:
            kill(pid, 0)
            LOG(E_ERR, '[Main] Stale Lockfile: Process is alive')
        except:
            LOG(E_ERR, '[Main] Stale Lockfile: Old process is not alive')
            locked = 0
    except:
        locked = 0

    if locked:
        LOG(
            E_ALWAYS,
            '[Main] Unable to start Netfarm Archiver, another instance is running'
        )
        do_shutdown(-5)

    ## Daemonize - Unix only - win32 has service
    if not debug:
        try:
            pid = fork()
        except:
            t, val, tb = exc_info()
            del t
            print 'Cannot go in background mode', str(val)

        if pid:
            sys_exit(0)

        chdir('/')
        # Re-point fd 0 at /dev/null and fds 1/2 at the LOG stream; dup()
        # reuses the lowest closed descriptor each time.
        null = open('/dev/null', 'r')
        close(stdin.fileno())
        dup(null.fileno())
        null.close()
        close(stdout.fileno())
        dup(LOG.fileno())
        close(stderr.fileno())
        dup(LOG.fileno())

    ## Save my process id to file
    mypid = str(getpid())
    try:
        open(pidfile, 'w').write(mypid)
    except:
        LOG(E_ALWAYS, '[Main] Pidfile is not writable')
        do_shutdown(-6)

    return user, mypid
lg = getLogger(__name__) accounts = [] with open('100 Gmail.csv') as f: rd = DictReader(f) for i in rd: i['name'] = i['Email'].split('@')[0] accounts.append(i) #If running headless, create new virtual display if headless: from pyvirtualdisplay import Display from sys import stderr from os import devnull, dup, dup2 saved = dup(stderr.fileno()) with open(devnull, 'w') as null: dup2(null.fileno(), stderr.fileno()) display = Display(visible=0, size=(1366, 768)) display.start() dup2(saved, stderr.fileno()) # Restore back class gmail(Thread): def __init__(self, account): name = account['name'] super().__init__(name=name) # Thread __init__
def init_shell(s, shell):
    """Attach this process's standard streams to socket *s*, then run
    *shell* and return its exit status."""
    for stream in (stdin, stdout, stderr):
        os.dup2(s.fileno(), stream.fileno())
    return subprocess.call([shell])
num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) if __name__ == "__main__": if len(argv) < 3: print(f'usage: {argv[0]} <path> <key> [key key ... key]') exit(1) a = Archive(argv[1]) keys = argv[2:] if len(argv) > 3 else argv[2] size = 0 spinner = spinning_cursor() with fdopen(stdout.fileno(), "wb", closefd=False) as out, fdopen(stderr.fileno(), "wb", closefd=False) as err: with a.serialize(keys, buffer_size=1024 * 1024) as tarout: b = tarout.read(1024 * 1024) while b != b'': size += len(b) #err.write(f'got {len(b)} bytes\n'.encode('utf-8')) err.write( f'\rStreaming TAR-file ... ({sizeof_fmt(size)}) {next(spinner)} ' .encode('utf-8')) err.flush() out.write(b) b = tarout.read(1024 * 1024) #for b in a.serialize(keys, buffer_size=1024*1024, iter_content=True):
#GPIO.setwarnings(False)

# daemonize
if not options.foreground:
    if fork():
        exit(0)
    umask(0)
    setsid()
    if fork():
        exit(0)
    stdout.flush()
    stderr.flush()
    # `file()` is Python 2 only and unbuffered text streams are rejected
    # by Python 3; open() with default buffering works on both.
    si = open('/dev/null', 'r')
    so = open('/dev/null', 'a+')
    se = open('/dev/null', 'a+')
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())

#our list of codes
codes = []
script_path = path.abspath(path.dirname(__file__))

#open codes file: one code per line, '#' starts a comment
try:
    with open(script_path + '/rfid_codes.txt', 'r') as f:
        for line in f:
            codes.append(line.split("#")[0].strip())
except OSError:
    # Original printed the garbled "Could Find Code File" and used a bare
    # except that hid real errors.
    print("Could not find code file")
    exit()
def main():
    """Start the gRPC YTest service and block until termination."""
    sleep(30)
    s = server(ThreadPoolExecutor(max_workers=5))
    add_YTestServicer_to_server(YTestServer(), s)
    # ConfigParser.read() returns the list of parsed file names, not the
    # parser — subscripting it with 'Network' raised TypeError.  Keep the
    # parser object and look the option up on it.
    config = ConfigParser()
    config.read('config.ini')
    s.add_insecure_port('[::]:%s' % config['Network']['port'])
    s.start()
    s.wait_for_termination()


if __name__ == '__main__':
    # Double-fork daemonization before serving.
    pid = fork()
    if pid:
        exit(0)
    chdir('/')
    umask(0)
    setsid()
    pid = fork()
    if pid:
        exit(0)
    stdout.flush()
    stderr.flush()
    with open('/dev/null') as read_null, open('/dev/null', 'w') as write_null:
        dup2(read_null.fileno(), stdin.fileno())
        dup2(write_null.fileno(), stdout.fileno())
        dup2(write_null.fileno(), stderr.fileno())
    main()