def exec_cleanup():
    global cleanuppid
    # no os.SEEK_SET on RHEL4
    os.lseek(clfd, 0, 0)
    filename = os.read(clfd, 1024).strip()
    # no cleanup
    if not filename:
        debug('no cleanup set')
        return
    if not os.path.isfile(filename) or not os.access(filename, os.X_OK):
        debug('cleanup file not found / not executable, skipping')
        return
    signal.signal(signal.SIGINT, cleanup_interrupt)
    cleanuppid = os.fork()
    if cleanuppid == 0:
        os.setpgrp()
        debug('executing cleanup at ' + filename)
        os.execvp(filename, [filename])
    else:
        debug('parent waiting for cleanup ' + str(cleanuppid))
        while cleanuppid != 0:
            try:
                os.waitpid(cleanuppid, 0)
                cleanuppid = 0
            except OSError, e:
                if e.errno == errno.EINTR:
                    pass
                if e.errno == errno.ECHILD:
                    cleanuppid = 0
def run(self, write_pipe):
    def recordSignal(signum, frame):
        print('Ignored signal SIGINT')
    # avoiding a problem where signal.SIG_IGN would cause the test to never stop
    signal.signal(signal.SIGINT, recordSignal)
    sys.stdin.close()
    redirect_fd_to_file(1, join(self.dir, "stdout"), tee=self.runner.verbose)
    redirect_fd_to_file(2, join(self.dir, "stderr"), tee=self.runner.verbose)
    os.chdir(self.run_dir or self.dir)
    os.setpgrp()
    with Timeout(self.timeout):
        try:
            self.test.run()
        except TimeoutException:
            write_pipe.send(TestRunner.TIMED_OUT)
        except:
            sys.stdout.write(traceback.format_exc() + '\n')
            sys.stderr.write(str(sys.exc_info()[1]) + '\n')
            write_pipe.send(TestRunner.FAILED)
        else:
            write_pipe.send(TestRunner.SUCCESS)
        finally:
            if self.run_dir:
                for file in os.listdir(self.run_dir):
                    shutil.move(join(self.run_dir, file), join(self.dir, file))
                os.rmdir(self.run_dir)
def fork_task(self, task_info):
    self.log_debug("Forking task %s" % self._task_str(task_info))
    pid = os.fork()
    if pid:
        self.log_info("Task forked %s: pid=%s" % (self._task_str(task_info), pid))
        return pid
    # in no circumstance should we return after the fork
    # nor should any exceptions propagate past here
    try:
        # set process group
        os.setpgrp()
        # set a do-nothing handler for sigusr2
        # do not use signal.signal(signal.SIGUSR2, signal.SIG_IGN) - it completely masks interrupts !!!
        signal.signal(signal.SIGUSR2, lambda *args: None)
        # set a default handler for SIGTERM
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # run the task
        self.run_task(task_info)
    except Exception:
        self.log_critical("Error running forked task", exc_info=1)
        # don't bother raising since we're about to exit
    finally:
        # die
        os._exit(os.EX_OK)
def process_reworker(work_queue, insert_queue, stats_queue, options):
    global shutdown_event
    global finished_event
    try:
        os.setpgrp()
        es = connectElastic(options.es_uri)
        while not shutdown_event.is_set():
            try:
                work = work_queue.get_nowait()
                try:
                    entry = parse_entry(work['row'], work['header'], options)
                    if entry is None:
                        print("Malformed Entry")
                        continue
                    domainName = entry['domainName']
                    current_entry_raw = find_entry(es, domainName, options)
                    if update_required(current_entry_raw, options):
                        stats_queue.put('total')
                        process_entry(insert_queue, stats_queue, es, entry,
                                      current_entry_raw, options)
                finally:
                    work_queue.task_done()
            except queue.Empty as e:
                if finished_event.is_set():
                    break
                time.sleep(.01)
            except Exception as e:
                sys.stdout.write("Unhandled Exception: %s, %s\n" % (str(e), traceback.format_exc()))
    except Exception as e:
        sys.stdout.write("Unhandled Exception: %s, %s\n" % (str(e), traceback.format_exc()))
def preexec_function():
    """
    Ignore the SIGINT signal by setting the handler to the standard signal
    handler SIG_IGN. This allows Cosmos to cleanly terminate jobs when there
    is a ctrl+c event
    """
    os.setpgrp()
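# Usage sketch (not from the original source): a preexec function like the one
# above is typically handed to subprocess.Popen via preexec_fn, so the child is
# placed in its own process group before exec and a Ctrl-C in the parent's
# terminal does not reach it directly. The 'sleep 60' command is a hypothetical
# placeholder used purely for illustration.
import subprocess

proc = subprocess.Popen(['sleep', '60'], preexec_fn=preexec_function)
proc.wait()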
def spawn_agents(self):
    os.setpgrp()
    if os.path.exists(MultiAgent.IMAGE_PATH):
        rm_rf(MultiAgent.IMAGE_PATH)
    mkdir_p(MultiAgent.IMAGE_PATH)
    for index in xrange(self.agent_count):
        # Create config sub-dirs
        config_path = self.create_config(index)
        log_file = self.get_log_file(index)
        # Set up argument list
        args = self.argv[:]
        args.append("--multi-agent-id")
        args.append(str(index))
        args.append("--config-path")
        args.append(config_path)
        args.append("--logging-file")
        args.append(log_file)
        args.append("--port")
        args.append(str(self.agent_port + index))
        args.append("--datastores")
        args.append("DataStore-" + str(index).zfill(4))
        args.append("--vm-network")
        args.append("Network-" + str(index).zfill(4))
        command = ''
        for arg in args:
            command += ' '
            command += arg
        proc = subprocess.Popen(args)
        self.procs.append(proc)
    signal.signal(signal.SIGTERM, self._signal_handler)
    signal.pause()
    self.cleanup()
    sys.exit(0)
def runIt():
    global childPid
    ws = nws.client.NetWorkSpace(key, serverHost=nwssHost, serverPort=int(nwssPort))
    LogI('Opened workspace %s on %s at %s.' % (key, nwssHost, nwssPort))
    jobTag = 'job %s' % jobIndex
    cmd = ws.fetch(jobTag)
    LogI('Fetched job: %s.' % cmd)
    try:
        ws.deleteVar(jobTag)
    except:
        pass
    startTime = time.time()
    ecmd = ['/bin/bash', '-c', cmd]
    childPid = os.fork()
    if childPid == 0:
        # Create a new process group to simplify killing the child and its descendants.
        os.setpgrp()
        os.execv(ecmd[0], ecmd)
    LogI('Child %d started on %s at %f.' % (childPid, myHost, startTime))
    childExistsLock.release()
    while 1:
        (retpid, retval) = os.waitpid(childPid, 0)
        if retpid:
            break
    LogI('Child %d returned on %d.' % (childPid, retval))
    childExitedLock.release()
    ws.store('Job %s Status' % jobIndex, (retval, startTime, time.time(), 0, childPid, myHost))
    ws.store('shut down %s' % jobIndex, 0)  # let the sentinel know that the child is done.
    runItDoneLock.release()
def RunCHARMM(InputFile, OutputFile, OtherInputs = '', CHARMMBin = 'charmm', Overwrite = True):
    """
    Runs CHARMM
    @todo document
    """
    if not Overwrite and os.path.exists(OutputFile):
        print 'Output file', OutputFile, 'already exists. Abort.'
        return

    UnboundVars = UnboundCHARMMVariables(InputFile, OtherInputs)
    if len(UnboundVars) > 0:
        assert False, 'ERROR: Unbound variables detected: ' + ' '.join(UnboundVars)

    os.setpgrp()  # create new process group, become its leader
    try:
        p = Popen(CHARMMBin + ' ' + OtherInputs, stdin=PIPE, stdout=PIPE,
                  stderr=STDOUT, bufsize=1, shell=True, close_fds=True)
        #p.stdin.write('\n'.join([open(InputFile).read(), OtherInputs]))
        p.stdin.write(open(InputFile).read())
        p.stdin.close()
        o = open(OutputFile, 'w', 1)
        o.write(p.stdout.read())
        p.stdout.close()
        o.close()
    except KeyboardInterrupt:
        # Kill ALL children
        os.killpg(0, signal.SIGKILL)
        exit()
def __init__(self, stream, gate):
    self.stream = stream
    self.gate = gate
    aj.master = False
    os.setpgrp()
    setproctitle.setproctitle(
        '%s worker [%s]' % (
            sys.argv[0],
            self.gate.name
        )
    )
    set_log_params(tag=self.gate.log_tag)
    init_log_forwarding(self.send_log_event)
    logging.info(
        'New worker "%s" PID %s, EUID %s, EGID %s',
        self.gate.name,
        os.getpid(),
        os.geteuid(),
        os.getegid(),
    )
    self.context = Context(parent=aj.context)
    self.context.session = self.gate.session
    self.context.worker = self
    self.handler = HttpMiddlewareAggregator([
        AuthenticationMiddleware.get(self.context),
        CentralDispatcher.get(self.context),
    ])
    self._master_config_reloaded = Event()
def pre_exec_fn(self, i):
    # This is called after fork(), but before exec()
    # Assign this new process to a new group
    if self.NEW_PROCESS_GROUP:
        os.setpgrp()
    if self.LINUX_USE_PDEATHSIG:
        prctl.set_pdeathsig(self.get_pdeath_sig())
def start_jdb(args, pid):
    log("Starting jdb to unblock application.")

    # Give gdbserver some time to attach.
    time.sleep(0.5)

    # Do setup stuff to keep ^C in the parent from killing us.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    windows = sys.platform.startswith("win")
    if not windows:
        os.setpgrp()

    jdb_port = 65534
    args.device.forward("tcp:{}".format(jdb_port), "jdwp:{}".format(pid))
    jdb_cmd = [args.jdb_cmd, "-connect",
               "com.sun.jdi.SocketAttach:hostname=localhost,port={}".format(jdb_port)]

    flags = subprocess.CREATE_NEW_PROCESS_GROUP if windows else 0
    jdb = subprocess.Popen(jdb_cmd, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                           creationflags=flags)
    jdb.stdin.write("exit\n")
    jdb.wait()
    log("JDB finished unblocking application.")
def _run(self):
    os.setpgrp()
    signal.signal(signal.SIGHUP, self._sig_ign)
    signal.signal(signal.SIGINT, self._sig_ign)
    signal.signal(signal.SIGTERM, self._sig_ign)

    self._connection_pipe = self._write_pipe

    self._log_ctl.disable_logging()
    self._log_ctl.set_connection(self._connection_pipe)

    result = {}
    try:
        self._cmd_cls.run()
    except (KeyboardInterrupt, SystemExit):
        pass
    except:
        log_exc_traceback()
        type, value, tb = sys.exc_info()
        data = {"Exception": "%s" % value}
        self._cmd_cls.set_fail(data)
    finally:
        res_data = self._cmd_cls.get_result()
        result["type"] = "result"
        result["cmd_id"] = self._id
        result["result"] = res_data
        send_data(self._write_pipe, result)
        self._write_pipe.close()
def become_tty_fg(self, child=True):
    os.setpgrp()
    hdlr = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
    tty = os.open('/dev/tty', os.O_RDWR)
    os.tcsetpgrp(tty, os.getpgrp())
    if child:
        signal.signal(signal.SIGTTOU, hdlr)
def forkTask(self, handler):
    # get the subsession before we fork
    newhub = self.session.subsession()
    session_id = newhub.sinfo['session-id']
    pid = os.fork()
    if pid:
        newhub._forget()
        return pid, session_id
    # in no circumstance should we return after the fork
    # nor should any exceptions propagate past here
    try:
        self.session._forget()
        # set process group
        os.setpgrp()
        # use the subsession
        self.session = newhub
        handler.session = self.session
        # set a do-nothing handler for sigusr2
        signal.signal(signal.SIGUSR2, lambda *args: None)
        self.runTask(handler)
    finally:
        # diediedie
        try:
            self.session.logout()
        finally:
            os._exit(0)
def main (argv):
    os.setpgrp ()
    loadEnv (os.path.join (root, "env.common"))

    seahorn_args = filter (is_seahorn_opt, argv [1:])
    argv = filter (is_non_seahorn_opt, argv [1:])

    (opt, args) = parseOpt (argv)

    workdir = createWorkDir (opt.temp_dir, opt.save_temps)

    assert len(args) == 1
    in_name = args [0]

    do_bc = True
    do_seahorn = opt.do_seahorn

    (in_base, in_ext) = os.path.splitext (in_name)
    if in_ext == '.bc':
        do_bc = False

    if do_bc:
        bc = defBcName (in_name, workdir)
        Gcc (in_name, bc, arch=opt.machine)
        in_name = bc

    if do_seahorn:
        seahorn (in_name, seahorn_args, cpu=opt.cpu, mem=opt.mem)
        in_name = None

    if opt.out_name != None and in_name != None and in_name != args [0]:
        shutil.copy2 (in_name, opt.out_name)

    return 0
def say(self, text, timeout=5):
    for p in '"-!\'':
        text = text.replace(p, '')
    if _platform == "linux" or _platform == "linux2":
        # linux
        pass
    elif _platform == "darwin":
        # OS X
        subprocess.call(['say', text])
        pass
    elif _platform == "win32":
        # Windows...
        print('windows detected')
        pid = os.fork()
        if pid == 0:
            os.setpgrp()
            new = os.fork()
            if new == 0:
                subprocess.call(['python', 'speech.py', text])
                sys.exit(1)
            time.sleep(timeout)
            os.kill(-int(os.getpgrp()), signal.SIGKILL)
            sys.exit(1)
        os.waitpid(pid, 0)
        # time.sleep(5)
        # os.kill(pid, signal.SIGKILL)
        # os.kill(-int(os.getpgrp()), signal.SIGKILL)
        # self.engine.say(text)
        # self.engine.runAndWait()
    print(text)
    pass
def __preexec_work(self):
    """Function used before running a program that needs to run as a
    different user."""
    # Put us into a separate process group so we don't get
    # SIGINT signals on Ctrl-C (bundy-init will shut everything down by
    # other means).
    os.setpgrp()
def _startBuild(self, job):
    pid = self._fork('Job %s' % job.jobId, close=True)
    if pid:
        self._buildPids[pid] = job.jobId  # mark this pid for potential killing later
        return job.jobId
    else:
        try:
            try:
                # we want to be able to kill this build process and
                # all its children with one swell foop.
                os.setpgrp()

                self.db.reopen()

                buildMgr = self.getBuilder(job)
                self._subscribeToBuild(buildMgr)
                # Install builder-specific signal handlers.
                buildMgr._installSignalHandlers()
                # need to reinitialize the database in the forked child
                # process
                buildCfg = job.getMainConfig()
                if buildCfg.jobContext:
                    buildMgr.setJobContext(buildCfg.jobContext)
                # don't do anything else in here, buildAndExit has
                # handling for ensuring that exceptions are handled
                # correctly.
                buildMgr.buildAndExit()
            except Exception, err:
                tb = traceback.format_exc()
                buildMgr.logger.error('Build initialization failed: %s' % err, tb)
                job.exceptionOccurred(err, tb)
        finally:
            os._exit(2)
def spawn_independent(command, shell=False):
    """
    Given a command suitable for 'Popen', open the process such that if this
    process is killed, the spawned process survives.

    `command` is either a list of strings, with the first item in the list
    being the executable and the rest being its arguments, or a single string
    containing the executable and its arguments. In the latter case, any
    argument that contains spaces must be delimited with double-quotes.
    """
    if sys.platform == 'win32':
        if isinstance(command, STRING_BASE_CLASS):
            command = 'start /b ' + command
        else:
            command.insert(0, 'start')
            command.insert(1, '/b')
        Popen(command, shell=True)
    elif sys.platform == 'darwin':
        pid = os.fork()
        if pid:
            return
        else:
            os.setpgrp()
            if isinstance(command, STRING_BASE_CLASS):
                tmp = quoted_split(command)
            else:
                tmp = command
            os.execv(tmp[0], tmp)
    else:
        pid = os.fork()
        if pid:
            return
        else:
            os.setpgrp()
            Popen(command, shell=shell)
            sys.exit(0)
def loop_body(shared_break, shared_slots, shared_lock, i, target, args):
    """
    Executes the given function in its own process group (on Windows: in its
    own process).

    :param shared_break: Loop-wide shared integer. Should be set to 1 if the
        whole loop should be terminated.
    :param shared_slots: Loop-wide shared array of slots to keep track of
        status of loop bodies. The slot corresponding to a loop body should
        be set to 0 once it has finished.
    :param shared_lock: Loop-wide shared lock. Should be notified when a loop
        body has finished.
    :param i: The index of the current slot.
    :param target: The function to run in parallel.
    :param args: The arguments that the target should run with.
    """
    if not is_windows:
        os.setpgrp()

    try:
        if not target(*args):
            shared_break.value = 1
    except Exception as e:
        logger.warning('', exc_info=e)
        shared_break.value = 1

    shared_slots[i] = 0
    with shared_lock:
        shared_lock.notify()
def install(fork=True, sig=SIGTERM):
    def _reg(gid):
        handler = make_quit_signal_handler(gid, sig)
        signal(SIGINT, handler)
        signal(SIGQUIT, handler)
        signal(SIGTERM, handler)
        signal(SIGCHLD, make_child_die_signal_handler(gid, sig))

    if not fork:
        _reg(os.getpid())
        return

    pid = os.fork()
    if pid == 0:
        # child process
        os.setpgrp()
        pid = os.fork()
        if pid != 0:
            # still in child process
            exit_when_parent_or_child_dies(sig)
        # grand child process continues...
    else:
        # parent process
        gid = pid
        _reg(gid)
        while True:
            pause()
def fast_read_ldif(source, timeout):
    # Get pipe file descriptors
    read_fd, write_fd = os.pipe()
    # Fork
    pid = os.fork()
    if pid:
        # Close write file descriptor as we don't need it.
        os.close(write_fd)
        read_fh = os.fdopen(read_fd)
        raw_ldif = read_fh.read()
        result = os.waitpid(pid, 0)
        if (result[1] > 0):
            return ""
        raw_ldif = raw_ldif.replace("\n ", "")
        return raw_ldif
    else:
        # Close read file descriptor
        os.close(read_fd)
        # Set process group
        os.setpgrp()
        # Setup signal handler
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(timeout)
        # Open pipe to LDIF
        if (source[:7] == 'ldap://'):
            url = source.split('/')
            host = url[2].split(':')[0]
            port = url[2].split(':')[1]
            bind = url[3]
            count = 4
            while count < len(url):
                bind = bind + "/" + url[count]
                count = count + 1
            command = "ldapsearch -LLL -x -h %s -p %s -b %s 2>/dev/null" % (host, port, bind)
            pipe = os.popen(command)
        elif (source[:7] == 'file://'):
            pipe = open(source[7:])
        else:
            pipe = os.popen(source)
        raw_ldif = pipe.read()
        # Close LDIF pipe
        pipe.close()
        write_fh = os.fdopen(write_fd, 'w')
        write_fh.write(raw_ldif)
        write_fh.close()
        signal.alarm(0)  # Disable the alarm
        sys.exit(0)
def main(usage, get_app):
    parser = op.OptionParser(usage=usage, option_list=options())
    opts, args = parser.parse_args()
    configure_logging(opts)

    app = get_app(parser, opts, args)
    workers = opts.workers or 1
    if opts.debug:
        workers = 1

    host = opts.host or '127.0.0.1'
    port = opts.port
    if port is None:
        if ':' in host:
            host, port = host.split(':', 1)
            port = int(port)
        else:
            port = 8000

    kwargs = dict(
        debug=opts.debug,
        pidfile=opts.pidfile
    )

    arbiter = Arbiter((host, port), workers, app, **kwargs)
    if opts.daemon:
        daemonize()
    else:
        os.setpgrp()
    arbiter.run()
def __init__(self, tarball):
    """
    This method takes a tarfile.TarFile object and spawns *two* new
    processes: an xz process for decompression and an additional python
    process that simply feeds data from the TarFile to it. The latter is
    necessary because the file-like object we get from TarFile.extractfile
    cannot be passed to a subprocess directly. For that reason, one is also
    free to close the tarball after this object is created.
    """
    self.__subp_pid = None
    self.__read_fh = None
    member = tarball.getmember(IMAGE_ARCNAME)
    compressed_image = tarball.extractfile(member)
    pipe_r, pipe_w = open_pipe_fileobjs()
    self.__subp_pid = os.fork()
    if self.__subp_pid == 0:
        os.setpgrp()
        pipe_r.close()
        self.__xz_proc = subprocess.Popen(('xz', '-d'), stdin=subprocess.PIPE,
                                          stdout=pipe_w, close_fds=True)
        pipe_w.close()
        shutil.copyfileobj(compressed_image, self.__xz_proc.stdin)
        self.__xz_proc.stdin.close()
        self.__xz_proc.wait()
        os._exit(os.EX_OK)
    else:
        self.__read_fh = pipe_r
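# Usage sketch (not from the original source): the __init__ above belongs to a
# reader class whose name is not shown in this extract; "XzImageReader" below is
# a hypothetical stand-in, and IMAGE_ARCNAME is assumed to be the name of the
# xz-compressed image member inside the tarball.
import tarfile

with tarfile.open('bundle.tar') as tarball:
    reader = XzImageReader(tarball)  # forks the feeder process and the xz pipeline
# the tarball may be closed now; decompressed data is available from the
# pipe read end kept on the reader object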
def paste_server(app, global_conf=None, host="127.0.0.1", port=None, *args, **kwargs):
    """
    Paster server entrypoint to add to your paster ini file:

        [server:main]
        use = egg:gunicorn#main
        host = 127.0.0.1
        port = 5000
    """
    options = kwargs.copy()
    if port and not host.startswith("unix:"):
        bind = "%s:%s" % (host, port)
    else:
        bind = host
    options['bind'] = bind

    if global_conf:
        for key, value in list(global_conf.items()):
            if value and value is not None:
                if key == "debug":
                    value = (value == "true")
                options[key] = value
    options['default_proc_name'] = options['__file__']

    conf = Config(options)

    arbiter = conf.arbiter(conf.address, conf.workers, app,
                           debug=conf["debug"], pidfile=conf["pidfile"],
                           config=conf)
    if conf["daemon"]:
        daemonize()
    else:
        os.setpgrp()
    configure_logging(conf)
    arbiter.run()
def __init__(self, output_dir):
    self.cmd_id = 0
    self.pid_list = {}
    self.processes = []
    self.files = []
    self.output_dir = output_dir
    setpgrp()  # create new process group and become its leader
def run(self):
    os.setpgrp()
    unittest_args = [sys.argv[0]]
    if ARGS.verbose:
        unittest_args += ["-v"]
    unittest.main(argv=unittest_args)
def __init__(self):
    from_parent, to_worker = os.pipe()
    from_worker, to_parent = os.pipe()
    sync_from_worker, sync_to_parent = os.pipe()
    unix.close_on_exec(to_worker)
    unix.close_on_exec(from_worker)
    unix.close_on_exec(sync_from_worker)
    unix.keep_on_exec(from_parent)
    unix.keep_on_exec(to_parent)
    unix.keep_on_exec(sync_to_parent)
    worker_pid = os.fork()
    if not worker_pid:
        os.setpgrp()  # prevent worker from receiving Ctrl-C
        python = sys.executable
        os.execvp(python, [python, '-m', 'assay.worker', str(from_parent),
                           str(to_parent), str(sync_to_parent)])
    os.close(from_parent)
    os.close(to_parent)
    os.close(sync_to_parent)
    self.pids = [worker_pid]
    self.to_worker = os.fdopen(to_worker, 'wb')
    self.from_worker = os.fdopen(from_worker, 'rb', BUFSIZE)
    self.sync_from_worker = sync_from_worker
def __init__(self, account=None, workdir=None, settings=None, memory=None,
             cores=None, jobLimit=None, killGroup=True, preamble=None, debug=False):
    self.account = account
    if memory == None:
        memory = 4194304
    self.memory = int(memory)
    if cores == None:
        cores = 1
    self.cores = int(cores)
    #self.machineName = account.split("@")[-1]
    self.workDir = workdir
    self.preamble = preamble
    #self._workDirBase = workDirBase
    #self.setWorkDir("", False)
    # State constants
    self.NOT_EXIST = "NOT_EXIST"
    self.NONZERO = "NONZERO"
    self.ZERO = "ZERO"
    # Batch command queue
    self.commands = []
    self.compression = False  #True
    self.remoteSettingsPath = settings
    self.cachedRemoteSettings = None
    self._logs = {}
    if jobLimit == None:
        jobLimit = -1
    self.jobLimit = int(jobLimit)
    self.debug = debug
    self.resubmitOnlyFinished = True
    # Make sure local processes are killed on exit
    if self.account == None and killGroup:  # and not UnixConnection.programGroupSet:
        #UnixConnection.programGroupSet = True
        os.setpgrp()  # All child processes from subprocess should be in this group
def fork_kernel(self, config, pipe, resource_limits, logfile):
    """ A function to be set as the target for the new kernel processes forked
    in ForkingKernelManager.start_kernel. This method forks and initializes a
    new kernel, uses the update_function to update the kernel's namespace,
    sets resource limits for the kernel, and sends kernel connection
    information through the Pipe object.

    :arg IPython.config.loader config: kernel configuration
    :arg multiprocessing.Pipe pipe: a multiprocessing connection object which
        will send kernel ip, session, and port information to the other side
    :arg dict resource_limits: a dict with keys resource.RLIMIT_* (see
        config_default documentation for explanation of valid options) and
        values of the limit for the given resource to be set in the kernel
        process
    """
    os.setpgrp()
    logging.basicConfig(filename=self.filename,
                        format=str(uuid.uuid4()).split('-')[0] + ': %(asctime)s %(message)s',
                        level=logging.DEBUG)
    logging.debug("kernel forked; now starting and configuring")
    try:
        ka = IPKernelApp.instance(config=config, ip=config["ip"])
        from namespace import InstrumentedNamespace
        ka.user_ns = InstrumentedNamespace()
        ka.initialize([])
    except:
        logging.exception("Error initializing IPython kernel")
    try:
        if self.update_function is not None:
            self.update_function(ka)
    except:
        logging.exception("Error configuring up kernel")
    logging.debug("finished updating")
    for r, limit in resource_limits.iteritems():
        resource.setrlimit(getattr(resource, r), (limit, limit))
    pipe.send({"ip": ka.ip, "key": ka.session.key, "shell_port": ka.shell_port,
               "stdin_port": ka.stdin_port, "hb_port": ka.hb_port,
               "iopub_port": ka.iopub_port})
    pipe.close()
    ka.start()
def setpgrp_preexec_fn ():
    os.setpgrp()
tornado_port = base_port + 2
webpack_port = base_port + 3
thumbor_port = base_port + 4

os.chdir(os.path.join(os.path.dirname(__file__), '..'))

# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')

if options.clear_memcached:
    print("Clearing memcached ...")
    subprocess.check_call('./scripts/setup/flush-memcached')

# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()

# Save pid of parent process to the pid file. It can be used later by
# tools/stop-run-dev to kill the server without having to find the
# terminal in question.
if options.test:
    pid_file_path = os.path.join(
        os.path.join(os.getcwd(), 'var/casper/run_dev.pid'))
else:
    pid_file_path = os.path.join(
        os.path.join(os.getcwd(), 'var/run/run_dev.pid'))

# Required for compatibility across python versions.
if not os.path.exists(os.path.dirname(pid_file_path)):
    os.makedirs(os.path.dirname(pid_file_path))
def start(self, num_tolerable_ping_failures=4294967295):
    _get_metric_tracker().track('engine-started', value=1, send_sys_info=True)
    _get_metric_tracker().track('engine-started-local', value=1)

    arglist = [self.server_bin, self.server_addr]
    if (self.auth_token):
        arglist.append("--auth_token=%s" % self.auth_token)
    if self.secret_key != '':
        arglist.append("--secret_key=%s" % self.secret_key)
    arglist.append("--log_file=%s" % self.unity_log)
    arglist.append("--log_rotation_interval=%d" % default_local_conf.log_rotation_interval)
    arglist.append("--log_rotation_truncate=%d" % default_local_conf.log_rotation_truncate)

    self._validate_protocol_and_address()

    if sys.platform == 'win32':
        self.unity_log += ".0"

    # Start a local server as a child process.
    try:
        FNULL = open(os.devnull, 'w')
        if sys.platform == 'win32':
            self.proc = subprocess.Popen(arglist,
                                         env=_sys_util.make_unity_server_env(),
                                         stdin=subprocess.PIPE, stdout=FNULL,
                                         stderr=None, bufsize=-1)  # preexec_fn not supported on windows
        else:
            self.proc = subprocess.Popen(arglist,
                                         env=_sys_util.make_unity_server_env(),
                                         stdin=subprocess.PIPE, stdout=FNULL,
                                         stderr=None, bufsize=-1,
                                         preexec_fn=lambda: os.setpgrp())  # do not forward signal
    except OSError as e:
        raise RuntimeError('Invalid server binary \"%s\": %s' % (self.server_bin, str(e)))
    except KeyError as e:
        raise RuntimeError(e.message)

    # update the default server_addr
    if (self.server_addr == 'default'):
        self.server_addr = 'ipc:///tmp/graphlab_server-%s' % (self.proc.pid)

    self.logger.info('Start server at: ' + self.server_addr + " - "
                     'Server binary: ' + self.server_bin + " - "
                     'Server log: ' + self.unity_log)

    # try to establish a connection to the server.
    (client_public_key, client_secret_key) = ('', '')
    if (self.public_key != '' and self.secret_key != ''):
        (client_public_key, client_secret_key) = get_public_secret_key_pair()

    max_retry = 5
    retry = 0
    server_alive = True
    while retry < max_retry:
        retry += 1

        # Make sure the server process is still alive
        if (self.proc.poll() is not None):
            server_alive = False

        # OK, server is alive, try create a client and connect
        if (server_alive):
            try:
                c = Client([], self.server_addr, num_tolerable_ping_failures,
                           public_key=client_public_key,
                           secret_key=client_secret_key,
                           server_public_key=self.public_key)
                if self.auth_token:
                    c.add_auth_method_token(self.auth_token)
                c.set_server_alive_watch_pid(self.proc.pid)
                c.start()
                # everything works, break out of the retry loop
                break
            except Exception as e:
                self.logger.error('Try connecting to server. Error: %s. Retry = %d' % (str(e), retry))
                time.sleep(0.5)
            finally:
                c.stop()
        # Server process terminated, raise exception and get the return code
        else:
            retcode = self.proc.returncode
            self.proc = None
            self.logger.error('Cannot start server process. Return code: %d' % retcode)
            raise RuntimeError('Cannot start server process. Return code: %d' % retcode)

    if retry == max_retry:
        self.logger.error('Cannot connect to server. Exceeded max retry (%d).' % max_retry)
        raise RuntimeError('Cannot connect to server. Exceeded max retry (%d).' % max_retry)

    import threading

    def server_wait():
        self.proc.wait()

    self.wait_thread = threading.Thread(target=server_wait)
    self.wait_thread.setDaemon(True)
    self.wait_thread.start()
def envoy_preexec_fn():
    os.setpgrp()
    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
    libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)
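# Sketch (not from the original source): PR_SET_PDEATHSIG used above comes from
# <sys/prctl.h>; on Linux its value is 1, and it asks the kernel to deliver the
# given signal when the parent process dies. A minimal set of definitions that
# would make the function above self-contained might look like this; the Popen
# call in the comment uses a hypothetical envoy binary purely for illustration.
import ctypes
import ctypes.util
import os
import signal
import subprocess

PR_SET_PDEATHSIG = 1  # from <sys/prctl.h>

# subprocess.Popen(['./envoy', '-c', 'envoy.yaml'], preexec_fn=envoy_preexec_fn)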
def main():
    parser = argparse.ArgumentParser(
        description="Solve Sudoku puzzles with logic and multiprocessed backtracking.")
    parser.add_argument("board", nargs="?", metavar="BOARD",
                        help="Serialized board, read from top left to right. Use '.' for empty cell.",
                        type=str)
    parser.add_argument("-s", metavar="Board size. Must be a square number.", type=int, default=9)
    parser.add_argument("-p", metavar="Number of processes.", type=int, default=4)
    parser.add_argument("--flat", action='store_const', const=True)
    parser.add_argument("--debug", action='store_const', const=True)
    parser.add_argument("--show", help="Pretty print the board before solving.",
                        action='store_const', const=True)
    cmdargs = parser.parse_args()

    if cmdargs.debug:
        print("ARGS:", cmdargs)

    if cmdargs.board:
        try:
            b = board_from_string(cmdargs.board, cmdargs.s)
            if cmdargs.show:
                print(b)
            solution = solve_sudoku(b, num_processes=cmdargs.p)
            if cmdargs.flat:
                print(solution.serialize())
            else:
                print(solution)
            quit()
        except Exception as e:
            raise e

    if sys.platform == "linux":
        os.setpgrp()

    print("Sudoku")
    print("Test case:")
    some_boards = [
        "000050040200800530510029678000004003072030950600200000125940087098003002060080000",
        "483921657900305001001806400008102900700000008006708200002609500800203009005010300",
        "000000000000000000000000000000000000000000000000000000000000000000000000000000000",
        "900100400007020080060000000400500200080090010003006000100700030005008900020000006",
        "003020600900305001001806400008102900700000008006708200002609500800203009005010300",
        "800000000003600000070090200050007000000045700000100030001000068008500010090000400",
    ]
    cases = [list(map(int, i)) for i in some_boards]
    # start = []
    bsize = 9
    # print(start)
    tb = SudokuBoard(cases[-1], bsize)
    print(tb)
    assert tb.check_partial(), "Test input failure"

    numthreads = 4
    try:
        while True:
            numthreads = int(input("How many processes? default=4\n>>>").strip())
            if numthreads > 32:
                input("Are you sure you want {} processes?".format(numthreads))
            else:
                break
    except ValueError:
        pass
    except EOFError:
        quit()
    print(numthreads, "process(es)")

    ti_solve = time.time()
    br = backtracking.Backtracker(next_choice_func=sudoku_next_choices,
                                  candidate_matcher=sudoku_final_test,
                                  partial_checker=sudoku_partial_test,
                                  starting_guesses=[tb])
    br.go(numthreads=numthreads)
    while br.solutions_queue.empty():
        pass
    br.terminate()
    br.join()

    if not br.solutions_queue.empty():
        print("Solution found!")
        print("DeltaT = {:.5f}ish".format(time.time() - ti_solve), "seconds")
        results = []
        while not br.solutions_queue.empty():
            r = br.solutions_queue.get()
            r.unoptimize()
            print(r)
            results.append(r)
        results = [i for i in results if i.check()]
def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None, env = None):
    try:
        self.threadsock, threadport = self.create_socket()
        self.server_socket, self.serverport = self.create_socket()
    except socket.error as msg:
        self.logger.error("Failed to create listening socket: %s" % msg[1])
        return False

    bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1'
    if extra_bootparams:
        bootparams = bootparams + ' ' + extra_bootparams

    # Ask QEMU to store the QEMU process PID in file, this way we don't have to parse running processes
    # and analyze descendents in order to determine it.
    if os.path.exists(self.qemu_pidfile):
        os.remove(self.qemu_pidfile)
    self.qemuparams = 'bootparams="{0}" qemuparams="-serial tcp:127.0.0.1:{1} -pidfile {2}"'.format(bootparams, threadport, self.qemu_pidfile)
    if qemuparams:
        self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'

    launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams)

    self.origchldhandler = signal.getsignal(signal.SIGCHLD)
    signal.signal(signal.SIGCHLD, self.handleSIGCHLD)

    self.logger.debug('launchcmd=%s' % (launch_cmd))

    # FIXME: We pass in stdin=subprocess.PIPE here to work around stty
    # blocking at the end of the runqemu script when using this within
    # oe-selftest (this makes stty error out immediately). There ought
    # to be a proper fix but this will suffice for now.
    self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
                                    preexec_fn=os.setpgrp, env=env)
    output = self.runqemu.stdout

    #
    # We need the preexec_fn above so that all runqemu processes can easily be killed
    # (by killing their process group). This presents a problem if this controlling
    # process itself is killed however since those processes don't notice the death
    # of the parent and merrily continue on.
    #
    # Rather than hack runqemu to deal with this, we add something here instead.
    # Basically we fork off another process which holds an open pipe to the parent
    # and also is setpgrp. If/when the pipe sees EOF from the parent dieing, it kills
    # the process group. This is like pctrl's PDEATHSIG but for a process group
    # rather than a single process.
    #
    r, w = os.pipe()
    self.monitorpid = os.fork()
    if self.monitorpid:
        os.close(r)
        self.monitorpipe = os.fdopen(w, "w")
    else:
        # child process
        os.setpgrp()
        os.close(w)
        r = os.fdopen(r)
        x = r.read()
        os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
        sys.exit(0)

    self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
    self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
                      (self.runqemutime, time.strftime("%D %H:%M:%S")))
    endtime = time.time() + self.runqemutime
    while not self.is_alive() and time.time() < endtime:
        if self.runqemu.poll():
            if self.runqemu.returncode:
                # No point waiting any longer
                self.logger.debug('runqemu exited with code %d' % self.runqemu.returncode)
                self._dump_host()
                self.logger.debug("Output from runqemu:\n%s" % self.getOutput(output))
                self.stop()
                return False
        time.sleep(0.5)

    if not self.is_alive():
        self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
                          (self.runqemutime, time.strftime("%D %H:%M:%S")))
        # Dump all processes to help us to figure out what is going on...
        ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command '],
                              stdout=subprocess.PIPE).communicate()[0]
        processes = ps.decode("utf-8")
        self.logger.debug("Running processes:\n%s" % processes)
        self._dump_host()
        op = self.getOutput(output)
        self.stop()
        if op:
            self.logger.error("Output from runqemu:\n%s" % op)
        else:
            self.logger.error("No output from runqemu.\n")
        return False

    # We are alive: qemu is running
    out = self.getOutput(output)
    netconf = False  # network configuration is not required by default
    self.logger.debug("qemu started in %s seconds - qemu process pid is %s (%s)" %
                      (time.time() - (endtime - self.runqemutime), self.qemupid,
                       time.strftime("%D %H:%M:%S")))
    if get_ip:
        cmdline = ''
        with open('/proc/%s/cmdline' % self.qemupid) as p:
            cmdline = p.read()
            # It is needed to sanitize the data received
            # because is possible to have control characters
            cmdline = re_control_char.sub(' ', cmdline)
        try:
            ips = re.findall(r"((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
            self.ip = ips[0]
            self.server_ip = ips[1]
            self.logger.debug("qemu cmdline used:\n{}".format(cmdline))
        except (IndexError, ValueError):
            # Try to get network configuration from runqemu output
            match = re.match(r'.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
                             out, re.MULTILINE | re.DOTALL)
            if match:
                self.ip, self.server_ip, self.netmask = match.groups()
                # network configuration is required as we couldn't get it
                # from the runqemu command line, so qemu doesn't run kernel
                # and guest networking is not configured
                netconf = True
            else:
                self.logger.error("Couldn't get ip from qemu command line and runqemu output! "
                                  "Here is the qemu command line used:\n%s\n"
                                  "and output from runqemu:\n%s" % (cmdline, out))
                self._dump_host()
                self.stop()
                return False

    self.logger.debug("Target IP: %s" % self.ip)
    self.logger.debug("Server IP: %s" % self.server_ip)

    self.thread = LoggingThread(self.log, self.threadsock, self.logger)
    self.thread.start()
    if not self.thread.connection_established.wait(self.boottime):
        self.logger.error("Didn't receive a console connection from qemu. "
                          "Here is the qemu command line used:\n%s\nand "
                          "output from runqemu:\n%s" % (cmdline, out))
        self.stop_thread()
        return False

    self.logger.debug("Output from runqemu:\n%s", out)
    self.logger.debug("Waiting at most %d seconds for login banner (%s)" %
                      (self.boottime, time.strftime("%D %H:%M:%S")))
    endtime = time.time() + self.boottime
    socklist = [self.server_socket]
    reachedlogin = False
    stopread = False
    qemusock = None
    bootlog = b''
    data = b''
    while time.time() < endtime and not stopread:
        try:
            sread, swrite, serror = select.select(socklist, [], [], 5)
        except InterruptedError:
            continue
        for sock in sread:
            if sock is self.server_socket:
                qemusock, addr = self.server_socket.accept()
                qemusock.setblocking(0)
                socklist.append(qemusock)
                socklist.remove(self.server_socket)
                self.logger.debug("Connection from %s:%s" % addr)
            else:
                data = data + sock.recv(1024)
                if data:
                    bootlog += data
                    data = b''
                    if b' login:' in bootlog:
                        # The source extract is garbled here ('******'); the
                        # handling of a detected login banner was elided.
                        reachedlogin = True
                        stopread = True

    if not reachedlogin:
        self.logger.debug("Didn't reach login banner in %d seconds (%s)" %
                          (self.boottime, time.strftime("%D %H:%M:%S")))
        tail = lambda l: "\n".join(l.splitlines()[-25:])
        # in case bootlog is empty, use tail qemu log store at self.msg
        lines = tail(bootlog if bootlog else self.msg)
        self.logger.debug("Last 25 lines of text:\n%s" % lines)
        self.logger.debug("Check full boot log: %s" % self.logfile)
        self._dump_host()
        self.stop()
        return False

    # If we are not able to login the tests can continue
    try:
        (status, output) = self.run_serial("root\n", raw=True)
        if re.search(r"root@[a-zA-Z0-9\-]+:~#", output):
            self.logged = True
            self.logger.debug("Logged as root in serial console")
            if netconf:
                # configure guest networking
                cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask)
                output = self.run_serial(cmd, raw=True)[1]
                if re.search(r"root@[a-zA-Z0-9\-]+:~#", output):
                    self.logger.debug("configured ip address %s", self.ip)
                else:
                    self.logger.debug("Couldn't configure guest networking")
        else:
            self.logger.debug("Couldn't login into serial console"
                              " as root using blank password")
            self.logger.debug("The output:\n%s" % output)
    except:
        self.logger.debug("Serial console failed while trying to login")
    return True
def __init__(self, name=None, target=0, preProxy=None, randomReseed=True):
    """
    When initializing, an optional target may be given. If no target is
    specified, self.eventLoop will be used. If None is given, no target will
    be called (and it will be up to the caller to properly shut down the
    forked process).

    preProxy may be a dict of values that will appear as ObjectProxy in the
    remote process (but do not need to be sent explicitly since they are
    available immediately before the call to fork()). Proxies will be
    available as self.proxies[name].

    If randomReseed is True, the built-in random and numpy.random generators
    will be reseeded in the child process.
    """
    self.hasJoined = False
    if target == 0:
        target = self.eventLoop
    if name is None:
        name = str(self)

    conn, remoteConn = multiprocessing.Pipe()

    proxyIDs = {}
    if preProxy is not None:
        for k, v in preProxy.items():
            proxyId = LocalObjectProxy.registerObject(v)
            proxyIDs[k] = proxyId

    ppid = os.getpid()  # write this down now; windows doesn't have getppid
    pid = os.fork()
    if pid == 0:
        self.isParent = False
        ## We are now in the forked process; need to be extra careful what we touch while here.
        ##   - no reading/writing file handles/sockets owned by parent process (stdout is ok)
        ##   - don't touch QtGui or QApplication at all; these are landmines.
        ##   - don't let the process call exit handlers
        os.setpgrp()  ## prevents signals (notably keyboard interrupt) being forwarded from parent to this process

        ## close all file handles we do not want shared with parent
        conn.close()
        sys.stdin.close()  ## otherwise we screw with interactive prompts.
        fid = remoteConn.fileno()
        os.closerange(3, fid)
        os.closerange(fid + 1, 4096)  ## just guessing on the maximum descriptor count..

        ## Override any custom exception hooks
        def excepthook(*args):
            import traceback
            traceback.print_exception(*args)
        sys.excepthook = excepthook

        ## Make it harder to access QApplication instance
        for qtlib in ('PyQt4', 'PySide', 'PyQt5'):
            if qtlib in sys.modules:
                sys.modules[qtlib + '.QtGui'].QApplication = None
                sys.modules.pop(qtlib + '.QtGui', None)
                sys.modules.pop(qtlib + '.QtCore', None)

        ## sabotage atexit callbacks
        atexit._exithandlers = []
        atexit.register(lambda: os._exit(0))

        if randomReseed:
            if 'numpy.random' in sys.modules:
                sys.modules['numpy.random'].seed(os.getpid() ^ int(time.time() * 10000 % 10000))
            if 'random' in sys.modules:
                sys.modules['random'].seed(os.getpid() ^ int(time.time() * 10000 % 10000))

        #ppid = 0 if not hasattr(os, 'getppid') else os.getppid()
        RemoteEventHandler.__init__(self, remoteConn, name + '_child', pid=ppid)

        self.forkedProxies = {}
        for name, proxyId in proxyIDs.items():
            self.forkedProxies[name] = ObjectProxy(ppid, proxyId=proxyId, typeStr=repr(preProxy[name]))

        if target is not None:
            target()
    else:
        self.isParent = True
        self.childPid = pid
        remoteConn.close()
        RemoteEventHandler.handlers = {}  ## don't want to inherit any of this from the parent.

        RemoteEventHandler.__init__(self, conn, name + '_parent', pid=pid)
        atexit.register(self.join)
def preexec():
    # Don't forward signals.
    os.setpgrp()
"""For starting up remote processes""" from __future__ import print_function import sys, pickle, os if __name__ == '__main__': if hasattr(os, 'setpgrp'): os.setpgrp() ## prevents signals (notably keyboard interrupt) being forwarded from parent to this process if sys.version[0] == '3': #name, port, authkey, ppid, targetStr, path, pyside = pickle.load(sys.stdin.buffer) opts = pickle.load(sys.stdin.buffer) else: #name, port, authkey, ppid, targetStr, path, pyside = pickle.load(sys.stdin) opts = pickle.load(sys.stdin) #print "key:", ' '.join([str(ord(x)) for x in authkey]) path = opts.pop('path', None) if path is not None: if isinstance(path, str): # if string, just insert this into the path sys.path.insert(0, path) else: # if list, then replace the entire sys.path ## modify sys.path in place--no idea who already has a reference to the existing list. while len(sys.path) > 0: sys.path.pop() sys.path.extend(path) pyqtapis = opts.pop('pyqtapis', None) if pyqtapis is not None: import sip for k,v in list(pyqtapis.items()): sip.setapi(k, v)
def _preexec():
    # do not forward signals (like SIGINT, SIGTERM)
    os.setpgrp()
def preexec_helper():
    # make this process a new process group
    # os.setsid()
    # This prevents the signal from being sent to the children (and creates a new process group)
    os.setpgrp()
def preexec_fn():
    # Don't forward signals to QEMU
    os.setpgrp()
def __enter__(self): if os.name != "nt": os.setpgrp()
def _ignoreSignal(): "Detach from process group to ignore all signals" os.setpgrp()
def main(argv):
    stat('Progress', 'UNKNOWN')

    os.setpgrp()
    loadEnv(os.path.join(root, "env.common"))

    ## add directory containing this file to the PATH
    os.environ['PATH'] = os.path.dirname(os.path.realpath(__file__)) + \
                         os.pathsep + os.environ['PATH']

    seahorn_args = filter(is_seahorn_opt, argv[1:])
    z3_args = filter(is_z3_opt, argv[1:])
    argv = filter(is_non_seahorn_opt, argv[1:])

    args = parseArgs(argv)
    workdir = createWorkDir(args.temp_dir, args.save_temps)
    in_name = args.file

    bc_out = defBCName(in_name, workdir)
    assert bc_out != in_name
    with stats.timer('Clang'):
        extra_args = []
        if args.debug_info:
            extra_args.append('-g')
        clang(in_name, bc_out, arch=args.machine, extra_args=extra_args)
    stat('Progress', 'CLANG')
    in_name = bc_out

    pp_out = defPPName(in_name, workdir)
    assert pp_out != in_name
    with stats.timer('Seapp'):
        seapp(in_name, pp_out, arch=args.machine, args=args)
    stat('Progress', 'SEAPP')
    in_name = pp_out

    ms_out = defMSName(in_name, workdir)
    assert ms_out != in_name
    with stats.timer('Mixed'):
        mixSem(in_name, ms_out, arch=args.machine)
    stat('Progress', 'MIXED')
    in_name = ms_out

    opt_out = defOPTName(in_name, args.L, workdir)
    with stats.timer('Opt'):
        llvmOpt(in_name, opt_out, opt_level=args.L, time_passes=args.time_passes)
    stat('Progress', 'OPT')
    in_name = opt_out

    smt_out = defSMTName(in_name, workdir)
    with stats.timer('Seahorn'):
        seahorn(in_name, smt_out, seahorn_args, cex=args.cex, cpu=args.cpu, mem=args.mem)
    stat('Progress', 'SMT2')

    if args.out_name is not None and args.out_name != smt_out:
        if verbose:
            print 'cp {0} {1}'.format(smt_out, args.out_name)
        shutil.copy2(smt_out, args.out_name)

    if (args.run_z3):
        if args.use_z3_smt2:
            runZ3(smt_out, z3_args)
        else:
            runSpacer(smt_out, args.engine, cpu=args.cpu)
    return 0
def preexec_function():
    os.setpgrp()
def preexec(self):
    os.setpgrp()
def detach_from_parent_group():
    # detach from parent group (no more inherited signals!)
    os.setpgrp()
def no_pg_xonsh_preexec_fn():
    """Default subprocess preexec function for when there is no existing
    pipeline group.
    """
    os.setpgrp()
    signal.signal(signal.SIGTSTP, default_signal_pauser)
def main():
    parser = optparse.OptionParser()
    parser.add_option("-v", action="store_true", dest="version", default="",
                      help="show version number")
    parser.add_option("-i", dest="interface", default="",
                      help="set listen interface (default: 0.0.0.0)")
    parser.add_option("-p", dest="port", default="8022",
                      help="set listen port (default: 8022)")
    parser.add_option("-c", dest="cmd", default=None,
                      help="set shell command (default: ssh localhost)")
    parser.add_option("-t", dest="term", default="xterm-color",
                      help="set terminal emulation string (default: xterm-color)")
    parser.add_option("-l", action="store_true", dest="log", default=0,
                      help="output connection log to stderr (default: quiet)")
    parser.add_option("-d", action="store_true", dest="daemon", default=0,
                      help="run as daemon in the background")
    parser.add_option("-P", dest="pidfile", default="/var/run/webshell.pid",
                      help="set pidfile (default: /var/run/webshell.pid)")
    parser.add_option("-u", dest="uid", help="set daemon user id")
    parser.add_option("--ssl-disable", action="store_false", dest="ssl_enabled", default=1,
                      help="disable SSL, set listen interface to localhost")
    parser.add_option("--ssl-cert", dest="ssl_cert", default="webshell.pem",
                      help="set SSL certificate file (default: webshell.pem)")
    parser.add_option("--www-dir", dest="www_dir", default="www",
                      help="set WebShell www path (default: www)")
    (o, a) = parser.parse_args()

    if o.version:
        print('WebShell ' + version)
        sys.exit(0)

    # Parameter validation
    try:
        o.port = int(o.port)
    except ValueError:
        print('Invalid parameters')
        sys.exit(0)
    if (not openssl_installed) & o.ssl_enabled:
        print('The python SSL extensions seem to be not installed.')
        print('You can run WebShell without SSL encryption with the --ssl-disable command line switch.')
        sys.exit(0)
    if not o.ssl_enabled:
        if len(o.interface) == 0:
            o.interface = 'localhost'

    # Daemon mode
    if o.daemon:
        pid = os.fork()
        if pid == 0:
            # os.setsid() ?
            os.setpgrp()
            nullin = file('/dev/null', 'r')
            nullout = file('/dev/null', 'w')
            os.dup2(nullin.fileno(), sys.stdin.fileno())
            os.dup2(nullout.fileno(), sys.stdout.fileno())
            os.dup2(nullout.fileno(), sys.stderr.fileno())
            if os.getuid() == 0 and o.uid:
                try:
                    os.setuid(int(o.uid))
                except:
                    os.setuid(pwd.getpwnam(o.uid).pw_uid)
        else:
            try:
                file(o.pidfile, 'w+').write(str(pid) + '\n')
            except:
                pass
            sys.exit(0)

    # Run server
    try:
        server_address = (o.interface, o.port)
        httpd = SecureHTTPServer(server_address, WebShellRequestHandler,
                                 o.cmd, o.term, o.ssl_enabled, o.ssl_cert, o.www_dir)
        if httpd.socket is None:
            print('There is a problem with OpenSSL. Make sure the certificates\' path and content are correct.')
            sys.exit(0)
        sa = httpd.socket.getsockname()
        if not o.daemon:
            scheme = 'http'
            if o.ssl_enabled:
                scheme += 's'
            print('WebShell (%s) at %s, port %s' % (scheme, sa[0], sa[1]))
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.stop()
        print('Stopped')
def _subproc_pre():
    if _pipeline_group is None:
        os.setpgrp()
    else:
        os.setpgid(0, _pipeline_group)
    signal.signal(signal.SIGTSTP, lambda n, f: signal.pause())
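# Usage sketch (not from the original source): _pipeline_group above is assumed
# to be a module-level variable holding the process group id of the first
# process in a pipeline. The first child runs with _pipeline_group unset and so
# becomes a group leader; later children are placed into that same group,
# roughly like this:
import subprocess

_pipeline_group = None

first = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, preexec_fn=_subproc_pre)
_pipeline_group = first.pid  # the group id equals the group leader's pid
second = subprocess.Popen(['wc', '-l'], stdin=first.stdout,
                          preexec_fn=_subproc_pre)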
def show_setting_prgrp():
    print('Calling os.setpgrp() from {}'.format(os.getpid()))
    os.setpgrp()
    print('Process group is now {}'.format(os.getpgrp()))
    sys.stdout.flush()
def entry_point(argv):
    os.setpgrp()
    return 0
def main(argv):
    #def stat(key, val): stats.put(key, val)
    os.setpgrp()
    loadEnv(os.path.join(root, "env.common"))

    ## add directory containing this file to the PATH
    os.environ['PATH'] = os.path.dirname(os.path.realpath(__file__)) + \
                         os.pathsep + os.environ['PATH']

    if '--llvm-version' in argv[1:] or '-llvm-version' in argv[1:]:
        print("LLVM version " + llvm_version)
        return 0

    if '--clang-version' in argv[1:] or '-clang-version' in argv[1:]:
        print("Clang version " + getClangVersion(getClang(False)))
        return 0

    print("Platform: {0} {1}".format(platform.system(), platform.release()))
    print("LLVM version: {0}".format(llvm_version))
    #print("Clam started at {0}\n\n".format(datetime.now().strftime("%H:%M:%S")))

    args = parseArgs(argv[1:])
    workdir = createWorkDir(args.temp_dir, args.save_temps)
    in_name = args.file

    if args.preprocess:
        bc_out = defBCName(in_name, workdir)
        if bc_out != in_name:
            extra_args = []
            if args.debug_info:
                extra_args.append('-g')
            with stats.timer('Clang'):
                clang(in_name, bc_out, args, arch=args.machine, extra_args=extra_args)
            #stat('Progress', 'Clang')
            in_name = bc_out

        pp_out = defPPName(in_name, workdir)
        if pp_out != in_name:
            with stats.timer('ClamPP'):
                crabpp(in_name, pp_out, args=args, cpu=args.cpu, mem=args.mem)
            #stat('Progress', 'Clam preprocessor')
            in_name = pp_out

    if args.L > 0:
        o_out = defOptName(in_name, workdir)
        if o_out != in_name:
            extra_args = []
            with stats.timer('CrabOptLlvm'):
                optLlvm(in_name, o_out, args, extra_args, cpu=args.cpu, mem=args.mem)
            #stat('Progress', 'Llvm optimizer')
            in_name = o_out

    pp_out = defOutPPName(in_name, workdir)
    with stats.timer('Clam'):
        extra_opts = []
        clam(in_name, pp_out, args, extra_opts, cpu=args.cpu, mem=args.mem)
    #stat('Progress', 'Clam')

    if args.asm_out_name is not None and args.asm_out_name != pp_out:
        if False:  #verbose:
            print('cp {0} {1}'.format(pp_out, args.asm_out_name))
        shutil.copy2(pp_out, args.asm_out_name)

    #print("\nClam finished at {0}\n".format(datetime.now().strftime("%H:%M:%S")))
    return 0
def preexec_function():
    import signal
    # Detaching from the parent process group
    os.setpgrp()
    # Explicitly ignoring signals in the child process
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def detach_from_parent_group():
    '''
    A utility function that prevents child process from getting parent signals.
    '''
    os.setpgrp()
def sndrcv(pks, pkt, timeout=None, inter=0, verbose=None, chainCC=0, retry=0, multi=0):
    if not isinstance(pkt, Gen):
        pkt = SetGen(pkt)

    if verbose is None:
        verbose = conf.verb
    debug.recv = plist.PacketList([], "Unanswered")
    debug.sent = plist.PacketList([], "Sent")
    debug.match = plist.SndRcvList([])
    nbrecv = 0
    ans = []
    # do it here to fix random fields, so that parent and child have the same
    all_stimuli = tobesent = [p for p in pkt]
    notans = len(tobesent)

    hsent = {}
    for i in tobesent:
        h = i.hashret()
        if h in hsent:
            hsent[h].append(i)
        else:
            hsent[h] = [i]

    if retry < 0:
        retry = -retry
        autostop = retry
    else:
        autostop = 0

    while retry >= 0:
        found = 0

        if timeout < 0:
            timeout = None

        rdpipe, wrpipe = os.pipe()
        rdpipe = os.fdopen(rdpipe)
        wrpipe = os.fdopen(wrpipe, "w")

        pid = 1
        try:
            pid = os.fork()
            if pid == 0:
                try:
                    sys.stdin.close()
                    rdpipe.close()
                    try:
                        i = 0
                        if verbose:
                            print "Begin emission:"
                        for p in tobesent:
                            pks.send(p)
                            i += 1
                            time.sleep(inter)
                        if verbose:
                            print "Finished to send %i packets." % i
                    except SystemExit:
                        pass
                    except KeyboardInterrupt:
                        pass
                    except:
                        log_runtime.exception("--- Error in child %i" % os.getpid())
                        log_runtime.info("--- Error in child %i" % os.getpid())
                finally:
                    try:
                        os.setpgrp()  # Change process group to avoid ctrl-C
                        sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
                        cPickle.dump((conf.netcache, sent_times), wrpipe)
                        wrpipe.close()
                    except:
                        pass
            elif pid < 0:
                log_runtime.error("fork error")
            else:
                wrpipe.close()
                stoptime = 0
                remaintime = None
                inmask = [rdpipe, pks]
                try:
                    try:
                        while 1:
                            if stoptime:
                                remaintime = stoptime - time.time()
                                if remaintime <= 0:
                                    break
                            r = None
                            if arch.FREEBSD or arch.DARWIN:
                                inp, out, err = select(inmask, [], [], 0.05)
                                if len(inp) == 0 or pks in inp:
                                    r = pks.nonblock_recv()
                            else:
                                inp, out, err = select(inmask, [], [], remaintime)
                                if len(inp) == 0:
                                    break
                                if pks in inp:
                                    r = pks.recv(MTU)
                            if rdpipe in inp:
                                if timeout:
                                    stoptime = time.time() + timeout
                                del (inmask[inmask.index(rdpipe)])
                            if r is None:
                                continue
                            ok = 0
                            h = r.hashret()
                            if h in hsent:
                                hlst = hsent[h]
                                for i, sentpkt in enumerate(hlst):
                                    if r.answers(sentpkt):
                                        ans.append((sentpkt, r))
                                        if verbose > 1:
                                            os.write(1, "*")
                                        ok = 1
                                        if not multi:
                                            del hlst[i]
                                            notans -= 1
                                        else:
                                            if not hasattr(sentpkt, '_answered'):
                                                notans -= 1
                                            sentpkt._answered = 1
                                        break
                            if notans == 0 and not multi:
                                break
                            if not ok:
                                if verbose > 1:
                                    os.write(1, ".")
                                nbrecv += 1
                                if conf.debug_match:
                                    debug.recv.append(r)
                    except KeyboardInterrupt:
                        if chainCC:
                            raise
                finally:
                    try:
                        nc, sent_times = cPickle.load(rdpipe)
                    except EOFError:
                        warning("Child died unexpectedly. Packets may have not been sent %i"
                                % os.getpid())
                    else:
                        conf.netcache.update(nc)
                        for p, t in zip(all_stimuli, sent_times):
                            p.sent_time = t
                    os.waitpid(pid, 0)
        finally:
            if pid == 0:
                os._exit(0)

        remain = reduce(list.__add__, hsent.values(), [])
        if multi:
            remain = filter(lambda p: not hasattr(p, '_answered'), remain)

        if autostop and len(remain) > 0 and len(remain) != len(tobesent):
            retry = autostop

        tobesent = remain
        if len(tobesent) == 0:
            break
        retry -= 1

    if conf.debug_match:
        debug.sent = plist.PacketList(remain[:], "Sent")
        debug.match = plist.SndRcvList(ans[:])

    # clean the ans list to delete the field _answered
    if (multi):
        for s, r in ans:
            if hasattr(s, '_answered'):
                del (s._answered)

    if verbose:
        print "\nReceived %i packets, got %i answers, remaining %i packets" % (
            nbrecv + len(ans), len(ans), notans)
    return plist.SndRcvList(ans), plist.PacketList(remain, "Unanswered")
def sigint_ignore():
    import os
    os.setpgrp()
def preexec_function():
    import signal
    # Detaching from the parent process group
    os.setpgrp()
def preexec_fn():
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    os.setpgrp()