def legal_manhole_file(f):
    """
    Helper to check whether a process exists and is likely a manhole-able container.

    @return True/False if this is likely a container.
    """
    mh_pid = int(r.search(f).group(1))
    try:
        os.getpgid(mh_pid)
    except OSError as e:
        if e.errno == errno.ESRCH:
            return False
        raise  # unexpected, just re-raise
    # the pid seems legal, now check status of sockets - the pid may be reused
    with open(f) as ff:
        mh_doc = json.load(ff)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((mh_doc['ip'], mh_doc['shell_port']))
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            return True
        raise  # unexpected, re-raise
    finally:
        s.close()
    return False
def tearDown(self):
    # kill ros
    os.killpg(os.getpgid(self.ros.pid), 15)
    os.waitpid(self.ros.pid, os.WUNTRACED)
    # kill the simulator
    os.killpg(os.getpgid(self.sim.pid), 15)
    os.waitpid(self.sim.pid, os.WUNTRACED)
def start_server():
    logging.info('Server starting at: {0}'.format(datetime.now()))
    pid = None
    try:
        pid = pidlockfile.read_pid_from_pidfile(PIDFILE)
    except OSError:
        pass
    if pid is not None:
        try:
            os.getpgid(pid)
        except OSError:
            pidlockfile.remove_existing_pidfile(PIDFILE)
        else:
            init_application()
            return
    try:
        pidlockfile.write_pid_to_pidfile(PIDFILE)
    except OSError:
        logging.error('Pid file already exists, process must be running')
        sys.exit()
    init_application()
def is_running(pid):
    try:
        os.getpgid(pid)
        return True
    except Exception:
        pass
    return False
def validateResults(self, worker_client, worker_server, timeout):
    if os.path.isdir('/proc/{}'.format(worker_server.process.pid)):
        self.logger.info("Killing server worker process (pid %d)" % worker_server.process.pid)
        os.killpg(os.getpgid(worker_server.process.pid), signal.SIGTERM)
        worker_server.join()
    self.logger.info("Client worker result is `%s'" % worker_client.result)
    error = False
    if worker_client.result is None:
        try:
            error = True
            self.logger.error(
                "Timeout: %ss! Killing client worker process (pid %d)" %
                (timeout, worker_client.process.pid))
            os.killpg(os.getpgid(worker_client.process.pid), signal.SIGKILL)
            worker_client.join()
        except OSError:
            self.logger.debug("Couldn't kill client worker process")
            raise
    if error:
        raise Exception("Timeout! Client worker did not finish in %ss" % timeout)
    self.assert_equal(worker_client.result, 0, "Binary test return code")
def _flush_pid_queues():
    global active_pids, waiting_pids
    if not _do_flush:
        return
    rm = set()
    for pid in active_pids:
        try:
            os.getpgid(pid)
        except OSError:
            # pid no longer active
            rm.add(pid)
    active_pids -= rm
    rm = set()
    for pid in waiting_pids:
        try:
            os.getpgid(pid)
        except OSError:
            # pid no longer active
            rm.add(pid)
    for p in rm:
        waiting_pids.remove(p)
    print 'ACTIVE', active_pids
    print 'WAITING', waiting_pids
def kill_server_pid(pidfile, wait=10, passenger_root=False):
    # Must re-import modules in order to work during atexit
    import os
    import signal
    import subprocess
    import time
    try:
        if passenger_root:
            # First try to shut down nicely
            restore_cwd = os.getcwd()
            os.chdir(passenger_root)
            subprocess.call([
                'bundle', 'exec', 'passenger', 'stop', '--pid-file', pidfile])
            os.chdir(restore_cwd)
        now = time.time()
        timeout = now + wait
        with open(pidfile, 'r') as f:
            server_pid = int(f.read())
        while now <= timeout:
            if not passenger_root or timeout - now < wait / 2:
                # Half timeout has elapsed. Start sending SIGTERM
                os.kill(server_pid, signal.SIGTERM)
            # Raise OSError if process has disappeared
            os.getpgid(server_pid)
            time.sleep(0.1)
            now = time.time()
    except IOError:
        pass
    except OSError:
        pass
def checkWorkerStatus(worker, batchsize):
    db = MySQLdb.connect(host=localhost, user=localuser, passwd=localpwd, db=localdb)
    c = db.cursor()
    query = "select * from pairwise_infos where worker=\"" + str(worker) + "\" AND ended=FALSE order by update_id DESC LIMIT 1;"
    #print query
    c.execute(query)
    remax = c.fetchall()
    #print remax
    [update_idok, last_idok] = getUpdateInfos()
    if len(remax) > 0:
        update_id = remax[0][0]
        last_id = remax[0][1]
        proc_id = remax[0][3]
        ended = remax[0][5]
        #print "Ended", ended
        if not ended:
            print "Error: previous update " + str(update_id) + " of worker " + str(worker) + " not completed (last_id:" + str(last_id) + ",proc_id:" + str(proc_id) + ")"
            # check if proc_id running
            try:
                if proc_id == 0:
                    # manually reset
                    raise OSError
                os.getpgid(proc_id)
                print "Still running. Leaving."
                quit()
            except OSError:
                # not running. delete line
                print "Process is dead. Cleaning...",
                cleanError(update_id, last_id, worker, proc_id)
                print "Restarting this batch..."
                update_idok = update_id
                last_idok = last_id - batchsize  # Won't work if failed process was using another batchsize
    return update_idok, last_idok
def signal_handler(sgnl, frame):
    os.killpg(os.getpgid(webcontrol.pid), signal.SIGKILL)
    os.killpg(os.getpgid(beakerx.pid), signal.SIGKILL)
    test_util.kill_processes('jupyter')
    test_util.kill_processes('webdriver')
    test_util.kill_processes('java')
    sys.exit(20)
def main():
    if (len(sys.argv) < 3):
        print("Usage: timer <timeout (sec)> <program> [program arguments]", file=sys.stderr)
        exit(1)
    try:
        timeout = locale.atoi(sys.argv[1])
    except ValueError:
        print("Unrecognized timeout", file=sys.stderr)
        print("Usage: timer <timeout (sec)> <program> [program arguments]", file=sys.stderr)
        exit(1)
    prog = sys.argv[2]
    sprog = prog
    args = [prog]
    for i in range(len(sys.argv) - 3):
        args.append(sys.argv[i + 3])
        sprog += " " + sys.argv[i + 3]
    child = os.fork()
    startime = time.time()
    if (child == 0):
        try:
            os.execvp(prog, args)
        except FileNotFoundError:
            print("Unknown program " + prog)
            os.killpg(os.getpgid(0), signal.SIGKILL)
    try:
        time.sleep(timeout)
    except ValueError:
        print("Unrecognized timeout (must be positive)", file=sys.stderr)
        os.killpg(os.getpgid(child), signal.SIGKILL)
    timelapse = time.time() - startime
    print("\nkilling " + sprog + " after " + str(round(timelapse, 2)) + " seconds")
    os.killpg(os.getpgid(child), signal.SIGKILL)
    exit(0)
def main():
    global die
    don = dayOrNight()
    comm = 'fswebcam -c paranal_%s.conf' % (don)
    if args.v:
        print "%s - %s" % (dt.utcnow(), comm)
    if not args.debug:
        pro = subprocess.Popen(comm, subprocess.PIPE, shell=True, preexec_fn=os.setsid)
    while(1):
        don_new = dayOrNight()
        # change from day to night, and vice versa
        if don_new != don:
            if args.v:
                print "%s - Changing the settings to %s" % (dt.utcnow(), don)
            if not args.debug:
                os.killpg(os.getpgid(pro.pid), signal.SIGTERM)
            time.sleep(30)
            comm = 'fswebcam -c paranal_%s.conf' % (don_new)
            if args.v:
                print "%s - %s" % (dt.utcnow(), comm)
            if not args.debug:
                pro = subprocess.Popen(comm, subprocess.PIPE, shell=True, preexec_fn=os.setsid)
            don = don_new
        # wait for 5 mins between checks for change in day/night
        time.sleep(300)
        # if ctrl+c, die correctly
        if die == True:
            print "Killing fswebcam"
            if not args.debug:
                os.killpg(os.getpgid(pro.pid), signal.SIGTERM)
            print "Exiting..."
            break
def main():
    if len(sys.argv) != 3:
        sys.stderr.write("usage: %s VOLUME_NAME BRICK_NAME\n" % os.path.basename(sys.argv[0]))
        sys.exit(-1)
    volumeName = sys.argv[1]
    brickName = sys.argv[2]
    # glusterfs-3.3 config change from /etc/glusterd to /var/lib/glusterd
    pidFile = "/var/lib/glusterd/vols/%s/run/%s.pid" % (volumeName, brickName.replace(":", "").replace("/", "-"))
    total, free = getBrickSpace(brickName)
    if pidFile[-5] == '-':
        pidFile = pidFile[:-5] + pidFile[-4:]
    if not os.path.exists(pidFile):
        print "OFFLINE", total, free
        sys.exit(0)
    lines = Utils.readFile(pidFile)
    if not lines:
        print "UNKNOWN", total, free
        sys.exit(0)
    try:
        pidString = lines[0]
        os.getpgid(int(pidString))
        print "ONLINE", total, free
    except ValueError, e:
        Utils.log("invalid pid %s in file %s: %s" % (pidString, pidFile, str(e)))
        print "UNKNOWN", total, free
def stop(self):
    self.stop_thread()
    if hasattr(self, "origchldhandler"):
        signal.signal(signal.SIGCHLD, self.origchldhandler)
    if self.runqemu:
        if hasattr(self, "monitorpid"):
            os.kill(self.monitorpid, signal.SIGKILL)
        logger.info("Sending SIGTERM to runqemu")
        try:
            os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
        except OSError as e:
            if e.errno != errno.ESRCH:
                raise
        endtime = time.time() + self.runqemutime
        while self.runqemu.poll() is None and time.time() < endtime:
            time.sleep(1)
        if self.runqemu.poll() is None:
            logger.info("Sending SIGKILL to runqemu")
            os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
        self.runqemu = None
    if hasattr(self, 'server_socket') and self.server_socket:
        self.server_socket.close()
        self.server_socket = None
    self.qemupid = None
    self.ip = None
def execute_cb(self, goal):
    # decide whether recording should be started or stopped
    if goal.command == "start":
        # start to record the topics
        rospy.loginfo('now the topic recording should start')
        args = " --rate=5"
        args = args + " " + goal.file
        command = "rosbag play" + args
        self.p = subprocess.Popen("exec " + command, stdin=subprocess.PIPE,
                                  preexec_fn=os.setsid, shell=True)
        rospy.loginfo(self.p.pid)
        # check if the goal is preempted
        rate = rospy.Rate(1.0)
        while not rospy.is_shutdown() and self.p.poll() is None:
            #self.p.communicate(input=b'\n')
            if self._as.is_preempt_requested():
                rospy.loginfo('Logging is preempted')
                os.killpg(os.getpgid(self.p.pid), signal.SIGINT)
                self._as.set_preempted()
                break
            rate.sleep()
    elif goal.command == "stop":
        # stop to record the topics
        rospy.loginfo('now the topic playing should stop')
        rospy.loginfo(self.p.pid)
        os.killpg(os.getpgid(self.p.pid), signal.SIGINT)
        rospy.loginfo("I'm done")
    else:
        rospy.loginfo('goal.command is not valid')
def status(self):
    try:
        pf = file(self.pidfile, "r")
        pid = int(pf.read().strip())
        pf.close()
    except IOError:
        pid = None
    if not pid:
        sys.stderr.write("Daemon not running.\n")
        return 3
    # see if it's running?
    try:
        os.getpgid(pid)
        sys.stderr.write("Daemon (PID %s) is running...\n" % pid)
        return 0
    except OSError, err:
        err = str(err)
        if err.find("No such process") > 0:
            sys.stderr.write("Daemon not running but pidfile exists\n")
            return 1
        else:
            print str(err)
            return 4
def stop(self):
    log.info("Stopping daemon")
    pid = self.pid()

    # Clear the pid file
    if os.path.exists(self.pidfile):
        os.remove(self.pidfile)

    if pid > 1:
        try:
            if self.autorestart:
                # Try killing the supervising process
                try:
                    os.kill(os.getpgid(pid), signal.SIGTERM)
                except OSError:
                    log.warn("Couldn't kill parent pid %s. Killing pid." % os.getpgid(pid))
                    os.kill(pid, signal.SIGTERM)
            else:
                # No supervising process present
                os.kill(pid, signal.SIGTERM)
            log.info("Daemon is stopped")
        except OSError as err:
            if str(err).find("No such process") <= 0:
                log.exception("Cannot kill Agent daemon at pid %s" % pid)
                sys.stderr.write(str(err) + "\n")
    else:
        message = "Pidfile %s does not exist. Not running?\n" % self.pidfile
        log.info(message)
        sys.stderr.write(message)
        # A ValueError might occur if the PID file is empty but does actually exist
        if os.path.exists(self.pidfile):
            os.remove(self.pidfile)
        return  # Not an error in a restart
def murder_family(pid=None, pgid=None, killall=False, sig=signal.SIGTERM):
    """Send signal to all processes in the process group pgid, apart from pid.
    If killall is true, signal pid as well.

    SIGTERM - die nicely
    SIGKILL - die right now
    SIGSTOP - uncatchable stop signal - might break queues
    SIGTSTP - makes main thread sleep (with signal.pause()) until sigcont received
    SIGCONT - start anything that is paused or stopped"""
    if pid is None:
        pid = os.getpid()  # pid of current process
    if pgid is None:
        pgid = os.getpgid(pid)  # process group of pid
    for ps in psutil.process_iter():
        if os.getpgid(ps.pid) == pgid and ps.pid != pid:
            try:
                ps.send_signal(sig)
            except (psutil.NoSuchProcess, psutil.AccessDenied, IOError):
                pass
    if killall is True:
        try:
            ps = psutil.Process(pid=pid)
            ps.send_signal(sig)
        except (psutil.NoSuchProcess, psutil.AccessDenied, IOError):
            pass
def stop(self):
    log.info("Stopping daemon")
    pid = self.pid()

    # Clear the pid file
    if os.path.exists(self.pidfile):
        os.remove(self.pidfile)

    if pid > 1:
        try:
            if self.autorestart:
                # Try killing the supervising process
                try:
                    os.kill(os.getpgid(pid), signal.SIGTERM)
                except OSError:
                    log.warn("Couldn't kill parent pid %s. Killing pid." % os.getpgid(pid))
                    os.kill(pid, signal.SIGTERM)
            else:
                # No supervising process present
                os.kill(pid, signal.SIGTERM)
            log.info("Daemon is stopped")
        except OSError, err:
            if str(err).find("No such process") <= 0:
                log.exception("Cannot kill Agent daemon at pid %s" % pid)
                sys.stderr.write(str(err) + "\n")
def kill_proc_group(proc, signum=signal.SIGTERM):
    if proc:
        try:
            sys.stderr.write('kill_proc_group: killed process group with pgid '
                             '%s\n' % os.getpgid(proc.pid))
            os.killpg(os.getpgid(proc.pid), signum)
        except OSError as exception:
            sys.stderr.write('kill_proc_group: %s\n' % exception)
def __exit__(self, *args):
    """Teardown the server"""
    # Get the process group to kill all of the subprocesses
    pgrp_engine = os.getpgid(self.scoring_process.pid)
    os.killpg(pgrp_engine, signal.SIGKILL)
    if self.pipeline == True:
        pgrp_pipeline = os.getpgid(self.pipeline_process.pid)
        os.killpg(pgrp_pipeline, signal.SIGKILL)
def process_exists(pid):
    """True if the given process exists."""
    try:
        os.getpgid(pid)
    except OSError as x:
        if x.errno == 3:  # errno 3 is ESRCH: no such process
            return False
        logging.error("Unknown exception from getpgid - %s", str(x))
    return True
def flume_master_status():
    pid_path = "/var/run/flume/flume-flume-master.pid"
    try:
        with open(pid_path, "rb") as fp:
            pid = int(fp.read().strip())
        os.getpgid(pid)  # Throws OSError if process doesn't exist
    except (IOError, OSError, ValueError):
        return False
    return True
def _check_unique(self):
    """check if logging server is not running"""
    try:
        old_pid = int(open(self.pidfile).read())
        os.getpgid(old_pid)
        raise Exception('Logging server is already running with pid %d!' % old_pid)
    except (OSError, IOError):
        pass
def clean_junk_locks(self): for path, dirnames, filenames in walk_on_path(self.path): filenames = filenames or [] for dirname in dirnames: folder_path = join_paths(path, dirname) for filename in get_dir_filenames(folder_path): if not filename.startswith('.'): filenames.append(join_paths(dirname, filename)) for filename in filenames: filename = to_string(filename) if filename.startswith('.'): continue file_path = join_paths(path, filename) if '.' in filename: # Delete inactive positions locks binary = get_file_binary(file_path, mode='r') if binary: info = binary.split() if len(info) >= 2 and info[0] == DOMAIN_NAME and maybe_integer(info[1]): try: getpgid(int(info[1])) except OSError as error: if error.errno is errno.ESRCH: remove_file_quietly( file_path, retries=self.retries, retry_errno=self.retry_errno) else: # Clean locks wait list # Get last modified time, to check if file as been updated in the process modified_time = file_modified_time(file_path) if modified_time: binary = get_file_binary(file_path, mode='r') if binary: # Find alive locks keep_codes = binary.splitlines() for i, line in enumerate(keep_codes): info = line.split() if len(info) >= 2 and info[0] == DOMAIN_NAME and maybe_integer(info[1]): try: getpgid(int(info[1])) except OSError as error: if error.errno is errno.ESRCH: # Add empty line to keep position number keep_codes[i] = '' # Check if file as been updated in the process last_modified_time = file_modified_time(file_path) if last_modified_time and modified_time == last_modified_time: if not any(keep_codes): remove_file_quietly(file_path) else: with open(file_path, 'w') as f: f.write(NEW_LINE.join(keep_codes))
def startstop(argv=None, stdout='/dev/null', stderr=None, stdin='/dev/null', pidfile='pid.txt', startmsg = 'started with pid %s' ): ''' function to call "before" the main in your program, it will automatically take care of the start/stop mechanism and the rest of the code will be run in daemon mode ''' if not argv: argv = sys.argv if len(argv) > 1: action = argv[1] try: pf = file(pidfile,'r') pid = int(pf.read().strip()) pf.close() except IOError: pid = None if 'stop' == action or 'restart' == action: if not pid: mess = "Could not stop, pid file '%s' missing.\n" sys.stderr.write(mess % pidfile) if 'stop' == action: sys.exit(1) action = 'start' pid = None else: try: for i in xrange(3): os.kill(pid,SIGTERM) time.sleep(1) os.kill(pid,SIGKILL) time.sleep(1) # Note that this code was not tested ! It's tricky to crash so badly a daemon # that it doesnt answer to a kill -9 try: os.getpgid(pid) sys.stderr.write('Error kill -9 didnt achieve to kill pid '+str(pid)) except OSError: # ok SIGKILL killed it pass except OSError, err: err = str(err) if err.find("No such process") > 0: os.remove(pidfile) if 'stop' == action: sys.exit(0) action = 'start' pid = None else: sys.stderr.write(str(err)) sys.exit(1) if 'start' == action: if pid: mess = "Start aborted since pid file '%s' exists.\n" sys.stderr.write(mess % pidfile) sys.exit(1) daemonize(stdout,stderr,stdin,pidfile,startmsg) return
def is_running_pid(pid):
    '''
    Return true if the provided process is currently running on this machine.
    '''
    try:
        # If a process with PID is no longer running, an exception is thrown
        os.getpgid(pid)
    except OSError:
        return False
    return True
def shutdown_server():
    global MASTER, WORKER
    try:
        os.killpg(os.getpgid(MASTER.pid), signal.SIGTERM)
    except (OSError, AttributeError):
        pass
    try:
        os.killpg(os.getpgid(WORKER.pid), signal.SIGTERM)
    except (OSError, AttributeError):
        pass
def test_ocs_acksubscriber(self): try: cdm = toolsmod.intake_yaml_file("../../tests/yaml/L1SystemCfg_Test_ocs_bridge.yaml") except IOError as e: trace = traceback.print_exc() emsg = "Unable to fine CFG Yaml file %s\n" % self._config_file print(emsg + trace) sys.exit(101) broker_addr = cdm[ROOT]["BASE_BROKER_ADDR"] # dmcs publisher dmcs_pub_name = cdm[ROOT]["DMCS_BROKER_PUB_NAME"] dmcs_pub_pwd = cdm[ROOT]["DMCS_BROKER_PUB_PASSWD"] dmcs_broker_pub_url = "amqp://" + dmcs_pub_name + ":" + \ dmcs_pub_pwd + "@" + \ broker_addr self.dmcs_publisher = SimplePublisher(dmcs_broker_pub_url, "YAML") # dmcs consumer dmcs_name = cdm[ROOT]["DMCS_BROKER_NAME"] dmcs_pwd = cdm[ROOT]["DMCS_BROKER_PASSWD"] dmcs_broker_url = "amqp://" + dmcs_name + ":" + \ dmcs_pwd + "@" + \ broker_addr self.dmcs_consumer = Consumer(dmcs_broker_url, "ocs_dmcs_consume", "thread-dmcs-consume", self.on_ocs_message, "YAML") self.dmcs_consumer.start() # ocs consumer from DMCS ocs_name = cdm[ROOT]["OCS_BROKER_NAME"] ocs_pwd = cdm[ROOT]["OCS_BROKER_PASSWD"] # FIXME: New OCS account for consumer test_dmcs_ocs_publish ocs_broker_url = "amqp://" + "AFM" + ":" +\ "AFM" + "@" +\ broker_addr self.ocs_consumer = Consumer(ocs_broker_url, "test_dmcs_ocs_publish", "thread-ocs-consume", self.on_dmcs_message, "YAML") self.ocs_consumer.start() print("Test setup Complete. Commencing Messages...") self._msg_auth = MessageAuthority("../../messages.yaml") self.send_messages() sleep(10) os.killpg(os.getpgid(self.cmdListener.pid), signal.SIGTERM) os.killpg(os.getpgid(self.ackSubscriber.pid), signal.SIGTERM) print("MY OCS MESSAGES: %s" % self.ocs_consumer_msg_list) self.verify_ocs_messages() print("Finished with CommandListener tests.")
def stop(self): """Stop the daemon.""" if self.pidfile is None: raise DaemonError('Cannot stop daemon without PID file') pid = self._read_pidfile() if pid is None: # I don't think this should be a fatal error self._emit_warning('{prog} is not running'.format(prog=self.prog)) return self._emit_message('Stopping {prog} ... '.format(prog=self.prog)) pgid = os.getpgid(pid) for gproc in psutil.process_iter(): try: if os.getpgid(gproc.pid) == pgid and gproc.pid != 0: try: # Try to terminate the process os.kill(gproc.pid, signal.SIGTERM) except OSError as ex: self._emit_failed() self._emit_error(str(ex)) sys.exit(1) _, alive = psutil.wait_procs([psutil.Process(gproc.pid)], timeout=self.stop_timeout) if alive: # The process didn't terminate for some reason if self.kill_timeout: try: os.kill(gproc.pid, signal.SIGKILL) except OSError as ex: self._emit_failed() self._emit_error(str(ex)) sys.exit(1) _, alive = psutil.wait_procs([psutil.Process(gproc.pid)], timeout=self.kill_timeout) if alive: self._emit_failed() self._emit_error('Timed out while waiting for process (PID {pid}) ' 'to terminate'.format(pid=gproc.pid)) sys.exit(1) else: self._emit_message('{pid} '.format(pid=gproc.pid)) else: self._emit_failed() self._emit_error('Timed out while waiting for process (PID {pid}) ' 'to terminate'.format(pid=gproc.pid)) sys.exit(1) else: self._emit_message('{pid} '.format(pid=gproc.pid)) except psutil.Error: pass self._emit_ok()
def stop(self):
    try:
        os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
    except Exception:
        pass
    time.sleep(.1)
    try:
        os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
    except Exception:
        pass
    self._t.join()
def run_test( test_cmd, pkg_path, name, retryable, continue_on_errors, is_integration_test=False, ): """Run test_cmd in the given pkg_path.""" logging.info('Running `%s` in %s...', ' '.join(test_cmd), pkg_path) start = datetime.now() env = None # If we're running integration tests in parallel, assign a unique NUCLIDE_SERVER_PORT. if is_integration_test: process_identity = multiprocessing.current_process()._identity if len(process_identity) > 0: # process_id is numbered starting from 1. process_id = process_identity[0] - 1 if process_id < len(OPEN_PORTS): env = os.environ.copy() env['TEST_NUCLIDE_SERVER_PORT'] = str(OPEN_PORTS[process_id]) proc = subprocess.Popen(test_cmd, cwd=pkg_path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setsid, shell=False, env=env) # Record the pgid now - sometimes the process exits but not its children. proc_pgid = os.getpgid(proc.pid) stdout = [] def kill_proc(): logging.info('KILLING TEST: %s', name) # Kill the group so child processes are also cleaned up. os.killpg(proc_pgid, signal.SIGKILL) timer = threading.Timer(MAX_RUN_TIME_IN_SECONDS, kill_proc) try: timer.start() for line in iter(proc.stdout.readline, ''): # line is a bytes string literal in Python 3. logging.info('[%s %s]: %s', test_cmd[0], name, line.rstrip().decode('utf-8')) stdout.append(line) proc.wait() except KeyboardInterrupt as e: # Cleanly kill all child processes before terminating. kill_proc() sys.exit(1) finally: timer.cancel() end = datetime.now() if proc.returncode: logging.info( 'TEST %s: %s (exit code: %d)\nstdout:\n%s', 'ERROR' if timer.is_alive() else 'TIMED OUT', name, proc.returncode, ''.join(stdout).rstrip(), ) if retryable and is_retryable_error(''.join(stdout)): logging.info('RETRYING TEST: %s', name) time.sleep(3) run_test(test_cmd, pkg_path, name, False, continue_on_errors) return if not continue_on_errors: raise utils.TestFailureError( 'TEST FAILED: %s %s (exit code: %d)' % (test_cmd[0], name, proc.returncode), proc.returncode, ) else: logging.info('TEST PASSED: %s (%s seconds)', name, (end - start).seconds)
def run_springroll(self, dir, filename, domain, domain_name, report): instance_path = os.path.join(dir, filename) domain_path = os.path.join(BASE_DIR, domain) def target(): if os.name == 'nt': self.process = subprocess.Popen([ 'java', '-classpath', '\".\\testsuit\\dist\\lib\\antlr-3.4-complete.jar;.\\testsuit\\dist\\lib\\jgraph-5.13.0.0.jar;.\\testsuit\\dist\\lib\\jgrapht-core-0.9.0.jar;.\\testsuit\\dist\\lib\\PPMaJal2.jar;.\\testsuit\dist\\springroll_fixed.jar;\"', 'runner.SMTHybridPlanner', '-o', domain_path, '-f', instance_path ], shell=True, stdout=subprocess.PIPE) self.output = self.process.communicate()[0] else: # This does not necessarily work on any other os cmd = [ 'java -classpath testsuit/dist/lib/antlr-3.4-complete.jar:testsuit/dist/lib/jgraph-5.13.0.0.jar:testsuit/dist/lib/jgrapht-core-0.9.0.jar:testsuit/dist/lib/PPMaJal2.jar:testsuit/dist/springroll.jar runner.SMTHybridPlanner -o ' + domain_path + ' -f ' + instance_path ] self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid) self.output = self.process.communicate()[0] thread = threading.Thread(target=target) start = time.time() thread.start() thread.join(timeout) duration = time.time() - start found = False if thread.is_alive(): if os.name == 'nt': print 'Terminating process (in a sketchy way)' subprocess.Popen( "TASKKILL /F /PID {pid} /T".format(pid=self.process.pid)) else: # This does not necessarily work on any os os.killpg(os.getpgid(self.process.pid), signal.SIGTERM) thread.join() # Log a timeout: errorcode: time = -1 duration = -1 else: print('OUTPUT START') print(self.output) print('OUTPUT END') if 'Solved: True' in self.output: found = True elif not ('Solved: False' in self.output): # Log a crash: errorcode: time = -2 duration = -2 else: print( '*****************Parameters have to be changed!*****************' ) print('Finished within timeout, without solving.') duration = -3 log_metadata = { 'mode': 'springroll', 'domain': domain_name, 'instance': filename, 'found': found, 'horizon': 0, 'time': duration, 'time_log': None } val_data = (None, domain_path, instance_path) report.create_log(val_data, log_metadata) print(self.process.returncode)
def kill_task_2(process):
    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
    process.wait()
def kill(p):
    had_to_kill.set()
    os.killpg(os.getpgid(p.pid), signal.SIGTERM)
def generic_handler(event, context_dict): """ context_dict is generic infromation about the context that we are running in, provided by the scheduler """ try: response_status = {'exception': None} s3 = boto3.resource('s3') logger.info("invocation started") # download the input status_key = event['status_key'] func_key = event['func_key'] data_key = event['data_key'] data_byte_range = event['data_byte_range'] output_key = event['output_key'] if version.__version__ != event['pywren_version']: raise Exception("WRONGVERSION", "Pywren version mismatch", version.__version__, event['pywren_version']) start_time = time.time() response_status['start_time'] = start_time func_filename = "/tmp/func.pickle" data_filename = "/tmp/data.pickle" output_filename = "/tmp/output.pickle" runtime_s3_bucket = event['runtime_s3_bucket'] runtime_s3_key = event['runtime_s3_key'] if event.get('shard_runtime_key', False): random.seed() shard = random.randrange(wrenconfig.MAX_S3_RUNTIME_SHARDS) key_shard = wrenutil.get_s3_shard(runtime_s3_key, shard) runtime_s3_key_used = wrenutil.hash_s3_key(key_shard) else: runtime_s3_key_used = runtime_s3_key job_max_runtime = event.get("job_max_runtime", 290) # default for lambda response_status['func_key'] = func_key response_status['data_key'] = data_key response_status['output_key'] = output_key response_status['status_key'] = status_key b, k = data_key KS = s3util.key_size(b, k) #logger.info("bucket=", b, "key=", k, "status: ", KS, "bytes" ) while KS is None: logger.warn("WARNING COULD NOT GET FIRST KEY") KS = s3util.key_size(b, k) if not event['use_cached_runtime']: subprocess.check_output("rm -Rf {}/*".format(RUNTIME_LOC), shell=True) # get the input and save to disk # FIXME here is we where we would attach the "canceled" metadata s3.meta.client.download_file(func_key[0], func_key[1], func_filename) func_download_time = time.time() - start_time response_status['func_download_time'] = func_download_time logger.info("func download complete, took {:3.2f} sec".format( func_download_time)) if data_byte_range is None: s3.meta.client.download_file(data_key[0], data_key[1], data_filename) else: range_str = 'bytes={}-{}'.format(*data_byte_range) dres = s3.meta.client.get_object(Bucket=data_key[0], Key=data_key[1], Range=range_str) data_fid = open(data_filename, 'wb') data_fid.write(dres['Body'].read()) data_fid.close() data_download_time = time.time() - start_time logger.info("data data download complete, took {:3.2f} sec".format( data_download_time)) response_status['data_download_time'] = data_download_time # now split d = json.load(open(func_filename, 'r')) shutil.rmtree(PYTHON_MODULE_PATH, True) # delete old modules os.mkdir(PYTHON_MODULE_PATH) # get modules and save for m_filename, m_text in d['module_data'].items(): m_path = os.path.dirname(m_filename) if len(m_path) > 0 and m_path[0] == "/": m_path = m_path[1:] to_make = os.path.join(PYTHON_MODULE_PATH, m_path) #print "to_make=", to_make, "m_path=", m_path try: os.makedirs(to_make) except OSError as e: if e.errno == 17: pass else: raise e full_filename = os.path.join(to_make, os.path.basename(m_filename)) #print "creating", full_filename fid = open(full_filename, 'wb') fid.write(m_text.encode('utf-8')) fid.close() logger.info("Finished writing {} module files".format( len(d['module_data']))) logger.debug( subprocess.check_output("find {}".format(PYTHON_MODULE_PATH), shell=True)) logger.debug( subprocess.check_output("find {}".format(os.getcwd()), shell=True)) response_status['runtime_s3_key_used'] = runtime_s3_key_used 
runtime_cached = download_runtime_if_necessary(s3, runtime_s3_bucket, runtime_s3_key_used) logger.info("Runtime ready, cached={}".format(runtime_cached)) response_status['runtime_cached'] = runtime_cached cwd = os.getcwd() jobrunner_path = os.path.join(cwd, "jobrunner.py") extra_env = event.get('extra_env', {}) extra_env['PYTHONPATH'] = "{}:{}".format(os.getcwd(), PYTHON_MODULE_PATH) call_id = event['call_id'] callset_id = event['callset_id'] response_status['call_id'] = call_id response_status['callset_id'] = callset_id CONDA_PYTHON_RUNTIME = "/tmp/condaruntime/bin/python" cmdstr = "{} {} {} {} {}".format(CONDA_PYTHON_RUNTIME, jobrunner_path, func_filename, data_filename, output_filename) setup_time = time.time() response_status['setup_time'] = setup_time - start_time local_env = os.environ.copy() local_env["OMP_NUM_THREADS"] = "1" local_env.update(extra_env) logger.debug("command str=%s", cmdstr) # This is copied from http://stackoverflow.com/a/17698359/4577954 # reasons for setting process group: http://stackoverflow.com/a/4791612 process = subprocess.Popen(cmdstr, shell=True, env=local_env, bufsize=1, stdout=subprocess.PIPE, preexec_fn=os.setsid) logger.info("launched process") def consume_stdout(stdout, queue): with stdout: for line in iter(stdout.readline, b''): queue.put(line) q = Queue() t = Thread(target=consume_stdout, args=(process.stdout, q)) t.daemon = True t.start() stdout = b"" while t.isAlive(): try: line = q.get_nowait() stdout += line logger.info(line) except Empty: time.sleep(PROCESS_STDOUT_SLEEP_SECS) total_runtime = time.time() - start_time if total_runtime > job_max_runtime: logger.warn( "Process exceeded maximum runtime of {} sec".format( job_max_runtime)) # Send the signal to all the process groups os.killpg(os.getpgid(process.pid), signal.SIGTERM) raise Exception( "OUTATIME", "Process executed for too long and was killed") logger.info("command execution finished") s3.meta.client.upload_file(output_filename, output_key[0], output_key[1]) logger.debug("output uploaded to %s %s", output_key[0], output_key[1]) end_time = time.time() response_status['stdout'] = stdout.decode("ascii") response_status['exec_time'] = time.time() - setup_time response_status['end_time'] = end_time response_status['host_submit_time'] = event['host_submit_time'] response_status['server_info'] = get_server_info() response_status.update(context_dict) except Exception as e: # internal runtime exceptions response_status['exception'] = str(e) response_status['exception_args'] = e.args response_status['exception_traceback'] = traceback.format_exc() finally: s3.meta.client.put_object(Bucket=status_key[0], Key=status_key[1], Body=json.dumps(response_status))
def killProcesses(self, pid):
    pgid = os.getpgid(pid)
    os.killpg(pgid, signal.SIGKILL)
    self.timedOut = True
    self.done = True
def kill_process():
    nonlocal cmd_success
    cmd_success = False
    os.killpg(os.getpgid(cmd.pid), signal.SIGTERM)
    logger.debug("cmd_output() timed out after {} seconds "
                 "with command '{}'".format(timeout, command))
state, action, vec_reward, reward, done, cum_reward, flag_success ) state = next_state if done: n_ep += 1 logging.warning( "Episode {} finished in {} steps, reward is {}.".format( n_ep, n_ep_steps, cum_reward, ) ) break except Exception as e: print e.message traceback.print_exc() finally: logging.warning("="*30) logging.warning("="*30) logging.warning("Tidying up...") # kill orphaned monitor daemon process if env is not None: env.env.exit() replay_buffer.close() if replay_buffer is not None: replay_buffer.close() if _agent is not None: _agent.stop() os.killpg(os.getpgid(os.getpid()), signal.SIGKILL) logging.warning("="*30)
def _stop_unix(self, errors): """ UNIX implementation of process killing @param errors: error messages. stop() will record messages into this list. @type errors: [str] """ self.exit_code = self.popen.poll() if self.exit_code is not None: _logger.debug( "process[%s].stop(): process has already returned %s", self.name, self.exit_code) #print "process[%s].stop(): process has already returned %s"%(self.name, self.exit_code) self.popen = None self.stopped = True return pid = self.popen.pid pgid = os.getpgid(pid) _logger.info("process[%s]: killing os process with pid[%s] pgid[%s]", self.name, pid, pgid) try: # Start with SIGINT and escalate from there. _logger.info("[%s] sending SIGINT to pgid [%s]", self.name, pgid) os.killpg(pgid, signal.SIGINT) _logger.info("[%s] sent SIGINT to pgid [%s]", self.name, pgid) timeout_t = time.time() + _TIMEOUT_SIGINT retcode = self.popen.poll() while time.time() < timeout_t and retcode is None: time.sleep(0.1) retcode = self.popen.poll() # Escalate non-responsive process if retcode is None: printerrlog("[%s] escalating to SIGTERM" % self.name) timeout_t = time.time() + _TIMEOUT_SIGTERM os.killpg(pgid, signal.SIGTERM) _logger.info("[%s] sent SIGTERM to pgid [%s]" % (self.name, pgid)) retcode = self.popen.poll() while time.time() < timeout_t and retcode is None: time.sleep(0.2) _logger.debug('poll for retcode') retcode = self.popen.poll() if retcode is None: printerrlog("[%s] escalating to SIGKILL" % self.name) errors.append( "process[%s, pid %s]: required SIGKILL. May still be running." % (self.name, pid)) try: os.killpg(pgid, signal.SIGKILL) _logger.info("[%s] sent SIGKILL to pgid [%s]" % (self.name, pgid)) # #2096: don't block on SIGKILL, because this results in more orphaned processes overall #self.popen.wait() #os.wait() _logger.info("process[%s]: sent SIGKILL", self.name) except OSError as e: if e.args[0] == 3: printerrlog("no [%s] process with pid [%s]" % (self.name, pid)) else: printerrlog( "errors shutting down [%s], see log for details" % self.name) _logger.error(traceback.format_exc()) else: _logger.info( "process[%s]: SIGTERM killed with return value %s", self.name, retcode) else: _logger.info("process[%s]: SIGINT killed with return value %s", self.name, retcode) finally: self.popen = None
def cleanup():
    os.killpg(os.getpgid(p.pid), 15)
def kill(self, p):
    print("Killing %r and related processes" % self.hash(), p.pid, os.getpgid(p.pid))
    p.kill()
    del p
    sys.exit()
def cancel(self):
    if self.isRunning():
        mutex.unlock()
        os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
        self.terminate()
def launch(self, launch_cmd, get_ip=True, qemuparams=None, extra_bootparams=None, env=None): try: if self.serial_ports >= 2: self.threadsock, threadport = self.create_socket() self.server_socket, self.serverport = self.create_socket() except socket.error as msg: self.logger.error("Failed to create listening socket: %s" % msg[1]) return False bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1' if extra_bootparams: bootparams = bootparams + ' ' + extra_bootparams # Ask QEMU to store the QEMU process PID in file, this way we don't have to parse running processes # and analyze descendents in order to determine it. if os.path.exists(self.qemu_pidfile): os.remove(self.qemu_pidfile) self.qemuparams = 'bootparams="{0}" qemuparams="-pidfile {1}"'.format( bootparams, self.qemu_pidfile) if qemuparams: self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"' if self.serial_ports >= 2: launch_cmd += ' tcpserial=%s:%s %s' % (threadport, self.serverport, self.qemuparams) else: launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams) self.origchldhandler = signal.getsignal(signal.SIGCHLD) signal.signal(signal.SIGCHLD, self.handleSIGCHLD) self.logger.debug('launchcmd=%s' % (launch_cmd)) # FIXME: We pass in stdin=subprocess.PIPE here to work around stty # blocking at the end of the runqemu script when using this within # oe-selftest (this makes stty error out immediately). There ought # to be a proper fix but this will suffice for now. self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp, env=env) output = self.runqemu.stdout # # We need the preexec_fn above so that all runqemu processes can easily be killed # (by killing their process group). This presents a problem if this controlling # process itself is killed however since those processes don't notice the death # of the parent and merrily continue on. # # Rather than hack runqemu to deal with this, we add something here instead. # Basically we fork off another process which holds an open pipe to the parent # and also is setpgrp. If/when the pipe sees EOF from the parent dieing, it kills # the process group. This is like pctrl's PDEATHSIG but for a process group # rather than a single process. # r, w = os.pipe() self.monitorpid = os.fork() if self.monitorpid: os.close(r) self.monitorpipe = os.fdopen(w, "w") else: # child process os.setpgrp() os.close(w) r = os.fdopen(r) x = r.read() os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM) sys.exit(0) self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid) self.logger.debug("waiting at most %s seconds for qemu pid (%s)" % (self.runqemutime, time.strftime("%D %H:%M:%S"))) endtime = time.time() + self.runqemutime while not self.is_alive() and time.time() < endtime: if self.runqemu.poll(): if self.runqemu_exited: return False if self.runqemu.returncode: # No point waiting any longer self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode) self._dump_host() self.logger.warning("Output from runqemu:\n%s" % self.getOutput(output)) self.stop() return False time.sleep(0.5) if self.runqemu_exited: return False if not self.is_alive(): self.logger.error("Qemu pid didn't appear in %s seconds (%s)" % (self.runqemutime, time.strftime("%D %H:%M:%S"))) # Dump all processes to help us to figure out what is going on... 
ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command '], stdout=subprocess.PIPE).communicate()[0] processes = ps.decode("utf-8") self.logger.debug("Running processes:\n%s" % processes) self._dump_host() op = self.getOutput(output) self.stop() if op: self.logger.error("Output from runqemu:\n%s" % op) else: self.logger.error("No output from runqemu.\n") return False # We are alive: qemu is running out = self.getOutput(output) netconf = False # network configuration is not required by default self.logger.debug( "qemu started in %s seconds - qemu procces pid is %s (%s)" % (time.time() - (endtime - self.runqemutime), self.qemupid, time.strftime("%D %H:%M:%S"))) cmdline = '' if get_ip: with open('/proc/%s/cmdline' % self.qemupid) as p: cmdline = p.read() # It is needed to sanitize the data received # because is possible to have control characters cmdline = re_control_char.sub(' ', cmdline) try: if self.use_slirp: tcp_ports = cmdline.split("hostfwd=tcp::")[1] host_port = tcp_ports[:tcp_ports.find('-')] self.ip = "localhost:%s" % host_port else: ips = re.findall(r"((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1]) self.ip = ips[0] self.server_ip = ips[1] self.logger.debug("qemu cmdline used:\n{}".format(cmdline)) except (IndexError, ValueError): # Try to get network configuration from runqemu output match = re.match( r'.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*', out, re.MULTILINE | re.DOTALL) if match: self.ip, self.server_ip, self.netmask = match.groups() # network configuration is required as we couldn't get it # from the runqemu command line, so qemu doesn't run kernel # and guest networking is not configured netconf = True else: self.logger.error( "Couldn't get ip from qemu command line and runqemu output! " "Here is the qemu command line used:\n%s\n" "and output from runqemu:\n%s" % (cmdline, out)) self._dump_host() self.stop() return False self.logger.debug("Target IP: %s" % self.ip) self.logger.debug("Server IP: %s" % self.server_ip) if self.serial_ports >= 2: self.thread = LoggingThread(self.log, self.threadsock, self.logger) self.thread.start() if not self.thread.connection_established.wait(self.boottime): self.logger.error( "Didn't receive a console connection from qemu. 
" "Here is the qemu command line used:\n%s\nand " "output from runqemu:\n%s" % (cmdline, out)) self.stop_thread() return False self.logger.debug("Output from runqemu:\n%s", out) self.logger.debug("Waiting at most %d seconds for login banner (%s)" % (self.boottime, time.strftime("%D %H:%M:%S"))) endtime = time.time() + self.boottime socklist = [self.server_socket] reachedlogin = False stopread = False qemusock = None bootlog = b'' data = b'' while time.time() < endtime and not stopread: try: sread, swrite, serror = select.select(socklist, [], [], 5) except InterruptedError: continue for sock in sread: if sock is self.server_socket: qemusock, addr = self.server_socket.accept() qemusock.setblocking(0) socklist.append(qemusock) socklist.remove(self.server_socket) self.logger.debug("Connection from %s:%s" % addr) else: data = data + sock.recv(1024) if data: bootlog += data if self.serial_ports < 2: # this socket has mixed console/kernel data, log it to logfile self.log(data) data = b'' if self.boot_patterns[ 'search_reached_prompt'] in bootlog: self.server_socket = qemusock stopread = True reachedlogin = True self.logger.debug( "Reached login banner in %s seconds (%s)" % (time.time() - (endtime - self.boottime), time.strftime("%D %H:%M:%S"))) else: # no need to check if reachedlogin unless we support multiple connections self.logger.debug( "QEMU socket disconnected before login banner reached. (%s)" % time.strftime("%D %H:%M:%S")) socklist.remove(sock) sock.close() stopread = True if not reachedlogin: if time.time() >= endtime: self.logger.warning( "Target didn't reach login banner in %d seconds (%s)" % (self.boottime, time.strftime("%D %H:%M:%S"))) tail = lambda l: "\n".join(l.splitlines()[-25:]) bootlog = bootlog.decode("utf-8") # in case bootlog is empty, use tail qemu log store at self.msg lines = tail(bootlog if bootlog else self.msg) self.logger.warning("Last 25 lines of text:\n%s" % lines) self.logger.warning("Check full boot log: %s" % self.logfile) self._dump_host() self.stop() return False # If we are not able to login the tests can continue try: (status, output) = self.run_serial(self.boot_patterns['send_login_user'], raw=True) if re.search(self.boot_patterns['search_login_succeeded'], output): self.logged = True self.logger.debug("Logged as root in serial console") if netconf: # configure guest networking cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask) output = self.run_serial(cmd, raw=True)[1] if re.search(r"root@[a-zA-Z0-9\-]+:~#", output): self.logger.debug("configured ip address %s", self.ip) else: self.logger.debug( "Couldn't configure guest networking") else: self.logger.warning("Couldn't login into serial console" " as root using blank password") self.logger.warning("The output:\n%s" % output) except: self.logger.warning("Serial console failed while trying to login") return True
def _set_up_and_run_controller_function(controller_function, minion_function, controller_timeout, sleep_time, **kwargs): """Set up a pty, spawn a minion process, execute controller_function. Handles the mechanics of setting up a PTY, then calls ``controller_function``. """ os.setsid() # new session; this process is the controller controller_fd, minion_fd = os.openpty() pty_name = os.ttyname(minion_fd) # take controlling terminal pty_fd = os.open(pty_name, os.O_RDWR) os.close(pty_fd) ready = multiprocessing.Value('i', False) minion_process = multiprocessing.Process( target=PseudoShell._set_up_and_run_minion_function, args=(pty_name, sys.stdout.fileno(), sys.stderr.fileno(), ready, minion_function), kwargs=kwargs, ) minion_process.start() # wait for subprocess to be running and connected. while not ready.value: time.sleep(1e-5) pass if kwargs.get("debug"): sys.stderr.write("pid: %d\n" % os.getpid()) sys.stderr.write("pgid: %d\n" % os.getpgrp()) sys.stderr.write("sid: %d\n" % os.getsid(0)) sys.stderr.write("tcgetpgrp: %d\n" % os.tcgetpgrp(controller_fd)) sys.stderr.write("\n") minion_pgid = os.getpgid(minion_process.pid) sys.stderr.write("minion pid: %d\n" % minion_process.pid) sys.stderr.write("minion pgid: %d\n" % minion_pgid) sys.stderr.write("minion sid: %d\n" % os.getsid(minion_process.pid)) sys.stderr.write("\n") sys.stderr.flush() # set up controller to ignore SIGTSTP, like a shell signal.signal(signal.SIGTSTP, signal.SIG_IGN) # call the controller function once the minion is ready try: controller = ProcessController(minion_process.pid, controller_fd, debug=kwargs.get("debug")) controller.timeout = controller_timeout controller.sleep_time = sleep_time error = controller_function(minion_process, controller, **kwargs) except BaseException: error = 1 traceback.print_exc() minion_process.join() # return whether either the parent or minion failed return error or minion_process.exitcode
def fg(self):
    self.horizontal_line("fg")
    with log.ignore_signal(signal.SIGTTOU):
        os.tcsetpgrp(self.controller_fd, os.getpgid(self.pid))
    time.sleep(self.sleep_time)
def launch(self, launch_cmd, get_ip=True, qemuparams=None, extra_bootparams=None, env=None): # use logfile to determine the recipe-sysroot-native path and # then add in the site-packages path components and add that # to the python sys.path so qmp.py can be found. python_path = os.path.dirname(os.path.dirname(self.logfile)) python_path += "/recipe-sysroot-native/usr/lib/python3.9/site-packages" sys.path.append(python_path) importlib.invalidate_caches() try: qmp = importlib.import_module("qmp") except: self.logger.error( "qemurunner: qmp.py missing, please ensure it's installed") return False # Path relative to tmpdir used as cwd for qemu below to avoid unix socket path length issues qmp_file = "." + next(tempfile._get_candidate_names()) qmp_param = ' -S -qmp unix:./%s,server,wait' % (qmp_file) qmp_port = self.tmpdir + "/" + qmp_file # Create a second socket connection for debugging use, # note this will NOT cause qemu to block waiting for the connection qmp_file2 = "." + next(tempfile._get_candidate_names()) qmp_param += ' -qmp unix:./%s,server,nowait' % (qmp_file2) qmp_port2 = self.tmpdir + "/" + qmp_file2 self.logger.info("QMP Available for connection at %s" % (qmp_port2)) try: if self.serial_ports >= 2: self.threadsock, threadport = self.create_socket() self.server_socket, self.serverport = self.create_socket() except socket.error as msg: self.logger.error("Failed to create listening socket: %s" % msg[1]) return False bootparams = ' printk.time=1' if extra_bootparams: bootparams = bootparams + ' ' + extra_bootparams # Ask QEMU to store the QEMU process PID in file, this way we don't have to parse running processes # and analyze descendents in order to determine it. if os.path.exists(self.qemu_pidfile): os.remove(self.qemu_pidfile) self.qemuparams = 'bootparams="{0}" qemuparams="-pidfile {1} {2}"'.format( bootparams, self.qemu_pidfile, qmp_param) if qemuparams: self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"' if self.serial_ports >= 2: launch_cmd += ' tcpserial=%s:%s %s' % (threadport, self.serverport, self.qemuparams) else: launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams) self.origchldhandler = signal.getsignal(signal.SIGCHLD) signal.signal(signal.SIGCHLD, self.handleSIGCHLD) self.logger.debug('launchcmd=%s' % (launch_cmd)) # FIXME: We pass in stdin=subprocess.PIPE here to work around stty # blocking at the end of the runqemu script when using this within # oe-selftest (this makes stty error out immediately). There ought # to be a proper fix but this will suffice for now. self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp, env=env, cwd=self.tmpdir) output = self.runqemu.stdout launch_time = time.time() # # We need the preexec_fn above so that all runqemu processes can easily be killed # (by killing their process group). This presents a problem if this controlling # process itself is killed however since those processes don't notice the death # of the parent and merrily continue on. # # Rather than hack runqemu to deal with this, we add something here instead. # Basically we fork off another process which holds an open pipe to the parent # and also is setpgrp. If/when the pipe sees EOF from the parent dieing, it kills # the process group. This is like pctrl's PDEATHSIG but for a process group # rather than a single process. 
# r, w = os.pipe() self.monitorpid = os.fork() if self.monitorpid: os.close(r) self.monitorpipe = os.fdopen(w, "w") else: # child process os.setpgrp() os.close(w) r = os.fdopen(r) x = r.read() os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM) sys.exit(0) self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid) self.logger.debug("waiting at most %s seconds for qemu pid (%s)" % (self.runqemutime, time.strftime("%D %H:%M:%S"))) endtime = time.time() + self.runqemutime while not self.is_alive() and time.time() < endtime: if self.runqemu.poll(): if self.runqemu_exited: self.logger.warning("runqemu during is_alive() test") return False if self.runqemu.returncode: # No point waiting any longer self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode) self._dump_host() self.logger.warning("Output from runqemu:\n%s" % self.getOutput(output)) self.stop() return False time.sleep(0.5) if self.runqemu_exited: self.logger.warning("runqemu after timeout") if self.runqemu.returncode: self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode) if not self.is_alive(): self.logger.error("Qemu pid didn't appear in %s seconds (%s)" % (self.runqemutime, time.strftime("%D %H:%M:%S"))) qemu_pid = None if os.path.isfile(self.qemu_pidfile): with open(self.qemu_pidfile, 'r') as f: qemu_pid = f.read().strip() self.logger.error( "Status information, poll status: %s, pidfile exists: %s, pidfile contents %s, proc pid exists %s" % (self.runqemu.poll(), os.path.isfile(self.qemu_pidfile), str(qemu_pid), os.path.exists("/proc/" + str(qemu_pid)))) # Dump all processes to help us to figure out what is going on... ps = subprocess.Popen( ['ps', 'axww', '-o', 'pid,ppid,pri,ni,command '], stdout=subprocess.PIPE).communicate()[0] processes = ps.decode("utf-8") self.logger.debug("Running processes:\n%s" % processes) self._dump_host() op = self.getOutput(output) self.stop() if op: self.logger.error("Output from runqemu:\n%s" % op) else: self.logger.error("No output from runqemu.\n") return False # Create the client socket for the QEMU Monitor Control Socket # This will allow us to read status from Qemu if the the process # is still alive self.logger.debug("QMP Initializing to %s" % (qmp_port)) # chdir dance for path length issues with unix sockets origpath = os.getcwd() try: os.chdir(os.path.dirname(qmp_port)) try: self.qmp = qmp.QEMUMonitorProtocol(os.path.basename(qmp_port)) except OSError as msg: self.logger.warning( "Failed to initialize qemu monitor socket: %s File: %s" % (msg, msg.filename)) return False self.logger.debug("QMP Connecting to %s" % (qmp_port)) if not os.path.exists(qmp_port) and self.is_alive(): self.logger.debug( "QMP Port does not exist waiting for it to be created") endtime = time.time() + self.runqemutime while not os.path.exists(qmp_port) and self.is_alive( ) and time.time() < endtime: self.logger.info("QMP port does not exist yet!") time.sleep(0.5) if not os.path.exists(qmp_port) and self.is_alive(): self.logger.warning( "QMP Port still does not exist but QEMU is alive") return False try: self.qmp.connect() connect_time = time.time() self.logger.info( "QMP connected to QEMU at %s and took %s seconds" % (time.strftime("%D %H:%M:%S"), time.time() - launch_time)) except OSError as msg: self.logger.warning( "Failed to connect qemu monitor socket: %s File: %s" % (msg, msg.filename)) return False except qmp.QMPConnectError as msg: self.logger.warning( "Failed to communicate with qemu monitor: %s" % (msg)) return False finally: os.chdir(origpath) # We worry that 
mmap'd libraries may cause page faults which hang the qemu VM for periods # causing failures. Before we "start" qemu, read through it's mapped files to try and # ensure we don't hit page faults later mapdir = "/proc/" + str(self.qemupid) + "/map_files/" try: for f in os.listdir(mapdir): linktarget = os.readlink(os.path.join(mapdir, f)) if not linktarget.startswith("/") or linktarget.startswith( "/dev") or "deleted" in linktarget: continue with open(linktarget, "rb") as readf: data = True while data: data = readf.read(4096) # Centos7 doesn't allow us to read /map_files/ except PermissionError: pass # Release the qemu process to continue running self.run_monitor('cont') self.logger.info( "QMP released QEMU at %s and took %s seconds from connect" % (time.strftime("%D %H:%M:%S"), time.time() - connect_time)) # We are alive: qemu is running out = self.getOutput(output) netconf = False # network configuration is not required by default self.logger.debug( "qemu started in %s seconds - qemu procces pid is %s (%s)" % (time.time() - (endtime - self.runqemutime), self.qemupid, time.strftime("%D %H:%M:%S"))) cmdline = '' if get_ip: with open('/proc/%s/cmdline' % self.qemupid) as p: cmdline = p.read() # It is needed to sanitize the data received # because is possible to have control characters cmdline = re_control_char.sub(' ', cmdline) try: if self.use_slirp: tcp_ports = cmdline.split("hostfwd=tcp::")[1] host_port = tcp_ports[:tcp_ports.find('-')] self.ip = "localhost:%s" % host_port else: ips = re.findall(r"((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1]) self.ip = ips[0] self.server_ip = ips[1] self.logger.debug("qemu cmdline used:\n{}".format(cmdline)) except (IndexError, ValueError): # Try to get network configuration from runqemu output match = re.match( r'.*Network configuration: (?:ip=)*([0-9.]+)::([0-9.]+):([0-9.]+)$.*', out, re.MULTILINE | re.DOTALL) if match: self.ip, self.server_ip, self.netmask = match.groups() # network configuration is required as we couldn't get it # from the runqemu command line, so qemu doesn't run kernel # and guest networking is not configured netconf = True else: self.logger.error( "Couldn't get ip from qemu command line and runqemu output! " "Here is the qemu command line used:\n%s\n" "and output from runqemu:\n%s" % (cmdline, out)) self._dump_host() self.stop() return False self.logger.debug("Target IP: %s" % self.ip) self.logger.debug("Server IP: %s" % self.server_ip) if self.serial_ports >= 2: self.thread = LoggingThread(self.log, self.threadsock, self.logger) self.thread.start() if not self.thread.connection_established.wait(self.boottime): self.logger.error( "Didn't receive a console connection from qemu. 
" "Here is the qemu command line used:\n%s\nand " "output from runqemu:\n%s" % (cmdline, out)) self.stop_thread() return False self.logger.debug("Output from runqemu:\n%s", out) self.logger.debug("Waiting at most %d seconds for login banner (%s)" % (self.boottime, time.strftime("%D %H:%M:%S"))) endtime = time.time() + self.boottime socklist = [self.server_socket] reachedlogin = False stopread = False qemusock = None bootlog = b'' data = b'' while time.time() < endtime and not stopread: try: sread, swrite, serror = select.select(socklist, [], [], 5) except InterruptedError: continue for sock in sread: if sock is self.server_socket: qemusock, addr = self.server_socket.accept() qemusock.setblocking(0) socklist.append(qemusock) socklist.remove(self.server_socket) self.logger.debug("Connection from %s:%s" % addr) else: data = data + sock.recv(1024) if data: bootlog += data if self.serial_ports < 2: # this socket has mixed console/kernel data, log it to logfile self.log(data) data = b'' if self.boot_patterns[ 'search_reached_prompt'] in bootlog: self.server_socket = qemusock stopread = True reachedlogin = True self.logger.debug( "Reached login banner in %s seconds (%s)" % (time.time() - (endtime - self.boottime), time.strftime("%D %H:%M:%S"))) else: # no need to check if reachedlogin unless we support multiple connections self.logger.debug( "QEMU socket disconnected before login banner reached. (%s)" % time.strftime("%D %H:%M:%S")) socklist.remove(sock) sock.close() stopread = True if not reachedlogin: if time.time() >= endtime: self.logger.warning( "Target didn't reach login banner in %d seconds (%s)" % (self.boottime, time.strftime("%D %H:%M:%S"))) tail = lambda l: "\n".join(l.splitlines()[-25:]) bootlog = bootlog.decode("utf-8") # in case bootlog is empty, use tail qemu log store at self.msg lines = tail(bootlog if bootlog else self.msg) self.logger.warning("Last 25 lines of text:\n%s" % lines) self.logger.warning("Check full boot log: %s" % self.logfile) self._dump_host() self.stop() return False # If we are not able to login the tests can continue try: (status, output) = self.run_serial(self.boot_patterns['send_login_user'], raw=True, timeout=120) if re.search(self.boot_patterns['search_login_succeeded'], output): self.logged = True self.logger.debug("Logged as root in serial console") if netconf: # configure guest networking cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask) output = self.run_serial(cmd, raw=True)[1] if re.search(r"root@[a-zA-Z0-9\-]+:~#", output): self.logger.debug("configured ip address %s", self.ip) else: self.logger.debug( "Couldn't configure guest networking") else: self.logger.warning("Couldn't login into serial console" " as root using blank password") self.logger.warning("The output:\n%s" % output) except: self.logger.warning("Serial console failed while trying to login") return True
import sys

# See docs for the sys module: https://docs.python.org/3.7/library/sys.html

# Print out the command line arguments in sys.argv, one per line:
# YOUR CODE HERE
for i in sys.argv:
    print(i)
print("done printing")

# Print out the OS platform you're using:
# YOUR CODE HERE
print(sys.platform)

# Print out the version of Python you're using:
# YOUR CODE HERE
print(sys.version)

import os

# See the docs for the OS module: https://docs.python.org/3.7/library/os.html

# Print the current process ID
# YOUR CODE HERE
# Note: os.getpgid(0) returns the process *group* ID of the calling process;
# os.getpid() returns the process ID itself.
print(os.getpid())

# Print the current working directory (cwd):
# YOUR CODE HERE
print(os.getcwd())

# Print out your machine's login name
# YOUR CODE HERE
print(os.getlogin())
# READ the `Instructions` file. You can access it by clicking on the 'Files' icon on the
# left side/column of the screen (i.e. the top-most icon, above Packages and Settings)
# and then clicking on the 'Instructions' file to show its contents.

# This is the main.py script. It's the only script that you can run on this REPL platform
# via the run button above. This script takes care of displaying the output of the stock
# price feed script you need to change in the `jpm_module_1` folder.

# YOU SHOULD NOT CHANGE ANYTHING HERE AS IT ONLY RUNS THE STOCK PRICE FEED SCRIPT AND SHOWS THE OUTPUT

# IF YOU WISH TO DO THE BONUS TASK DESCRIBED IN THE INSTRUCTIONS FILE, UNCOMMENT THE CODE BELOW

import os
import subprocess
import time
import signal

os.chdir(os.getcwd() + '/jpm_module_1')

process = subprocess.Popen(['python', 'server3.py'], cwd=os.getcwd(), preexec_fn=os.setsid)
time.sleep(.300)

process2 = subprocess.Popen(['python', 'client3.py'], cwd=os.getcwd(), preexec_fn=os.setsid)
process2.wait()

os.killpg(os.getpgid(process.pid), signal.SIGTERM)

# FOR BONUS TASK
# IF YOU WANT TO DO IT THEN UNCOMMENT THE CODE BELOW
# Comments are anything that's preceded with '#'
# TO UNCOMMENT JUST REMOVE THE '#'

#print("UNIT TEST RESULTS BELOW...")
#process2 = subprocess.Popen(['python', 'client_test.py'], cwd=os.getcwd(), preexec_fn=os.setsid)
#process2.wait()
        continue
    id, url, duration = feedback
    c.execute('COMMIT')
    c.execute('UPDATE urlstorage_feedback SET visited = 1 WHERE id = %s', (id, ))
    c.execute('COMMIT')
    if not (url.startswith('http://') or url.startswith('https://')):
        print('Invalid scheme for URL {}'.format(url))
        continue
    print('Processing URL {}'.format(url))
    sessionid = login()
    p = Popen(['phantomjs', '/xss/visit.js', url, sessionid, str(duration)],
              preexec_fn=os.setsid)
    pgid = os.getpgid(p.pid)
    try:
        p.communicate()
    except:
        print('p.communicate failed')
    try:
        os.killpg(pgid, signal.SIGKILL)
    except:
        print('os.killpg failed')
    print('Done processing URL {}'.format(url))
def tearDown(self):
    os.killpg(os.getpgid(self.app.pid), signal.SIGINT)
#!/usr/bin/env python3
import atexit
from signal import SIGTERM
from os.path import dirname
from subprocess import Popen
from os import killpg, getpgid

if __name__ != '__main__':
    pro = Popen(
        [dirname(__file__) + '/server/manage.py', 'runserver', '0.0.0.0:80'])
    atexit.register(lambda x: killpg(getpgid(x.pid), SIGTERM), pro)
def tearDown(self):
    self.driver.close()
    os.killpg(os.getpgid(self.app.pid), signal.SIGINT)
def on_kill(self):
    self.log.info("Sending SIGTERM signal to bash process group")
    os.killpg(os.getpgid(self.hook.sp.pid), signal.SIGTERM)
def proc_kill_on_timeout(done, timeout, proc):
    if not done.wait(timeout):
        os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
def inner(*args):
    try:
        os.killpg(os.getpgid(process.pid), signal)
    except OSError:
        pass
has_camera = checks.get('enableCamera', False)
if (route not in passive_routes) and (route not in forced_dashcam_routes) and has_camera:
    extra_socks.append("sendcan")
if route not in passive_routes:
    extra_socks.append("pathPlan")

recvd_socks = wait_for_sockets(tested_socks + extra_socks, timeout=30)
failures = [s for s in tested_socks + extra_socks if s not in recvd_socks]

print("Check if everything is running")
running = manager.get_running()
for p in tested_procs:
    if not running[p].is_alive:
        failures.append(p)
    manager.kill_managed_process(p)
os.killpg(os.getpgid(unlogger.pid), signal.SIGTERM)

sockets_ok = len(failures) == 0
params_ok = True

try:
    car_params = car.CarParams.from_bytes(params.get("CarParams"))
    for k, v in checks.items():
        if not v == getattr(car_params, k):
            params_ok = False
            failures.append(k)
except Exception:
    params_ok = False

if sockets_ok and params_ok:
    print("Success")
def toolTerminate(self):
    subprocess.check_call('./Post', shell=True)
    if not (self.proc is None):
        os.killpg(os.getpgid(self.proc.pid), signal.SIGKILL)
    print "Terminate tool!"
def _kill_pg(p):
    '''kill a process' process group'''
    os.killpg(os.getpgid(p.pid), signal.SIGKILL)
def cleanup(self):
    """Cleanup the scenario."""
    if self.__server is not None:
        logging.getLogger(__name__).info('Killing server.')
        os.killpg(os.getpgid(self.__server.pid), signal.SIGTERM)
        self.__server = None
            raise RuntimeError("EXPECTED TMS")
        arro.append(int(m.group(2), 16))
    vec = bytearray()
    ser(vec, [bits], 4)
    ser(vec, arri[0::2], byts)
    ser(vec, arri[1::2], byts)
    sd.send(bytearray('shift:', 'ascii'))
    sd.send(vec)
    tmp = bytearray()
    ser(tmp, arro, byts)
    got = sd.recv(2000)
    if (got != tmp):
        if (len(got) != len(tmp)):
            print("Length mismatch: got {} exp {}".format(len(got), len(tmp)))
        raise RuntimeError("TDO MISMATCH")
    m = None

if __name__ == "__main__":
    (opts, args) = getopt.getopt(sys.argv[1:], "k")
    dokill = False
    for (o, a) in opts:
        if o == '-k':
            dokill = True
    try:
        playfile('testData.txt')
    except:
        if dokill:
            os.kill(os.getpgid(0), 15)
        raise