def wait(self):
    """Loop waiting on children to die and respawning as necessary."""
    LOG.debug(_('Full set of CONF:'))
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    while True:
        self.handle_signal()
        self._respawn_children()
        if self.sigcaught:
            signame = _signo_to_signame(self.sigcaught)
            LOG.info(_('Caught %s, stopping children'), signame)
        # Anything other than SIGHUP ends the supervision loop.
        if not _is_sighup(self.sigcaught):
            break

        # SIGHUP: forward the hang-up to every child and keep running.
        for pid in self.children:
            os.kill(pid, signal.SIGHUP)
        self.running = True
        self.sigcaught = None

    # Ask all remaining children to terminate; a child that already
    # exited raises ESRCH, which is expected and ignored.
    for pid in self.children:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as exc:
            if exc.errno != errno.ESRCH:
                raise

    # Wait for children to die
    if self.children:
        LOG.info(_('Waiting on %d children to exit'), len(self.children))
        while self.children:
            self._wait_child()
def stop(self):
    """Stop the daemon.

    Reads the pid from the pidfile, sends SIGTERM until the process
    disappears, then removes the stale pidfile.  A missing pidfile is
    not an error (supports restart).
    """
    # Get the pid from the pidfile.  Use open() + context manager:
    # the py2-only file() builtin leaked the handle on a parse error.
    try:
        with open(self.pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except (IOError, ValueError):
        pid = None

    if not pid:
        message = "pidfile %s does not exist. Daemon not running?\n"
        sys.stderr.write(message % self.pidfile)
        return  # not an error in a restart

    # Try killing the daemon process
    try:
        while 1:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError as err:
        # Compare errno rather than searching the stringified error,
        # which is locale- and format-dependent.
        if err.errno == errno.ESRCH:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print(str(err))
            sys.exit(1)
def test_abnormal_typechecker_exit_message(self):
    """
    Tests that the monitor outputs a useful message when its typechecker
    exits abnormally.
    """
    self.write_load_config()
    # Start a fresh server and monitor.
    launch_logs = self.check_cmd(['No errors!'])
    self.assertIn('Server launched with the following command', launch_logs)
    self.assertIn('Logs will go to', launch_logs)
    # Extract the monitor's log file path from the launch output.
    log_file_pattern = re.compile('Logs will go to (.*)')
    monitor_log_match = log_file_pattern.search(launch_logs)
    self.assertIsNotNone(monitor_log_match)
    monitor_log_path = monitor_log_match.group(1)
    self.assertIsNotNone(monitor_log_path)
    with open(monitor_log_path) as f:
        monitor_logs = f.read()
    # The monitor logs the typechecker's pid; SIGTERM it to simulate
    # an abnormal typechecker exit.
    m = re.search(
        'Just started typechecker server with pid: ([0-9]+)',
        monitor_logs)
    self.assertIsNotNone(m)
    pid = m.group(1)
    self.assertIsNotNone(pid)
    os.kill(int(pid), signal.SIGTERM)
    # For some reason, waitpid in the monitor after the kill signal
    # sent above doesn't preserve ordering - maybe because they're
    # in separate processes? Give it some time.
    time.sleep(1)
    client_error = self.check_cmd(['No errors!'])
    self.assertIn('Last server killed by signal', client_error)
def signal(self, signum):
    """Send *signum* to the managed process, best-effort.

    Only OSError (process already gone, or no permission) is
    suppressed; the original bare ``except`` also hid programming
    errors such as AttributeError or TypeError.
    """
    try:
        os.kill(self.pid, signum)
    except OSError:
        # Process already exited or is not ours to signal.
        pass
def wait_for_active_job(signal_to_send=None):
    """
    Wait for the active job to finish, to be killed by SIGINT, or to be
    suspended by ctrl-z.

    :param signal_to_send: optional signal delivered to the foreground
        process after it has been given the terminal.
    """
    _clear_dead_jobs()
    act = builtins.__xonsh_active_job__
    if act is None:
        return
    job = builtins.__xonsh_all_jobs__[act]
    obj = job["obj"]
    # Background jobs are not waited on.
    if job["bg"]:
        return
    pgrp = job["pgrp"]
    obj.done = False

    # give the terminal over to the fg process
    _give_terminal_to(pgrp)
    # if necessary, send the specified signal to this process
    # (this hook was added because vim, emacs, etc, seem to need to have
    # the terminal when they receive SIGCONT from the "fg" command)
    if signal_to_send is not None:
        os.kill(obj.pid, signal_to_send)
    # WUNTRACED also reports children stopped by ^Z, not just exits.
    _, s = os.waitpid(obj.pid, os.WUNTRACED)
    if os.WIFSTOPPED(s):
        # ^Z: move the job to the background in a stopped state.
        obj.done = True
        job["bg"] = True
        job["status"] = "stopped"
        print()  # get a newline because ^Z will have been printed
        print_one_job(act)
    elif os.WIFSIGNALED(s):
        print()  # get a newline because ^C will have been printed
    if obj.poll() is not None:
        builtins.__xonsh_active_job__ = None
    _give_terminal_to(_shell_pgrp)  # give terminal back to the shell
def close(self):
    """Forcibly terminate the tracked process with SIGKILL.

    If the pid no longer exists (already closed, or the process died on
    its own), the failure is logged rather than raised.
    """
    warn = self._log.warning
    try:
        os.kill(self.pid, signal.SIGKILL)
    except OSError as err:
        warn("Close called more than once or process ended unexpectedly")
        warn(err)
def check_pid(pid):
    """Return True if a process with *pid* appears to exist.

    Signal 0 performs error checking without delivering a signal.
    EPERM means the process exists but belongs to another user, so it
    still counts as alive; the original reported it as dead.
    """
    try:
        os.kill(pid, 0)
    except OSError as err:
        return err.errno == errno.EPERM
    else:
        return True
def test_signal_handler_unsubscribe(self):
    """Verify that an unsubscribed (old) SIGTERM handler still runs."""
    self._require_signal_and_kill("SIGTERM")
    # Although Windows has `os.kill` and SIGTERM is defined, the
    # platform does not implement signals and sending SIGTERM
    # will result in a forced termination of the process.
    # Therefore, this test is not suitable for Windows.
    if os.name == "nt":
        self.skip("SIGTERM not available")
    # Spawn a normal, undaemonized process.
    p = helper.CPProcess(ssl=(self.scheme.lower() == "https"))
    p.write_conf(
        extra="""unsubsig: True
test_case_name: "test_signal_handler_unsubscribe"
""")
    p.start(imports="cherrypy.test._test_states_demo")
    # Ask the process to quit
    os.kill(p.get_pid(), signal.SIGTERM)
    # This might hang if things aren't working right, but meh.
    p.join()

    # Assert the old handler ran.  Use a context manager so the log
    # handle is closed deterministically instead of leaking until GC.
    with open(p.error_log, "rb") as log_file:
        target_line = log_file.readlines()[-10]
    if not ntob("I am an old SIGTERM handler.") in target_line:
        self.fail("Old SIGTERM handler did not run.\n%r" % target_line)
def stop_integration_server(server, modules):
    """Stop the dev appserver and any stray runtime processes.

    Args:
        server: handle for the dev_appserver.py process; killed directly.
        modules: truthy when the placeholder custom.yaml should be
            rewritten so later runs start from a clean configuration.
    """
    server.kill()  # dev_appserver.py itself.

    # The new dev appserver starts a _python_runtime.py process that isn't
    # captured by start_integration_server and so doesn't get killed. Until
    # it's done, our tests will never complete so we kill it manually.
    (stdout, unused_stderr) = subprocess.Popen(
        ['pgrep', '-f', '_python_runtime.py'], stdout=subprocess.PIPE
    ).communicate()
    # communicate() yields bytes on Python 3; decode before str.split.
    if isinstance(stdout, bytes):
        stdout = stdout.decode()

    # If tests are killed partway through, runtimes can build up; send kill
    # signals to all of them, JIC.
    pids = [int(pid.strip()) for pid in stdout.split('\n') if pid.strip()]
    for pid in pids:
        try:
            os.kill(pid, signal.SIGKILL)
        except OSError:
            pass  # Exited between pgrep and the kill; nothing to do.

    if modules:
        with open(os.path.join(
                os.path.dirname(__file__), '..', 'custom.yaml'), 'w') as fp:
            fp.writelines([
                '# Add configuration for your application here to avoid\n'
                '# potential merge conflicts with new releases of the main\n'
                '# app.yaml file. Modules registered here should support the\n'
                '# standard CourseBuilder module config. (Specifically, the\n'
                '# imported Python module should provide a method\n'
                '# "register_module()", taking no parameters and returning a\n'
                '# models.custom_modules.Module instance.\n'
                '#\n'
                'env_variables:\n'
                '# GCB_REGISTERED_MODULES_CUSTOM:\n'
                '# modules.my_extension_module\n'
                '# my_extension.modules.widgets\n'
                '# my_extension.modules.blivets\n'
            ])
def stop_all_httpd(self):
    """SIGKILL every tracked httpd process, then reap one child."""
    if not self._httpd_pids:
        return
    for httpd_pid in self._httpd_pids:
        os.kill(httpd_pid, signal.SIGKILL)
        logger.info('httpd killed. PID: {0}'.format(httpd_pid))
    os.wait()
def test_SIGHUP_daemonized(self):
    # When daemonized, SIGHUP should restart the server.
    try:
        from signal import SIGHUP
    except ImportError:
        return self.skip("skipped (no SIGHUP) ")
    if os.name not in ["posix"]:
        return self.skip("skipped (not on posix) ")

    # Spawn the process and wait, when this returns, the original process
    # is finished. If it daemonized properly, we should still be able
    # to access pages.
    p = helper.CPProcess(ssl=(self.scheme.lower() == "https"),
                         wait=True, daemonize=True)
    p.write_conf(extra='test_case_name: "test_SIGHUP_daemonized"')
    p.start(imports="cherrypy.test._test_states_demo")

    pid = p.get_pid()
    try:
        # Send a SIGHUP
        os.kill(pid, SIGHUP)
        # Give the server some time to restart
        time.sleep(2)
        # A restarted daemon serves under a new pid; verify it changed.
        self.getPage("/pid")
        self.assertStatus(200)
        new_pid = int(self.body)
        self.assertNotEqual(new_pid, pid)
    finally:
        # Shut down the spawned process
        self.getPage("/exit")
    p.join()
def self_destruct(signum, frame):
    """Signal handler of last resort: log the interrupted stack and
    SIGKILL the current process.

    The SIGKILL is issued from ``finally`` so the process dies even if
    logging itself fails.
    """
    try:
        import traceback
        log.error("Self-destructing...")
        # format_exc() only produces output inside an ``except`` block;
        # in a signal handler there is usually no active exception, so
        # it would log "NoneType: None".  Dump the interrupted stack
        # frame instead.
        log.error(''.join(traceback.format_stack(frame)))
    finally:
        os.kill(os.getpid(), signal.SIGKILL)
def test(result):
    # First SIGINT: the installed break handler should catch it and set
    # result.shouldStop instead of raising KeyboardInterrupt.
    pid = os.getpid()
    os.kill(pid, signal.SIGINT)
    result.breakCaught = True
    self.assertTrue(result.shouldStop)
    # Second SIGINT while already stopping is expected to raise, so the
    # fail() below should never be reached.
    os.kill(pid, signal.SIGINT)
    self.fail("Second KeyboardInterrupt not raised")
def reap_children(self):
    """Reap children whose processes have exited.

    Returns the number of children reaped, so callers can write
    ``if self.reap_children():``.
    """
    reaped = 0
    # Reap dead children.  Iterate over a snapshot: entries are deleted
    # inside the loop, and mutating a dict while iterating its keys()
    # view raises RuntimeError on Python 3.
    for child in list(self.children.keys()):
        try:
            # Check if child is alive (signal 0 = error checking only).
            os.kill(child, 0)
        except OSError:
            # Not alive - remove from children list.
            (ip, port, _, standby_time, disposable) = self.children[child]
            del self.children[child]
            reaped += 1
            # If reconnect and exclusive is on, then
            # we add this connection to the standby list.
            # NOTE: At this point, you only get on the standby
            # list if the IP is exclusive and with reconnect.
            # This means that it will *not* get selected again
            # and the only necessary means of removing the IP
            # is through the clear_standby() hook.
            if standby_time:
                self.standby[(ip, port)] = \
                    (time.time() + standby_time, disposable)
            else:
                if disposable:
                    self.discard_notify(ip)
                else:
                    self.locks.remove("%s:%d" % (ip, port))
    # Return the number of children reaped.
    return reaped
def wait(self):
    """Loop waiting on children to die and respawning as necessary."""
    systemd.notify_once()
    LOG.debug('Full set of CONF:')
    CONF.log_opt_values(LOG, logging.DEBUG)

    try:
        while True:
            self.handle_signal()
            self._respawn_children()
            # No signal means that stop was called. Don't clean up here.
            if not self.sigcaught:
                return

            signame = _signo_to_signame(self.sigcaught)
            LOG.info(_LI('Caught %s, stopping children'), signame)
            # Anything but a daemon-mode SIGHUP terminates the loop.
            if not _is_sighup_and_daemon(self.sigcaught):
                break

            # SIGHUP: reload configuration, reset each distinct child
            # service, then forward the HUP to the children.
            cfg.CONF.reload_config_files()
            for service in set(
                    [wrap.service for wrap in self.children.values()]):
                service.reset()

            for pid in self.children:
                os.kill(pid, signal.SIGHUP)

            self.running = True
            self.sigcaught = None
    except eventlet.greenlet.GreenletExit:
        LOG.info(_LI("Wait called after thread killed. Cleaning up."))

    self.stop()
def stop(self):
    """Stop the daemon.

    Sends SIGTERM repeatedly until the process disappears, then
    removes the stale pidfile.  A missing pidfile is not an error
    (supports restart).
    """
    # Get the pid from the pidfile
    try:
        with open(self.pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except IOError:
        pid = None

    if not pid:
        message = "pidfile {0} does not exist. " + \
                  "Daemon not running?\n"
        sys.stderr.write(message.format(self.pidfile))
        return  # not an error in a restart

    # Try killing the daemon process
    try:
        while 1:
            os.kill(pid, signal.SIGTERM)
            time.sleep(0.1)
    except OSError as err:
        # Compare errno instead of searching str(err.args) for
        # "No such process", which is locale/format dependent.
        if err.errno == errno.ESRCH:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print(str(err.args))
            sys.exit(1)
def _send_signal(self, sig): try: os.kill(self.pid, sig) except OSError as e: return e.errno == errno.EPERM else: return True
def skipCurrentSong(self):
    """Abort every registered player process.

    Rows whose recorded pid no longer exists are deleted as stale.
    """
    for player in Player.objects.all():
        try:
            os.kill(player.Pid, SIGABRT)
        except OSError:
            # The process is gone; drop the stale database entry.
            player.delete()
def stop(self):
    """Stop the daemon.

    SIGTERM is sent every 0.1s; every tenth attempt a SIGHUP nudge is
    sent as well.  When the process disappears, the stale pidfile is
    removed.  (Modernized from Python 2 ``except E, e`` / ``print``
    syntax and string-matched errors to errno comparison.)
    """
    # Get the pid from the pidfile
    pid = self.get_pid()

    if not pid:
        message = "pidfile %s does not exist. Not running?\n"
        sys.stderr.write(message % self.pidfile)

        # Just to be sure. A ValueError might occur if the PID file is
        # empty but does actually exist
        if os.path.exists(self.pidfile):
            os.remove(self.pidfile)

        return  # Not an error in a restart

    # Try killing the daemon process
    try:
        attempts = 0
        while 1:
            os.kill(pid, signal.SIGTERM)
            time.sleep(0.1)
            attempts += 1
            if attempts % 10 == 0:
                os.kill(pid, signal.SIGHUP)
    except OSError as err:
        if err.errno == errno.ESRCH:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print(str(err))
            sys.exit(1)
def run(self):
    # Optionally delay before launching the command.
    if self.delay:
        self.log.debug("delaying for %.3f secs: '%s'" % (self.delay, self.cmd))
        time.sleep(self.delay)

    self.log.debug("program starting '%s'" % self.cmd)
    # Popen3 is the Python 2-only popen2 API; the second argument
    # captures stderr as well.
    p = Popen3(self.cmd, True)

    if self.stdin:
        # Popen3.poll() returns -1 while the child is still running.
        if p.poll() == -1:
            p.tochild.write(self.stdin)
            p.tochild.flush()
            p.tochild.close()
            #log.debug("wrote '%s' to child" % self.stdin)
        else:
            self.log.error("child exited before stdin was written to")

    done = False
    # Poll in 0.2 s steps until the child exits or killtime runs out.
    while not done and self.killtime > 0:
        time.sleep(0.2)
        if p.poll() != -1:
            done = True
        self.killtime -= 0.2

    if not done and self.killsig != -1:
        # Child overran its allotted time; send the configured signal.
        try:
            os.kill(p.pid, self.killsig)
            self.killed = True
        except OSError, e:
            self.log.exception("problem killing")
            self.exception = e
    return
def test_unexisting(self):
    watcher = self.arbiter.get_watcher("test")
    to_kill = []
    self.assertEquals(len(watcher.processes), 2)
    for process in watcher.processes.values():
        to_kill.append(process.pid)
        # the process is killed in an unusual way
        os.kill(process.pid, signal.SIGSEGV)
        # and wait for it to die
        try:
            pid, status = os.waitpid(process.pid, 0)
        except OSError:
            pass
        # ensure the old process is considered "unexisting"
        self.assertEquals(process.status, UNEXISTING)

    # this should clean up and create a new process
    watcher.reap_and_manage_processes()

    # we should have a new process here now
    self.assertEquals(len(watcher.processes), 2)
    for p in watcher.processes.values():
        # and that one needs to have a new pid.
        self.assertFalse(p.pid in to_kill)
        # and should not be unexisting...
        self.assertNotEqual(p.status, UNEXISTING)
def _reap_pid(self): if self.pid: # Make sure all processes are stopped os.kill(self.pid, signal.SIGTERM) # Make sure we reap our test process self._reap_test()
def execute(self, context, args):
    """Implement a ``kill``-style builtin.

    *args* may contain one option of the form ``-SIGNAME`` or
    ``-SIGNUM``; all remaining args are pids to signal (SIGTERM by
    default).  When ``context.input`` is an iterable of objects with a
    ``.pid`` attribute, those processes are signalled too.

    Raises ValueError for a numeric option that is not a known signal.
    """
    signum = signal.SIGTERM
    sigidx = -1
    for i, arg in enumerate(args):
        if not arg.startswith('-'):
            continue
        optval = arg[1:]
        if optval in _sigsym_to_value:
            signum = _sigsym_to_value[optval]
            sigidx = i
            break
        else:
            optnum = int(optval)
            if optnum in _sigvalue_to_sym:
                signum = optnum
                sigidx = i
                break
            else:
                # Interpolate with %: passing optnum as a second
                # constructor argument left the message unformatted.
                raise ValueError("Invalid signal number: %d" % optnum)
    if sigidx >= 0:
        del args[sigidx]
    for arg in map(int, args):
        os.kill(arg, signum)
    if context.input is not None:
        for arg in context.input:
            os.kill(arg.pid, signum)
    return []
def blocking_umount(mountpoint):
    '''Invoke fusermount and wait for daemon to terminate.'''

    # Refuse to unmount while another process is using the file system.
    with open('/dev/null', 'wb') as devnull:
        if subprocess.call(['fuser', '-m', mountpoint], stdout=devnull,
                           stderr=devnull) == 0:
            raise MountInUseError(mountpoint)

    ctrlfile = os.path.join(mountpoint, CTRL_NAME)

    log.debug('Flushing cache...')
    llfuse.setxattr(ctrlfile, 's3ql_flushcache!', b'dummy')

    # Get pid
    log.debug('Trying to get pid')
    pid = parse_literal(llfuse.getxattr(ctrlfile, 's3ql_pid?'), int)
    log.debug('PID is %d', pid)

    # Get command line to make race conditions less-likely
    cmdline = get_cmdline(pid)

    # Unmount
    log.debug('Unmounting...')

    if os.getuid() == 0 or platform.system() == 'Darwin':
        # MacOS X always uses umount rather than fusermount
        umount_cmd = ['umount', mountpoint]
    else:
        umount_cmd = ['fusermount', '-u', mountpoint]

    if subprocess.call(umount_cmd) != 0:
        raise UmountSubError(mountpoint)

    # Wait for daemon
    log.debug('Uploading metadata...')
    step = 0.1
    while True:
        try:
            # Signal 0: probe for existence without delivering a signal.
            os.kill(pid, 0)
        except OSError:
            log.debug('Kill failed, assuming daemon has quit.')
            break

        # Check that the process did not terminate and the PID
        # was reused by a different process
        cmdline2 = get_cmdline(pid)
        if cmdline2 is None:
            log.debug('Reading cmdline failed, assuming daemon has quit.')
            break
        elif cmdline2 == cmdline:
            log.debug('PID still alive and commandline unchanged.')
        else:
            log.debug('PID still alive, but cmdline changed')
            break

        # Process still exists, we wait with backoff capped near 1s.
        log.debug('Daemon seems to be alive, waiting...')
        time.sleep(step)
        if step < 1:
            step += 0.1
def ping_multilaunch(port, stop_event):
    """
    A single manager to ping all launches during multiprocess launches

    Args:
        port (int): Listening port number of the DataServer
        stop_event (Thread.Event): stop event
    """
    ds = DataServer(address=('127.0.0.1', port), authkey=DS_PASSWORD)
    ds.connect()
    fd = FWData()
    lp = ds.LaunchPad()
    while not stop_event.is_set():
        for pid, lid in fd.Running_IDs.items():
            if lid:
                try:
                    os.kill(pid, 0)  # throws OSError if the process is dead
                    lp.ping_launch(lid)
                except OSError:
                    # Mark the launch as no longer running/firing.
                    fd.Running_IDs[pid] = None
                    fd.FiringState[pid] = False
                    pass  # means this process is dead!
        # Sleep between rounds, waking early if stop_event is set.
        stop_event.wait(PING_TIME_SECS)
def kill_process(self):
    """Stop the process"""
    if os.name == 'nt':
        # os.kill cannot deliver real signals on Windows; terminate the
        # child through the Win32 API instead.
        import ctypes
        ctypes.windll.kernel32.TerminateProcess(int(self.process._handle), -1)
    else:
        os.kill(self.process.pid, 9)
def verify_process_alive(proc):
    # Only act while the worker is still in its start-up window and has
    # not yet exited.
    if proc.exitcode is None and proc in waiting_to_start:
        # Sanity-check the pool's fd bookkeeping before force-killing.
        assert proc.outqR_fd in fileno_to_outq
        assert fileno_to_outq[proc.outqR_fd] is proc
        assert proc.outqR_fd in hub.readers
        error('Timed out waiting for UP message from %r', proc)
        # SIGKILL: the worker never reported UP, so assume it is hung.
        os.kill(proc.pid, 9)
def killProcess(process):
    """Kill the given process

    :note: raises if kill is not supported by the os module"""
    if hasattr(os, "kill"):
        os.kill(process.pid, signal.SIGKILL)
    else:
        raise NotImplementedError("os module does not support 'kill'ing of processes on your platform")
def run(self):
    # Local exception type used to break out of communicate() when the
    # SIGALRM timeout fires.
    class Alarm(Exception):
        pass

    def alarm_handler(signum, frame):
        raise Alarm

    try:
        self.process = Popen(self.args, shell=True, stdout=PIPE, stderr=PIPE)
        # timeout == -1 means "no timeout".
        if self.timeout != -1:
            signal(SIGALRM, alarm_handler)
            alarm(self.timeout)
        try:
            self.stdout, self.stderr = self.process.communicate()
            if self.timeout != -1:
                # Cancel the pending alarm once the command completed.
                alarm(0)
        except Alarm:
            os.kill(self.process.pid, SIGKILL)
            raise CloudRuntimeException("Timeout during command execution")
        self.success = self.process.returncode == 0
    except:
        # NOTE(review): bare except re-wraps *everything* (including the
        # CloudRuntimeException raised above) — confirm this is intended.
        raise CloudRuntimeException(formatExceptionInfo())

    if not self.success:
        logging.debug("Failed to execute:" + self.getErrMsg())
def handleAction(self):
    """Handle extra argument for backwards-compatibility.

    * C{start} will simply do minimal pid checking and otherwise let
    twistd take over.
    * C{stop} will kill an existing running process if it matches the
    C{pidfile} contents.
    * C{status} will simply report if the process is up or not.
    """
    action = self["action"]
    pidfile = self.parent["pidfile"]
    program = settings["program"]
    instance = self["instance"]

    if action == "stop":
        if not exists(pidfile):
            print("Pidfile %s does not exist" % pidfile)
            raise SystemExit(0)
        pf = open(pidfile, 'r')
        try:
            pid = int(pf.read().strip())
            pf.close()
        except (IOError, ValueError):
            # Narrowed from a bare except: only read/parse failures
            # should be reported as an unreadable pidfile.
            print("Could not read pidfile %s" % pidfile)
            raise SystemExit(1)

        print("Sending kill signal to pid %d" % pid)
        try:
            os.kill(pid, 15)
        except OSError as e:
            if e.errno == errno.ESRCH:
                print("No process with pid %d running" % pid)
            else:
                raise

        raise SystemExit(0)
print "AccessListPolicerDelete RPC Passed" res.append("AccessListPolicerDelete RPC Passed and returned %s" % (poldelresult)) else: print "AccessListPolicerDelete RPC Failed" res.append("AccessListPolicerDelete RPC Failed and returned %s" % (poldelresult)) flag += 1 pause() print "FINAL RESULT : \n" for i in res: print i if flag > 0: print "TEST FAILED" else: print "TEST PASSED" except AbortionError as e: print "code is ", e.code print "details is ", e.details # except Exception as tx: # print ("Caught Exception {0}\n".format(tx)) # except Exception as tx: # print '%s' % (tx.message) while True: import signal os.kill(os.getpid(), signal.SIGTERM)
def send_signal(self, sig):
    """Deliver *sig* to the process identified by ``self.pid``."""
    target_pid = self.pid
    os.kill(target_pid, sig)
def cancelBtn_fun():
    """Ask the user for confirmation and, if granted, terminate this
    process with SIGTERM (lets cleanup handlers run, unlike SIGKILL)."""
    # Message typos fixed ("Conformation" -> "Confirmation", missing
    # "Want"); askyesno already returns a bool, so test truthiness
    # instead of comparing against True.
    confirmed = messagebox.askyesno(
        "Exit Confirmation", "Are You Sure You Want To Exit ?")
    if confirmed:
        pid = os.getpid()
        os.kill(pid, signal.SIGTERM)
def check_kill_process(pstring):
    """SIGKILL every process whose ``ps ax`` line matches *pstring*.

    WARNING: *pstring* is interpolated into a shell pipeline unescaped;
    do not pass untrusted input.
    """
    pipeline = "ps ax | grep " + pstring + " | grep -v grep"
    for line in os.popen(pipeline):
        pid = line.split()[0]
        os.kill(int(pid), signal.SIGKILL)
def KillProcessWithID(pid):
    """Terminate the process identified by *pid*.

    On Windows the whole process tree is force-killed via taskkill;
    elsewhere a plain SIGTERM is sent.
    """
    if platform.system() != 'Windows':
        os.kill(pid, signal.SIGTERM)
    else:
        os.popen('taskkill /T /F /PID %d' % pid)
def send_signal(self, signum, ignore_ESRCH=False):
    """Send *signum* to ``self.pid``.

    When *ignore_ESRCH* is true, a "no such process" error is swallowed;
    every other OSError propagates.
    """
    try:
        os.kill(self.pid, signum)
    except OSError as err:
        if ignore_ESRCH and err.errno == errno.ESRCH:
            return
        raise
def handleAction(self):
    """Handle extra argument for backwards-compatibility.

    * C{start} will simply do minimal pid checking and otherwise let
    twistd take over.
    * C{stop} will kill an existing running process if it matches the
    C{pidfile} contents.
    * C{status} will simply report if the process is up or not.
    """
    action = self["action"]
    pidfile = self.parent["pidfile"]
    program = settings["program"]
    instance = self["instance"]

    if action == "stop":
        if not exists(pidfile):
            print("Pidfile %s does not exist" % pidfile)
            raise SystemExit(0)
        pf = open(pidfile, 'r')
        try:
            pid = int(pf.read().strip())
            pf.close()
        except ValueError:
            # Unparseable pidfile: remove it so a later start can succeed.
            print("Failed to parse pid from pidfile %s" % pidfile)
            pf.close()
            try:
                print("removing corrupted pidfile %s" % pidfile)
                os.unlink(pidfile)
            except IOError:
                print("Could not remove pidfile %s" % pidfile)
            raise SystemExit(1)
        except IOError:
            print("Could not read pidfile %s" % pidfile)
            raise SystemExit(1)

        print("Sending kill signal to pid %d" % pid)
        try:
            # 15 == SIGTERM.
            os.kill(pid, 15)
        except OSError as e:
            if e.errno == errno.ESRCH:
                print("No process with pid %d running" % pid)
            else:
                raise
        raise SystemExit(0)

    elif action == "status":
        if not exists(pidfile):
            print("%s (instance %s) is not running" % (program, instance))
            raise SystemExit(1)
        pf = open(pidfile, "r")
        try:
            pid = int(pf.read().strip())
            pf.close()
        except ValueError:
            print("Failed to parse pid from pidfile %s" % pidfile)
            pf.close()
            try:
                print("removing corrupted pidfile %s" % pidfile)
                os.unlink(pidfile)
            except IOError:
                print("Could not remove pidfile %s" % pidfile)
            raise SystemExit(1)
        except IOError:
            print("Failed to read pid from %s" % pidfile)
            raise SystemExit(1)

        if _process_alive(pid):
            print("%s (instance %s) is running with pid %d" %
                  (program, instance, pid))
            raise SystemExit(0)
        else:
            print("%s (instance %s) is not running" % (program, instance))
            raise SystemExit(1)

    elif action == "start":
        if exists(pidfile):
            pf = open(pidfile, 'r')
            try:
                pid = int(pf.read().strip())
                pf.close()
            except ValueError:
                print("Failed to parse pid from pidfile %s" % pidfile)
                pf.close()
                try:
                    print("removing corrupted pidfile %s" % pidfile)
                    os.unlink(pidfile)
                except IOError:
                    print("Could not remove pidfile %s" % pidfile)
                raise SystemExit(1)
            except IOError:
                print("Could not read pidfile %s" % pidfile)
                raise SystemExit(1)
            if _process_alive(pid):
                print("%s (instance %s) is already running with pid %d" %
                      (program, instance, pid))
                raise SystemExit(1)
            else:
                # Stale pidfile from a dead process: safe to remove.
                print("Removing stale pidfile %s" % pidfile)
                try:
                    os.unlink(pidfile)
                except IOError:
                    print("Could not remove pidfile %s" % pidfile)
        else:
            # Try to create the PID directory
            if not os.path.exists(settings["PID_DIR"]):
                try:
                    os.makedirs(settings["PID_DIR"])
                except OSError as exc:  # Python >2.5
                    # Tolerate a concurrent mkdir of the same directory.
                    if exc.errno == errno.EEXIST and os.path.isdir(settings["PID_DIR"]):
                        pass
                    else:
                        raise
        print("Starting %s (instance %s)" % (program, instance))

    else:
        print("Invalid action '%s'" % action)
        print("Valid actions: start stop status")
        raise SystemExit(1)
def cleanup():
    """Report and then SIGKILL the globally-tracked server process."""
    print('Killing server...')
    server_pid = pid  # module-level pid of the spawned server
    os.kill(server_pid, signal.SIGKILL)
def _kill_svnserve(self): os.kill(self._svnserve.pid, signal.SIGINT) self._svnserve.communicate()
def interrupt():
    """After a short delay, deliver SIGINT to the current process."""
    delay_seconds = 0.5
    time.sleep(delay_seconds)
    os.kill(os.getpid(), signal.SIGINT)
def stop(self):
    """SIGKILL the wrapped process.

    On Jython the Popen pid attribute can be None (Jython bug #2221),
    in which case the pid is resolved via self.getPid().
    """
    if self.process.pid is not None:
        pid = self.process.pid
    else:
        # Jython bug #2221 workaround
        pid = self.getPid(self.process)
    os.kill(pid, signal.SIGKILL)
def main():
    """Convert ILSVRC train/validation tar archives into nnabla cache files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str, nargs='+',
                        help='Source file or directory.')
    parser.add_argument('output', type=str, help='Destination directory.')
    parser.add_argument('-W', '--width', type=int, default=320,
                        help='width of output image (default:320)')
    parser.add_argument('-H', '--height', type=int, default=320,
                        help='height of output image (default:320)')
    parser.add_argument('-m', '--mode', default='trimming',
                        choices=['trimming', 'padding'],
                        help='shaping mode (trimming or padding) (default:trimming)')
    parser.add_argument('-S', '--shuffle', choices=['True', 'False'],
                        help='shuffle mode if not specified, train:True, val:False.' +
                        ' Otherwise specified value will be used for both.')
    parser.add_argument('-N', '--file-cache-size', type=int, default=100,
                        help='num of data in cache file (default:100)')
    parser.add_argument('-C', '--cache-type', default='npy',
                        choices=['h5', 'npy'],
                        help='cache format (h5 or npy) (default:npy)')
    parser.add_argument('--thinning', type=int, default=1,
                        help='Thinning rate')

    args = parser.parse_args()

    ############################################################################
    # Analyze tar
    # If it consists only of members corresponding to regular expression
    # 'n[0-9]{8}\.tar', it is judged as train data archive.
    # If it consists only of members corresponding to regular expression
    # 'ILSVRC2012_val_[0-9]{8}\.JPEG', it is judged as validation data archive.

    archives = {'train': None, 'val': None}
    for inputarg in args.input:
        print('Checking input file [{}]'.format(inputarg))
        archive = tarfile.open(inputarg)
        is_train = False
        is_val = False
        names = []
        for name in archive.getnames():
            if re.match(r'n[0-9]{8}\.tar', name):
                if is_val:
                    print('Train data {} includes in validation tar'.format(name))
                    exit(-1)
                is_train = True
            elif re.match(r'ILSVRC2012_val_[0-9]{8}\.JPEG', name):
                if is_train:
                    print('Validation data {} includes in train tar'.format(name))
                    exit(-1)
                is_val = True
            else:
                print('Invalid member {} includes in tar file'.format(name))
                exit(-1)
            names.append(name)
        # Each kind of archive may be supplied at most once.
        if is_train:
            if archives['train'] is None:
                archives['train'] = (archive, names)
            else:
                print('Please specify only 1 training tar archive.')
                exit(-1)
        if is_val:
            if archives['val'] is None:
                archives['val'] = (archive, names)
            else:
                print('Please specify only 1 validation tar archive.')
                exit(-1)

    # Read label of validation data, (Use ascending label of wordnet_id)
    validation_ground_truth = []
    g_file = VALIDATION_DATA_LABEL
    with open(g_file, 'r') as f:
        for l in f.readlines():
            validation_ground_truth.append(int(l.rstrip()))

    ############################################################################
    # Prepare logging
    tmpdir = tempfile.mkdtemp()
    logfilename = os.path.join(tmpdir, 'nnabla.log')

    # Temporarily chdir to tmpdir just before importing nnabla to reflect
    # nnabla.conf.
    cwd = os.getcwd()
    os.chdir(tmpdir)
    with open('nnabla.conf', 'w') as f:
        f.write('[LOG]\n')
        f.write('log_file_name = {}\n'.format(logfilename))
        f.write('log_file_format = %(funcName)s : %(message)s\n')
        f.write('log_console_level = CRITICAL\n')
    from nnabla.config import nnabla_config
    os.chdir(cwd)

    ############################################################################
    # Data iterator setting
    nnabla_config.set('DATA_ITERATOR', 'cache_file_format',
                      '.' + args.cache_type)
    nnabla_config.set('DATA_ITERATOR', 'data_source_file_cache_size',
                      str(args.file_cache_size))
    nnabla_config.set('DATA_ITERATOR',
                      'data_source_file_cache_num_of_threads', '1')

    if not os.path.isdir(args.output):
        os.makedirs(args.output)

    ############################################################################
    # Prepare status monitor
    from nnabla.utils.progress import configure_progress
    configure_progress(None, _progress)

    ############################################################################
    # Converter
    try:
        if archives['train'] is not None:
            from nnabla.logger import logger
            logger.info('StartCreatingCache')
            archive, names = archives['train']
            output = os.path.join(args.output, 'train')
            if not os.path.isdir(output):
                os.makedirs(output)
            _create_train_cache(archive, output, names, args)
        if archives['val'] is not None:
            from nnabla.logger import logger
            logger.info('StartCreatingCache')
            archive, names = archives['val']
            output = os.path.join(args.output, 'val')
            if not os.path.isdir(output):
                os.makedirs(output)
            _create_validation_cache(
                archive, output, names, validation_ground_truth, args)
    except KeyboardInterrupt:
        shutil.rmtree(tmpdir, ignore_errors=True)

        # Even if CTRL-C is pressed, it does not stop if there is a running
        # thread, so it sending a signal to itself.
        os.kill(os.getpid(), 9)

    ############################################################################
    # Finish
    # NOTE(review): this assigns a *local* `_finish`; if the module-level
    # flag is intended, a `global _finish` declaration is missing — confirm.
    _finish = True
    shutil.rmtree(tmpdir, ignore_errors=True)
def error_listener(self):
    """Block until a worker posts an error report, then requeue it and
    raise SIGUSR1 in this process so the main thread can handle it."""
    rank, original_trace = self.error_queue.get()
    # Put the report back so the SIGUSR1 handler can retrieve it.
    self.error_queue.put((rank, original_trace))
    os.kill(os.getpid(), signal.SIGUSR1)
def kill_():
    """Best-effort SIGQUIT to the process recorded in the lockfile.

    Modernized from Python 2 ``except Exception, e`` syntax; the bound
    exception was never used, so it is no longer captured.
    """
    lock = Lock(LOCKFILE)
    try:
        kill(lock.get_pid(), signal.SIGQUIT)
    except Exception:
        # Lockfile missing/corrupt or the process is already gone;
        # nothing useful to do.
        pass
def _kill_process_type(self,
                       process_type,
                       allow_graceful=False,
                       check_alive=True,
                       wait=False):
    """Kill a process of a given type.

    If the process type is PROCESS_TYPE_REDIS_SERVER, then we will kill
    all of the Redis servers.

    If the process was started in valgrind, then we will raise an
    exception if the process has a non-zero exit code.

    Args:
        process_type: The type of the process to kill.
        allow_graceful (bool): Send a SIGTERM first and give the process
            time to exit gracefully. If that doesn't work, then use
            SIGKILL. We usually want to do this outside of tests.
        check_alive (bool): If true, then we expect the process to be
            alive and will raise an exception if the process is already
            dead.
        wait (bool): If true, then this method will not return until the
            process in question has exited.

    Raises:
        This process raises an exception in the following cases:
            1. The process had already died and check_alive is true.
            2. The process had been started in valgrind and had a
               non-zero exit code.
    """
    process_infos = self.all_processes[process_type]
    if process_type != ray_constants.PROCESS_TYPE_REDIS_SERVER:
        assert len(process_infos) == 1
    for process_info in process_infos:
        process = process_info.process
        # Handle the case where the process has already exited.
        if process.poll() is not None:
            if check_alive:
                raise RuntimeError(
                    "Attempting to kill a process of type "
                    "'{}', but this process is already dead."
                    .format(process_type))
            else:
                continue

        if process_info.use_valgrind:
            # Under valgrind, terminate and inspect the exit code:
            # a non-zero code means valgrind found errors.
            process.terminate()
            process.wait()
            if process.returncode != 0:
                message = ("Valgrind detected some errors in process of "
                           "type {}. Error code {}.".format(
                               process_type, process.returncode))
                if process_info.stdout_file is not None:
                    with open(process_info.stdout_file, "r") as f:
                        message += "\nPROCESS STDOUT:\n" + f.read()
                if process_info.stderr_file is not None:
                    with open(process_info.stderr_file, "r") as f:
                        message += "\nPROCESS STDERR:\n" + f.read()
                raise RuntimeError(message)
            continue

        if process_info.use_valgrind_profiler:
            # Give process signal to write profiler data.
            os.kill(process.pid, signal.SIGINT)
            # Wait for profiling data to be written.
            time.sleep(0.1)

        if allow_graceful:
            process.terminate()
            # Allow the process one second to exit gracefully.
            timeout_seconds = 1
            try:
                process.wait(timeout_seconds)
            except subprocess.TimeoutExpired:
                pass

        # If the process did not exit, force kill it.
        if process.poll() is None:
            process.kill()
            # The reason we usually don't call process.wait() here is that
            # there's some chance we'd end up waiting a really long time.
            if wait:
                process.wait()

    del self.all_processes[process_type]
# Demo: what happens when a ProcessPoolExecutor worker dies unexpectedly.
from concurrent import futures
import os
import signal

with futures.ProcessPoolExecutor(max_workers=2) as ex:
    print('getting the pid for one worker')
    f1 = ex.submit(os.getpid)
    pid1 = f1.result()

    # Killing a worker out from under the pool breaks the whole pool.
    print('killing process {}'.format(pid1))
    os.kill(pid1, signal.SIGHUP)

    # Any further submission now fails with BrokenProcessPool.
    print('submitting another task')
    f2 = ex.submit(os.getpid)
    try:
        pid2 = f2.result()
    except futures.process.BrokenProcessPool as e:
        print('could not start new tasks: {}'.format(e))
import PageAdvanced
import PageNewConfig
import PageHelp
import PageStatus

# Wizards 2.0 (TMP!)
wizards2_path = os.path.realpath(__file__ + "/../wizards2")
if os.path.exists(wizards2_path):
    import wizards2

# Init translation
if CTK.cfg['admin!lang']:
    PageIndex.language_set(CTK.cfg.get_val('admin!lang'))

# Let's get asynchronous..
CTK.set_synchronous(False)

# Run forever (blocks until the admin UI shuts down)
CTK.run()

# Kill lingering processes: best effort, so OSError is ignored.
try:
    # Parent
    cherokee_admin_pid = os.getppid()
    os.kill(cherokee_admin_pid, signal.SIGTERM)

    # Itself (the whole process group)
    os.killpg(0, signal.SIGTERM)
except OSError:
    pass
def simulate(config, action, nodes, federated_only, geth):
    """
    Simulate the nucypher blockchain network.

    Arguments
    ==========

    action - Which action to perform; The choices are:
        - start: Start a multi-process nucypher network simulation
        - stop: Stop a running simulation gracefully

    Options
    ========

    --nodes - The quantity of nodes (processes) to execute during the simulation
    --duration = The number of periods to run the simulation before termination
    """
    if action == 'start':

        #
        # Blockchain Connection
        #
        if not federated_only:
            # Pick the backing provider: a local geth IPC socket, or the
            # in-memory pyevm tester chain.
            if geth:
                test_provider_uri = "ipc:///tmp/geth.ipc"
            else:
                test_provider_uri = "pyevm://tester"

            simulation_registry = TemporaryEthereumContractRegistry()
            simulation_interface = BlockchainDeployerInterface(provider_uri=test_provider_uri,
                                                               registry=simulation_registry,
                                                               compiler=SolidityCompiler())

            blockchain = TesterBlockchain(interface=simulation_interface,
                                          test_accounts=nodes,
                                          airdrop=False)

            accounts = blockchain.interface.w3.eth.accounts
            origin, *everyone_else = accounts

            # Set the deployer address from the freshly created test account
            simulation_interface.deployer_address = origin

            #
            # Blockchain Action
            #
            blockchain.ether_airdrop(amount=DEVELOPMENT_ETH_AIRDROP_AMOUNT)

            click.confirm("Deploy all nucypher contracts to {}?".format(test_provider_uri), abort=True)
            click.echo("Bootstrapping simulated blockchain network")

            # Deploy contracts in dependency order: token, then miner escrow,
            # then policy manager. Each deployer is armed, deployed, and then
            # wrapped into an agent for later use.
            token_deployer = NucypherTokenDeployer(blockchain=blockchain, deployer_address=origin)
            token_deployer.arm()
            token_deployer.deploy()
            token_agent = token_deployer.make_agent()

            miners_escrow_secret = os.urandom(DISPATCHER_SECRET_LENGTH)
            miner_escrow_deployer = MinerEscrowDeployer(token_agent=token_agent,
                                                        deployer_address=origin,
                                                        secret_hash=miners_escrow_secret)
            miner_escrow_deployer.arm()
            miner_escrow_deployer.deploy()
            miner_agent = miner_escrow_deployer.make_agent()

            policy_manager_secret = os.urandom(DISPATCHER_SECRET_LENGTH)
            policy_manager_deployer = PolicyManagerDeployer(miner_agent=miner_agent,
                                                            deployer_address=origin,
                                                            secret_hash=policy_manager_secret)
            policy_manager_deployer.arm()
            policy_manager_deployer.deploy()
            policy_agent = policy_manager_deployer.make_agent()

            airdrop_amount = DEVELOPMENT_TOKEN_AIRDROP_AMOUNT
            click.echo("Airdropping tokens {} to {} addresses".format(airdrop_amount, len(everyone_else)))
            _receipts = token_airdrop(token_agent=token_agent,
                                      origin=origin,
                                      addresses=everyone_else,
                                      amount=airdrop_amount)

            # Commit the current state of deployment to a registry file.
            click.echo("Writing filesystem registry")
            _sim_registry_name = blockchain.interface.registry.commit(filepath=DEFAULT_SIMULATION_REGISTRY_FILEPATH)

        click.echo("Ready to run swarm.")

        #
        # Swarm
        #

        # Select a port range to use on localhost for sim servers
        if not federated_only:
            sim_addresses = everyone_else
        else:
            sim_addresses = NotImplemented

        start_port = 8787
        counter = 0
        for sim_port_number, sim_address in enumerate(sim_addresses, start=start_port):

            #
            # Parse ursula parameters
            #
            rest_port = sim_port_number
            db_name = 'sim-{}'.format(rest_port)

            cli_exec = os.path.join(BASE_DIR, 'cli', 'main.py')
            python_exec = 'python'

            # NOTE(review): the template has three placeholders but four
            # format arguments. str.format silently drops the extras, so the
            # command becomes "python3 <python_exec> run_ursula
            # --rest-port <cli_exec> --db-name <rest_port>" and db_name is
            # never used. Confirm the intended command line and fix the
            # placeholder count.
            proc_params = '''
            python3 {} run_ursula --rest-port {} --db-name {}
            '''.format(python_exec, cli_exec, rest_port, db_name).split()

            if federated_only:
                proc_params.append('--federated-only')
            else:
                token_agent = NucypherTokenAgent(blockchain=blockchain)
                miner_agent = MinerAgent(token_agent=token_agent)
                miner = Miner(miner_agent=miner_agent, checksum_address=sim_address)

                # stake a random amount
                min_stake, balance = MIN_ALLOWED_LOCKED, miner.token_balance
                value = random.randint(min_stake, balance)

                # for a random lock duration
                min_locktime, max_locktime = MIN_LOCKED_PERIODS, MAX_MINTING_PERIODS
                periods = random.randint(min_locktime, max_locktime)

                miner.initialize_stake(amount=value, lock_periods=periods)
                click.echo("{} Initialized new stake: {} tokens for {} periods".format(sim_address, value, periods))

                proc_params.extend('--checksum-address {}'.format(sim_address).split())

            # Spawn
            click.echo("Spawning node #{}".format(counter+1))
            processProtocol = UrsulaProcessProtocol(command=proc_params)
            # NOTE(review): duplicate assignment — cli_exec was already set
            # above in this iteration.
            cli_exec = os.path.join(BASE_DIR, 'cli', 'main.py')
            ursula_proc = reactor.spawnProcess(processProtocol, cli_exec, proc_params)

            #
            # post-spawnProcess
            #

            # Start with some basic status data, then build on it
            rest_uri = "http://{}:{}".format('localhost', rest_port)
            sim_data = "Started simulated Ursula | ReST {}".format(rest_uri)
            rest_uri = "{host}:{port}".format(host='localhost', port=str(sim_port_number))
            # NOTE(review): str.format returns a new string and sim_data has
            # no remaining placeholders, so this call is a discarded no-op.
            # Confirm intent and remove or fix.
            sim_data.format(rest_uri)

            # if not federated_only:
            #     stake_infos = tuple(config.miner_agent.get_all_stakes(miner_address=sim_address))
            #     sim_data += '| ETH address {}'.format(sim_address)
            #     sim_data += '| {} Active stakes '.format(len(stake_infos))

            click.echo(sim_data)
            counter += 1

        click.echo("Starting the reactor")
        click.confirm("Start the reactor?", abort=True)
        try:
            reactor.run()
        finally:
            if not federated_only:
                click.echo("Removing simulation registry")
                os.remove(DEFAULT_SIMULATION_REGISTRY_FILEPATH)

            click.echo("Stopping simulated Ursula processes")
            for process in config.sim_processes:
                # NOTE(review): 9 is SIGKILL — signal.SIGKILL would be
                # clearer. Also note the 'stop' branch below iterates
                # config.ursula_processes while this uses
                # config.sim_processes; confirm which attribute is correct.
                os.kill(process.pid, 9)
                click.echo("Killed {}".format(process))

            click.echo("Simulation completed")

    elif action == 'stop':
        # Kill the simulated ursulas
        for process in config.ursula_processes:
            process.transport.signalProcess('KILL')

    elif action == 'status':
        if not config.simulation_running:
            status_message = "Simulation not running."
        else:
            ursula_processes = len(config.ursula_processes)
            status_message = """
            | Node Swarm Simulation Status |
            Simulation processes .............. {}
            """.format(ursula_processes)
        click.echo(status_message)

    elif action == 'demo':
        """Run the finnegans wake demo"""
        demo_exec = os.path.join(BASE_DIR, 'cli', 'demos', 'finnegans-wake-demo.py')
        process_args = [sys.executable, demo_exec]
        if federated_only:
            process_args.append('--federated-only')
        subprocess.run(process_args, stdout=subprocess.PIPE)
def signal_sender(self, updater):
    """Wait until *updater* reports it is running, then deliver SIGTERM
    to this process (used to exercise shutdown handling)."""
    own_pid = os.getpid()
    sleep(0.2)
    # Poll in small increments until the updater has actually started.
    while True:
        if updater.running:
            break
        sleep(0.2)
    os.kill(own_pid, signal.SIGTERM)
def kill_grunt_process(pid):
    # Terminate the grunt watcher process identified by *pid* with SIGTERM.
    # NOTE(review): `self` is referenced but is not a parameter — this only
    # works if the function is nested inside a method and captures `self`
    # (e.g. a management command) from the enclosing scope. Confirm at the
    # definition site.
    self.stdout.write('>>> Closing grunt process')
    os.kill(pid, signal.SIGTERM)
def kill_blender():
    """Terminate the running Blender process by sending SIGUSR1 to
    the current process."""
    own_pid = os.getpid()
    os.kill(own_pid, signal.SIGUSR1)
def KillProcess(self, request, context):
    """RPC handler: forcibly terminate the process named by ``request.pid``
    with SIGKILL and return an empty reply."""
    os.kill(request.pid, signal.SIGKILL)
    return mmsg.Empty()
def __exit__(self, *args, **kwargs):
    """Interrupt the wrapped command with SIGINT on context exit and reap
    it so no zombie is left behind."""
    target = self._pid
    os.kill(target, SIGINT)
    os.waitpid(target, 0)
    # False: any exception raised inside the context propagates.
    return False
def kill_actor(actor):
    """Forcibly terminate *actor*'s worker process and block until the
    operating system confirms the pid is gone."""
    target_pid = ray.get(actor.get_pid.remote())
    os.kill(target_pid, SIGKILL)
    wait_for_pid_to_exit(target_pid)
import os
import signal
import sys

# Read the pid of the motor.py that was launched at start. Every time
# dcmotor.py runs, the old motor.py is killed and launched again with the
# new values.
directorio = '/tmp/miriadax/'
archivo = directorio + 'motor.pid'
# Use a context manager instead of raw os.open/os.read so the descriptor
# is closed even on error, and avoid shadowing the `file` builtin.
with open(archivo, 'r') as pid_file:
    matar = int(pid_file.read(30))

# Folder of this program.
basedir = os.path.dirname(os.path.abspath(__file__))

# Kill the running motor.py. SIGKILL (previously the magic number 9) gives
# it no chance to clean up, matching the original behaviour.
os.kill(matar, signal.SIGKILL)

# Launch the script again with the new values:
#   first parameter  = modo (0->stop, 1->forward, 2->backward)
#   second parameter = velocidad (between 0->min and 1->max)
# NOTE(review): the command line is built from sys.argv and handed to a
# shell via os.system — callers must ensure the arguments are trusted.
uno = sys.argv[1]
if uno != '0':
    dos = sys.argv[2]
    comando = basedir + '/inicio/motor.py ' + uno + ' ' + dos + ' &'
else:
    comando = basedir + '/inicio/motor.py 0 &'
os.system(comando)
pgrep_query = "%s.*--shard=%s" % (pgrep_query, options.shard) elif options.user is not None: # Personals mirror on behalf of another user. pgrep_query = "%s.*--user=%s" % (pgrep_query, options.user) proc = subprocess.Popen(['pgrep', '-U', os.environ["USER"], "-f", pgrep_query], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, _err_unused = proc.communicate() for pid in map(int, out.split()): if pid == os.getpid() or pid == os.getppid(): continue # Another copy of zephyr_mirror.py! Kill it. logger.info("Killing duplicate zephyr_mirror process %s" % (pid,)) try: os.kill(pid, signal.SIGINT) except OSError: # We don't care if the target process no longer exists, so just log the error logger.exception("") if options.shard is not None and set(options.shard) != set("a"): # The shard that is all "a"s is the one that handles personals # forwarding and zulip => zephyr forwarding options.forward_personals = False options.forward_from_zulip = False if options.forward_mail_zephyrs is None: options.forward_mail_zephyrs = subscribed_to_mail_messages() if options.session_path is None: options.session_path = "/var/tmp/%s" % (options.user,)
def main(self, argv):
    """Entry point for the squash_tester CLI.

    Parses *argv*, configures the shared squash_python client (the
    SQUASH_TESTER_API_KEY and SQUASH_TESTER_HOST environment variables
    take precedence over command-line options), reports any previously
    recorded errors, then generates a new occurrence of the requested
    kind: an unhandled signal (-s), a fatal exception recorded for later
    sending (-c), or an immediately-sent caught exception (default).

    Returns -1 when no API key is available; raises SystemExit when the
    source revision cannot be determined.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", "--immediate", action="store_true",
        help="""Send an occurrence for a non-fatal exception. The exception is
        sent immediately. This is the default action.""")
    parser.add_argument(
        "-c", "--crash", action="store_true", dest="hard",
        help="""Send an occurrence for a fatal exception. The exception is
        written to disk and sent the next time squash_tester is invoked. This
        overrides -i.""")
    parser.add_argument(
        "-s", "--signal", action="store_true",
        help="""Send an occurrence for an unhandled signal. The signal is
        handled as with -c. This overrides -c and -i.""")
    parser.add_argument(
        "-S", "--send", action="store_true", dest="send_only",
        help="""Only send previously recorded exceptions. """)
    parser.add_argument(
        "-r", "--revision", action="store", dest='rev',
        help="""Specify the Git revision to send with the exception. If not
        specified, uses `git rev-parse HEAD` to get the revision from the
        current directory.""")
    parser.add_argument(
        "-A", "--apikey",
        help="""Specify the API key on the command line instead of through an
        environment variable.""")
    parser.add_argument(
        "-e", "--env",
        help="""Specify the name of the deployment environment to associate
        with this occurrence. By default, uses "development".""")
    parser.epilog = "Before taking any other action, squash_tester reports all previously recorded errors."
    args = parser.parse_args(argv[1:])

    client = squash_python.get_client()
    # The environment variable wins; the -A option is only the fallback.
    client.APIKey = os.getenv('SQUASH_TESTER_API_KEY', args.apikey)
    if not client.APIKey:
        print("Error: Environment variable SQUASH_TESTER_API_KEY must be set.")
        parser.print_help()
        return -1
    client.environment = args.env or "development"
    client.host = os.getenv('SQUASH_TESTER_HOST', "http://localhost:3000")
    try:
        client.revision = args.rev or subprocess.check_output(
            "git rev-parse HEAD".split()).strip().decode('ascii')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; also restored the missing space between the two
        # implicitly-concatenated message fragments ("folder" "containing").
        print("Unable to determine source revision. Specify a revision with -r or chdir to a folder "
              "containing a git repository. ")
        raise SystemExit

    print("Reporting errors to %s if needed, env=%s, apikey=%s..." %
          (client.host, client.environment, client.APIKey[:8]))
    client.reportErrors()
    client.hook()

    if args.signal:
        print("Raising SIGABRT...")
        os.kill(os.getpid(), signal.SIGABRT)
        print("Signal recorded. Run squash_tester again to send it.")
    elif args.hard:
        print("Raising exception...")
        raise_it()
        print("Exception recorded. Run squash_tester again to send it.")
    elif not args.send_only:
        print("Catching exception...", end="")
        try:
            raise_it()
        except Exception:
            client.excepthook(*sys.exc_info())
        print("Caught. Sending unsent errors...")
        client.reportErrors()
        print("Sent.")
def run(self):
    """Run the inherited body, then terminate this process with SIGTERM.

    NOTE(review): assumes super().run() is a thread/worker body that
    returns promptly — confirm against the base class.
    """
    super().run()
    # Give the rest of the program a moment before delivering the signal.
    time.sleep(0.5)
    os.kill(os.getpid(), signal.SIGTERM)
def Stop(self):
    """Forcibly terminate the managed subprocess.

    Sends SIGKILL (previously the magic number 9) so the process is
    stopped immediately, with no opportunity to clean up.
    """
    import signal  # local import keeps this method self-contained
    os.kill(int(self._process.pid), signal.SIGKILL)
def tearDown(self):
    """Stop the metadata-server process and run the parent teardown.

    NOTE(review): mdata_proc exposes .pid and .join(), suggesting a
    multiprocessing.Process — confirm where it is created.
    """
    # os.kill() rather than mdata_proc.terminate() to avoid console spam.
    os.kill(self.mdata_proc.pid, signal.SIGKILL)
    # Reap the process so no zombie outlives the test.
    self.mdata_proc.join()
    super(TestSerialConcurrency, self).tearDown()