def test_getsignal(self):
    hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
    self.assertIsInstance(hup, signal.Handlers)
    self.assertEqual(signal.getsignal(signal.SIGHUP),
                     self.trivial_signal_handler)
    signal.signal(signal.SIGHUP, hup)
    self.assertEqual(signal.getsignal(signal.SIGHUP), hup)

def testRemoveHandler(self):
    default_handler = signal.getsignal(signal.SIGINT)
    unittest.installHandler()
    unittest.removeHandler()
    self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
    unittest.removeHandler()
    self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)

def main():
    '''Runs thumbor server with the specified arguments.'''
    global server
    global ip
    global port
    global conf

    parser = optparse.OptionParser(
        usage="thumbor or type thumbor -h (--help) for help",
        description=__doc__,
        version=__version__)
    parser.add_option(
        "-p", "--port", type="int", dest="port", default=8888,
        help="The port to run this thumbor instance at [default: %default].")
    parser.add_option(
        "-i", "--ip", dest="ip", default="0.0.0.0",
        help="The host address to run this thumbor instance at [default: %default].")
    parser.add_option(
        "-c", "--conf", dest="conf", default="",
        help="The path of the configuration file to use for this thumbor instance [default: %default].")
    parser.add_option(
        "-l", "--log-level", dest="log_level", default="warning",
        help="The log level to be used. Possible values are: debug, info, warning, error, critical or notset. [default: %default].")
    parser.add_option(
        "-a", "--app", dest="app", default=None,
        help="A custom app to use for this thumbor server in case you subclassed ThumborServiceApp [default: %default].")

    (options, args) = parser.parse_args()

    if not signal.getsignal(signal.SIGHUP):
        signal.signal(signal.SIGHUP, handle_sighup)
    if not signal.getsignal(signal.SIGTERM):
        signal.signal(signal.SIGTERM, handle_sigterm)

    port = options.port
    ip = options.ip
    conf = options.conf or None
    log_level = options.log_level

    run_app(ip, port, conf, log_level, options.app)

def testTwoResults(self):
    unittest2.installHandler()

    result = unittest2.TestResult()
    unittest2.registerResult(result)
    new_handler = signal.getsignal(signal.SIGINT)

    result2 = unittest2.TestResult()
    unittest2.registerResult(result2)
    self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)

    result3 = unittest2.TestResult()

    def test(result):
        pid = os.getpid()
        os.kill(pid, signal.SIGINT)

    try:
        test(result)
    except KeyboardInterrupt:
        self.fail("KeyboardInterrupt not handled")

    self.assertTrue(result.shouldStop)
    self.assertTrue(result2.shouldStop)
    self.assertFalse(result3.shouldStop)

def test_sigchld():
    r = reaper.Reaper()
    old_sigchld = signal.getsignal(signal.SIGCHLD)
    r.hookup_sigchld()
    assert r.old_sigchld is old_sigchld
    assert signal.getsignal(signal.SIGCHLD) == reaper.theReaper.handle_sigchld
    del r

    waiting = [DelayedExitFork(i, (i % 32) / 32) for i in range(256)]
    while waiting:
        signal.pause()
        i = 0
        while i < len(waiting):
            if waiting[i].obit is not None:
                f = waiting.pop(i)
                assert f.obit.pid == f.pid
                assert f.obit.exitstatus == f.expectedstatus
                assert f.obit.termsig is None
                assert f.obit.stopsig is None
                assert f.obit.coredump is False
            else:
                i += 1

    reaper.theReaper.unhookup_sigchld()
    assert signal.getsignal(signal.SIGCHLD) is old_sigchld
    reaper.theReaper = None

def _create_daemon_options(self, options):
    daemon = {'detach_process': not options.no_daemon}
    daemon.update({
        'stdin': sys.stdin,
        'stdout': sys.stdout,
        'stderr': sys.stderr})
    if options.no_daemon:
        # If we are not a daemon we must maintain the basic signal handlers
        daemon.update({'signal_map': {
            signal.SIGTTIN: signal.getsignal(signal.SIGTTIN),
            signal.SIGTTOU: signal.getsignal(signal.SIGTTOU),
            signal.SIGTSTP: signal.getsignal(signal.SIGTSTP),
            signal.SIGTERM: signal.getsignal(signal.SIGTERM)
        }})
    else:
        daemon.update({'signal_map': {
            signal.SIGTERM: signal.getsignal(signal.SIGTERM)}
        })
    daemon.update({'uid': os.getuid(), 'gid': os.getgid()})
    if options.app_path:
        full_path = options.app_path
        stat = os.stat(full_path)
        daemon.update({'uid': stat.st_uid, 'gid': stat.st_gid})
    if options.chroot:
        full_path = options.app_path
        stat = os.stat(full_path)
        daemon.update({'chroot_directory': full_path,
                       'uid': stat.st_uid,
                       'gid': stat.st_gid})
    return daemon

def clone_and_wait(callback, flags):
    pid = lowlevel.clone(flags)
    assert pid is not None
    if pid:
        def handle_signal(signum, frame):
            patient_terminate(pid, wait_flags=const.WALL)

        old_term = signal.getsignal(signal.SIGTERM)
        old_int = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGTERM, handle_signal)
        signal.signal(signal.SIGINT, handle_signal)
        wait_for_pid(pid, flags=const.WALL)
        signal.signal(signal.SIGTERM, old_term)
        signal.signal(signal.SIGINT, old_int)
    else:
        status = 1
        try:
            # In a new PID namespace the child must be PID 1.
            if flags & const.CLONE_NEWPID:
                assert lowlevel.getpid() == 1
            status = callback()
            status = int(status) if status else 0
        except:
            sys.excepthook(*sys.exc_info())
        finally:
            os._exit(status)

def test_sighandler_multiple(self):
    """Sighandler installs SIGFPE and SIGIO."""
    if os.name != 'nt':
        self.assertEqual(signal.getsignal(signal.SIGFPE).__name__,
                         signals_handler.__name__)
        self.assertEqual(signal.getsignal(signal.SIGIO).__name__,
                         signals_handler.__name__)

def test_termination_handler(self):
    """Events.terminate() exception test"""
    EventHandler.terminate()
    self.assertEqual('kill_process',
                     signal.getsignal(signal.SIGTSTP).__name__)
    self.assertIsNotNone(signal.getsignal(signal.SIGTERM))

def test_signal_signal():
    WORKING_CASES = SUPPORTED_SIGNALS + [6]
    WEIRD_CASES = {
        6: None,
        2: signal.default_int_handler}

    for x in WORKING_CASES:
        # Ideal handler signature
        def a(signum, frame):
            return x

        ret_val = signal.signal(x, a)
        if x not in WEIRD_CASES.keys():
            AreEqual(ret_val, signal.SIG_DFL)
        else:
            AreEqual(ret_val, WEIRD_CASES[x])
        AreEqual(a, signal.getsignal(x))

    # Strange handler signatures
    class KNew(object):
        def __call__(self, *args, **kwargs):
            pass

    a = KNew()
    ret_val = signal.signal(signal.SIGBREAK, a)
    AreEqual(a, signal.getsignal(signal.SIGBREAK))

def __enter__(self):
    if self._can_register_signal:
        self._orig_sigint = signal.getsignal(signal.SIGINT)
        self._orig_sigterm = signal.getsignal(signal.SIGTERM)
        for signum in signal.SIGINT, signal.SIGTERM:
            self._register_signal_handler(signum)
    return self

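# A minimal sketch of the matching __exit__ for the context manager above,
# restoring the handlers captured in __enter__. The attribute and guard names
# are assumptions taken from __enter__; the real class may differ.
def __exit__(self, exc_type, exc_value, traceback):
    if self._can_register_signal:
        signal.signal(signal.SIGINT, self._orig_sigint)
        signal.signal(signal.SIGTERM, self._orig_sigterm)
    return False  # never swallow exceptions
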
def test_getsignal(self): """ Test that signal.getsignal returns the currently installed handler. """ from signal import getsignal, signal, SIGINT, SIG_DFL, SIG_IGN def handler(*a): pass try: assert getsignal(SIGINT) == SIG_DFL signal(SIGINT, SIG_DFL) assert getsignal(SIGINT) == SIG_DFL signal(SIGINT, SIG_IGN) assert getsignal(SIGINT) == SIG_IGN signal(SIGINT, handler) assert getsignal(SIGINT) is handler finally: signal(SIGINT, SIG_DFL) raises(ValueError, getsignal, 4444) raises(ValueError, signal, 4444, lambda *args: None) import sys if sys.platform == 'win32': raises(ValueError, signal, 42, lambda *args: None) else: signal(42, lambda *args: None) signal(42, SIG_DFL)
def testTwoResults(self):
    if due_to_ironpython_bug("http://ironpython.codeplex.com/workitem/28171"):
        return
    unittest.installHandler()

    result = unittest.TestResult()
    unittest.registerResult(result)
    new_handler = signal.getsignal(signal.SIGINT)

    result2 = unittest.TestResult()
    unittest.registerResult(result2)
    self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)

    result3 = unittest.TestResult()

    def test(result):
        pid = os.getpid()
        os.kill(pid, signal.SIGINT)

    try:
        test(result)
    except KeyboardInterrupt:
        self.fail("KeyboardInterrupt not handled")

    if not due_to_ironpython_bug("http://ironpython.codeplex.com/workitem/28171"):
        self.assertTrue(result.shouldStop)
        self.assertTrue(result2.shouldStop)
        self.assertFalse(result3.shouldStop)

def addBlackhole(srcip):
    """
    Add an IP to blackhole.conf and tell BIND to reread its config file.
    """
    if opt.test:
        return
    try:
        # Go to the end of the file and write the IP, and close out the
        # blackhole block
        f = open(opt.blackhole, 'r+')
        f.seek(-3, 2)
        f.writelines([srcip, ';\n', '};\n'])
        f.close()
        os.chown(opt.blackhole, uid, gid)
    except:
        log("Can't add entry to blackhole file: %s" % opt.blackhole)
        return

    global gtimer
    if gtimer and int(time.time()) - int(gtimer) > opt.throttle:
        rndcReconfig()
        gtimer = time.time()
        signal.alarm(0)
        signal.signal(signal.SIGALRM, signal.SIG_IGN)
    else:
        # Only arm the alarm if no custom handler is installed yet
        # (SIG_DFL == 0 and SIG_IGN == 1).
        if signal.getsignal(signal.SIGALRM) in (signal.SIG_DFL, signal.SIG_IGN):
            signal.signal(signal.SIGALRM, alarmHandler)
            signal.alarm(opt.throttle)
    return

def setUp(self):
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(os, 'abort')
    shutdown._shutting_down = False
    shutdown._num_terminate_requests = 0
    self._sigint_handler = signal.getsignal(signal.SIGINT)
    self._sigterm_handler = signal.getsignal(signal.SIGTERM)

def test_original_signal_handlers_are_restored(self):
    my_sigterm = lambda signum, frame: None
    signal.signal(signal.SIGTERM, my_sigterm)
    run(join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.txt'),
        stdout=StringIO(), output=None, log=None, report=None)
    assert_equals(signal.getsignal(signal.SIGINT), self.orig_sigint)
    assert_equals(signal.getsignal(signal.SIGTERM), my_sigterm)

def test_restores_handler(self):
    handler_1 = lambda signum, frame: None
    handler_2 = lambda s, f: True
    with dbg._let_signal(signal.SIGALRM, handler_1):
        with dbg._let_signal(signal.SIGALRM, handler_2):
            self.assertEqual(signal.getsignal(signal.SIGALRM), handler_2)
        self.assertEqual(signal.getsignal(signal.SIGALRM), handler_1)

def setSignalHandlers():
    if getsignal(SIGINT) == SIG_IGN:
        print "SIGINT was ignored, restoring to default handler"
        signal(SIGINT, SIG_DFL)
    if getsignal(SIGTERM) == SIG_IGN:
        print "SIGTERM was ignored, restoring to default handler"
        signal(SIGTERM, SIG_DFL)

def test_replace_handler_and_restore_nested(self):
    with register_sigint_fallback(lambda: None):
        new_handler = signal.getsignal(signal.SIGINT)
        self.assertNotEqual(new_handler, signal.default_int_handler)
        with register_sigint_fallback(lambda: None):
            self.assertTrue(signal.getsignal(signal.SIGINT) is new_handler)
    self.assertEqual(
        signal.getsignal(signal.SIGINT), signal.default_int_handler)

def test_original_signal_handlers_are_restored(self):
    my_sigterm = lambda signum, frame: None
    signal.signal(signal.SIGTERM, my_sigterm)
    suite = TestSuite(name='My Suite')
    suite.tests.create(name='My Test').keywords.create('Log', args=['Hi!'])
    run(suite)
    assert_equals(signal.getsignal(signal.SIGINT), self.orig_sigint)
    assert_equals(signal.getsignal(signal.SIGTERM), my_sigterm)

def __init__(self):
    _LOGGER.info("Setting up the signal catcher")
    self.restart_lock = mp.Lock()
    self.terminating = False
    self.stop_hooks = []
    self.org_term = signal.getsignal(signal.SIGTERM)
    self.org_int = signal.getsignal(signal.SIGINT)
    self.org_usr1 = signal.getsignal(signal.SIGUSR1)

def test_timeout(self):
    tests = ['timeout.TimeoutTest.testTimeoutPass']
    msg = 'Failed to run test using OETestTimeout'
    alarm_signal = signal.getsignal(signal.SIGALRM)

    tc = self._testLoader(modules=self.modules, tests=tests)
    self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)

    msg = "OETestTimeout didn't restore SIGALRM"
    self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)

def test_000_run_me_first(self):
    for x in [x for x in SUPPORTED_SIGNALS if x != signal.SIGINT]:
        self.assertEqual(signal.getsignal(x), 0)
    self.assertEqual(signal.getsignal(signal.SIGINT),
                     signal.default_int_handler)

    for x in xrange(1, 23):
        if x in SUPPORTED_SIGNALS:
            continue
        self.assertEqual(signal.getsignal(x), None)

def test_timeout_fail(self):
    tests = ['timeout.TimeoutTest.testTimeoutFail']
    msg = "OETestTimeout test didn't timeout as expected"
    alarm_signal = signal.getsignal(signal.SIGALRM)

    tc = self._testLoader(modules=self.modules, tests=tests)
    self.assertFalse(tc.runTests().wasSuccessful(), msg=msg)

    msg = "OETestTimeout didn't restore SIGALRM"
    self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)

def __init__(self):
    self.original_sigint = signal.getsignal(signal.SIGINT)
    self.original_sigterm = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGINT, self.exit_gracefully)
    signal.signal(signal.SIGTERM, self.exit_gracefully)
    if WINDOWS:
        self.original_sigbreak = signal.getsignal(signal.SIGBREAK)
        signal.signal(signal.SIGBREAK, self.exit_gracefully)

def run_me_first():
    for x in [x for x in SUPPORTED_SIGNALS if x != signal.SIGINT]:
        AreEqual(signal.getsignal(x), 0)
    AreEqual(signal.getsignal(signal.SIGINT), signal.default_int_handler)

    for x in xrange(1, 23):
        if x in SUPPORTED_SIGNALS:
            continue
        AreEqual(signal.getsignal(x), None)

def testRemoveHandler(self):
    default_handler = signal.getsignal(signal.SIGINT)
    unittest2.installHandler()
    unittest2.removeHandler()
    self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
    # check that calling removeHandler multiple times has no ill-effect
    unittest2.removeHandler()
    self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)

def stop(self, finish_timeout=None):
    """Stop workers and return any unprocessed input values"""
    if not self.workers:
        return

    # ignore SIGINT and SIGTERM for now (restore later)
    sigint_handler = signal.getsignal(signal.SIGINT)
    sigterm_handler = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    for worker in self.workers:
        worker.stop()

    aborted = []
    inputs_vacuum = WaitableQueue.Vacuum(self.q_input, aborted)
    started = time.time()

    def any_alive():
        for worker in self.workers:
            if worker.is_alive():
                return True
        return False

    try:
        while True:
            if not any_alive():
                break
            if finish_timeout is not None and (time.time() - started) > finish_timeout:
                break
            time.sleep(0.1)

        for worker in self.workers:
            if worker.is_alive():
                worker.terminate()
        for worker in self.workers:
            if worker.is_alive():
                worker.join()
        self.workers = []
    finally:
        time.sleep(0.1)
        inputs_vacuum.stop()
        self._results_vacuum.stop()
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigterm_handler)

    return aborted

def _install_signal_handlers(client):
    # Install the SIGINT/SIGTERM handlers if not done so far
    if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        signal.signal(signal.SIGINT,
                      functools.partial(_handle_signals, client))
    if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
        # No custom signal handling was added, install our own
        # (the original installed on SIGINT here, a copy-paste bug)
        signal.signal(signal.SIGTERM,
                      functools.partial(_handle_signals, client))

def poll(self, timeout=0):
    '''
    Waits for all the jobs to be finished and returns True.
    If timeout is specified and the time runs out, immediately returns False.
    '''
    abortionEvent = threading.Event()
    mutex = threading.Lock()
    emitted_signum = []

    def signal_handler(signum, ctx):
        mutex.acquire()
        emitted_signum.append(signum)
        try:
            abortionEvent.set()
        finally:
            mutex.release()

    def poller():
        while not self.isFinished() and not abortionEvent.isSet():
            # updates status of all AbstractSessionProxy
            mutex.acquire()
            try:
                self.update()
            finally:
                mutex.release()
            # sleeps for update interval
            abortionEvent.wait(self.__theUpdateInterval)

    prev_handler = {}
    t = None
    try:
        for signum in (signal.SIGINT, signal.SIGTERM):
            prev_handler[signum] = signal.signal(signum, signal_handler)
        t = threading.Thread(target=poller)
        t.start()
        if timeout:
            try:
                t.join(timeout)
            except:
                pass
            if t.isAlive():
                abortionEvent.set()
                return False
        else:
            t.join()
        return True
    finally:
        if t:
            del t
        del signal_handler
        del poller
        for signum, hdlr in prev_handler.iteritems():
            signal.signal(signum, hdlr)
        del prev_handler
        if emitted_signum:
            # re-dispatch the first pending signal to its restored handler
            signal.getsignal(emitted_signum[0])(emitted_signum[0],
                                                inspect.currentframe())

def test():
    self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)

                selectionInProgress = False
                results["buildState"] = buildServices()
                return results["buildState"]
            if key.name == 'KEY_ESCAPE':
                results["buildState"] = False
                return results["buildState"]
        elif key:
            if key == ' ':  # Space pressed
                checkMenuItem(selection)  # Update checked list
                setCheckedMenuItems()  # Update UI memory
                checkForIssues()
                needsRender = 1
            elif key == 'h':  # H pressed
                if hideHelpText:
                    hideHelpText = False
                else:
                    hideHelpText = True
                needsRender = 1
            else:
                print(key)
                time.sleep(0.5)

        selection = selection % len(menu)
        mainRender(menu, selection, needsRender)


originalSignalHandler = signal.getsignal(signal.SIGINT)
main()
signal.signal(signal.SIGWINCH, originalSignalHandler)

def __enter__(self):
    self.__old_signal = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGTERM, self.kill_job)
    return self

def salt_minion():
    '''
    Start the salt minion in a subprocess.
    Auto restart minion on error.
    '''
    import signal

    import salt.utils.platform
    import salt.utils.process
    salt.utils.process.notify_systemd()

    import salt.cli.daemons
    import multiprocessing
    if u'' in sys.path:
        sys.path.remove(u'')

    if salt.utils.platform.is_windows():
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    if u'--disable-keepalive' in sys.argv:
        sys.argv.remove(u'--disable-keepalive')
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    def escalate_signal_to_process(pid, signum, sigframe):  # pylint: disable=unused-argument
        '''
        Escalate the signal received to the multiprocessing
        process that is actually running the minion
        '''
        # escalate signal
        os.kill(pid, signum)

    # keep one minion subprocess running
    prev_sigint_handler = signal.getsignal(signal.SIGINT)
    prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
    while True:
        try:
            process = multiprocessing.Process(target=minion_process)
            process.start()
            signal.signal(signal.SIGTERM,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGINT,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
            signal.signal(signal.SIGHUP,
                          functools.partial(escalate_signal_to_process,
                                            process.pid))
        except Exception:  # pylint: disable=broad-except
            # if multiprocessing does not work
            minion = salt.cli.daemons.Minion()
            minion.start()
            break

        process.join()

        # Process exited or was terminated. Since we're going to try to
        # restart it, we MUST reset signal handling to the previous handlers
        signal.signal(signal.SIGINT, prev_sigint_handler)
        signal.signal(signal.SIGTERM, prev_sigterm_handler)

        if process.exitcode != salt.defaults.exitcodes.SALT_KEEPALIVE:
            sys.exit(process.exitcode)

        # on top of the random_reauth_delay already performed,
        # delay extra to reduce flooding and free resources
        # NOTE: values are static but should be fine.
        time.sleep(2 + randint(1, 10))

        # need to reset logging because new minion objects
        # cause extra log handlers to accumulate
        rlogger = logging.getLogger()
        for handler in rlogger.handlers:
            rlogger.removeHandler(handler)
        logging.basicConfig()

def __init__(
    self,
    web_port=26000,
    window_height=40,
    window_width=130,
    auto_scroll=True,
    max_log_lines=500,
    wait_on_quit=True,
    min_refresh_rate=1000,
    bytes_to_str=helpers.hex_to_hexstr,
):
    """
    :type web_port: int
    :param web_port: Webinterface port. Default 26000

    :type window_height: int
    :param window_height: Default console height, set to on startup. Default 40

    :type window_width: int
    :param window_width: Default console width, set to on startup. Default 130

    :type auto_scroll: bool
    :param auto_scroll: Whether to auto-scroll the cases and crashed windows to always display the last line
                        if there are too many lines to display all of them. Default True

    :type max_log_lines: int
    :param max_log_lines: Maximum log lines to keep in the internal storage. Additional lines exceeding this
                          limit will not be displayed. Default 500

    :type wait_on_quit: bool
    :param wait_on_quit: Whether to keep the GUI open and wait for user-input when the main thread is about
                         to exit. Default True

    :type min_refresh_rate: int
    :param min_refresh_rate: The delay between two checks for a resize of the terminal in milliseconds.
                             Increment 100 ms. Default 1000 ms

    :type bytes_to_str: function
    :param bytes_to_str: Function that converts sent/received bytes data to string for logging.
    """
    self._title = "boofuzz"
    self._web_port = web_port
    self._max_log_lines = max_log_lines
    self._auto_scroll = auto_scroll
    self._current_data = None
    self._log_storage = []
    self._fail_storage = []
    self._wait_on_quit = wait_on_quit
    self._quit = False
    self._status = STATUS_RUNNING
    self._refresh_interval = min_refresh_rate
    self._event_resize = True
    self._event_log = False
    self._event_case_close = False
    self._event_crash = False
    self._total_index = 0
    self._total_num_mutations = 0
    self._current_name = ""
    self._current_index = 0
    self._current_num_mutations = 0
    self._format_raw_bytes = bytes_to_str
    self._version = helpers.get_boofuzz_version(helpers)

    # Resize console to minimum size
    self._width, self._height = get_terminal_size()
    if self._height < window_height or self._width < window_width:
        print("\x1b[8;{};{}t".format(window_height, window_width))
        self._height, self._width = window_height, window_width
    self._height_old = 0
    self._width_old = 0
    self._min_size_ok = True

    sys.stdout = sys.stderr = self._std_buffer = StringIO()
    atexit.register(self._cleanup)

    self._stdscr = curses.initscr()
    curses.start_color()
    curses.use_default_colors()
    curses.noecho()
    curses.curs_set(0)
    self._stdscr.nodelay(True)

    # Curses color pairs
    curses.init_pair(COLOR_PAIR_WHITE, curses.COLOR_WHITE, -1)
    curses.init_pair(COLOR_PAIR_CYAN, curses.COLOR_CYAN, -1)
    curses.init_pair(COLOR_PAIR_RED, curses.COLOR_RED, -1)
    curses.init_pair(COLOR_PAIR_YELLOW, curses.COLOR_YELLOW, -1)
    curses.init_pair(COLOR_PAIR_GREEN, curses.COLOR_GREEN, -1)
    curses.init_pair(COLOR_PAIR_MAGENTA, curses.COLOR_MAGENTA, -1)
    curses.init_pair(COLOR_PAIR_BLACK, curses.COLOR_BLACK, curses.COLOR_WHITE)

    # Start thread and restore the original SIGWINCH handler
    self._draw_thread = threading.Thread(name="curses_logger",
                                         target=self._draw_screen)
    self._draw_thread.setDaemon(True)
    current_signal_handler = signal.getsignal(signal.SIGWINCH)
    self._draw_thread.start()
    signal.signal(signal.SIGWINCH, current_signal_handler)

def download_drive_file(url: str, target_path: str, chunk_size=32768) -> None:
    """Download a file via a public Google Drive url.

    Example usage:
    ```
    download_file_from_google_drive(
        "https://drive.google.com/file/d/1AsY9Cs3xE0RSlr0FKlnSKHp6zIwFSvXe/view",
        "/home/brent/Downloads/test.pdf"
    )
    ```

    Args:
        url (str): Google Drive url.
        target_path (str): Destination to write to.
    """

    # Create directory if it doesn't exist yet
    directory = os.path.dirname(target_path)
    if not os.path.isdir(directory):
        os.makedirs(directory)
        print("[fannypack-drive] Created directory:", directory)

    # Parse URL
    drive_id = _drive_id_from_url(url)
    download_url = "https://docs.google.com/uc?export=download"

    # Download file
    session = requests.Session()
    response = session.get(
        download_url,
        params={"id": drive_id},
        stream=True,
        headers={"Accept-Encoding": None},
    )
    token = None
    for key, value in response.cookies.items():
        if key.startswith("download_warning"):  # pragma: no cover
            params = {"id": drive_id, "confirm": value}
            response = session.get(download_url, params=params, stream=True)
            break

    # Delete partially downloaded files if we hit interrupt (Ctrl+C) before
    # download finishes
    try:
        orig_handler = signal.getsignal(signal.SIGINT)

        def sigint_handler(sig, frame):  # pragma: no cover
            print("[fannypack-drive] Deleting file:", target_path)
            os.remove(target_path)
            orig_handler(sig, frame)

        # Restore SIGINT handler
        if orig_handler is not None:
            signal.signal(signal.SIGINT, orig_handler)

        signal.signal(signal.SIGINT, sigint_handler)
    except ValueError as e:  # pragma: no cover
        # signal throws a ValueError if we're not in the main thread
        print("[fannypack-drive] Error while attaching SIGINT handler:", e)
        orig_handler = None

    # Download file
    progress_bar = tqdm(unit="iB", unit_scale=True)
    with open(target_path, "wb") as f:
        for chunk in response.iter_content(chunk_size):
            # Filter out keep-alive new chunks
            if chunk:
                progress_bar.update(len(chunk))
                f.write(chunk)

    # Restore SIGINT handler
    if orig_handler is not None:
        signal.signal(signal.SIGINT, orig_handler)

def _rebindSignal(self, s, new):
    """Bind new signal handler while storing old one in _prev_signals"""
    self.__prev_signals[s] = signal.getsignal(s)
    signal.signal(s, new)

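# A minimal sketch of a matching restore helper, assuming the _prev_signals
# mapping that _rebindSignal above populates; the hosting class may name and
# shape its counterpart differently.
def _restoreSignals(self):
    """Reinstall every handler previously saved by _rebindSignal."""
    for s, old in self.__prev_signals.items():
        signal.signal(s, old)
    self.__prev_signals.clear()
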
print "Releasing socket" signal.signal(signal.SIGINT, original_sigint) if (conn != 0): conn.close() sys.exit(1) # restore the exit gracefully handler here signal.signal(signal.SIGINT, exit_gracefully) if __name__ == "__main__": database = get_database(database_file) # store the original SIGINT handler for ctrl-c handling original_sigint = signal.getsignal(signal.SIGINT) signal.signal(signal.SIGINT, exit_gracefully) # Define parameters for tcp connection TCP_IP = '192.168.1.117' TCP_PORT = 60001 BUFFER_SIZE = 1405 conn = socket._socketobject( ) # Instantiate dummy conn variable in case ctrl-c is pressed before actual connection is established # Open tcp connection s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Allow for the socket to be reused s.bind((TCP_IP, TCP_PORT)) s.listen(1)
def isDefaultHandler():
    """
    Determine whether the I{SIGCHLD} handler is the default or not.
    """
    return signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL

def signal_check(self, screen):
    """Dummy callback for screen wrapper."""
    self.assertEqual(signal.getsignal(signal.SIGWINCH),
                     screen._resize_handler)

def installHandler():
    global _interrupt_handler
    if _interrupt_handler is None:
        default_handler = signal.getsignal(signal.SIGINT)
        _interrupt_handler = _InterruptHandler(default_handler)
        signal.signal(signal.SIGINT, _interrupt_handler)

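# A minimal sketch of the matching removeHandler, assuming _InterruptHandler
# exposes the handler it replaced as original_handler; the real module may
# track and reset its global state differently.
def removeHandler():
    global _interrupt_handler
    if _interrupt_handler is not None:
        signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
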
def run_get_node(*args, **kwargs) -> Tuple[Optional[Dict[str, Any]], 'ProcessNode']:
    """
    Run the FunctionProcess with the supplied inputs in a local runner.

    :param args: input arguments to construct the FunctionProcess
    :param kwargs: input keyword arguments to construct the FunctionProcess
    :return: tuple of the outputs of the process and the process node
    """
    manager = get_manager()
    runner = manager.get_runner()
    inputs = process_class.create_inputs(*args, **kwargs)

    # Remove all the known inputs from the kwargs
    for port in process_class.spec().inputs:
        kwargs.pop(port, None)

    # If any kwargs remain, the spec should be dynamic, so we raise if it isn't
    if kwargs and not process_class.spec().inputs.dynamic:
        raise ValueError(
            f'{function.__name__} does not support these kwargs: {kwargs.keys()}')

    process = process_class(inputs=inputs, runner=runner)

    # Only add handlers for interrupt signal to kill the process if we are in
    # a local and not a daemon runner. Without this check, running process
    # functions in a daemon worker would be killed if the daemon is shut down
    current_runner = manager.get_runner()
    original_handler = None
    kill_signal = signal.SIGINT

    if not current_runner.is_daemon_runner:

        def kill_process(_num, _frame):
            """Send the kill signal to the process in the current scope."""
            LOGGER.critical('runner received interrupt, killing process %s',
                            process.pid)
            result = process.kill(
                msg='Process was killed because the runner received an interrupt')
            return result

        # Store the current handler on the signal such that it can be
        # restored after process has terminated
        original_handler = signal.getsignal(kill_signal)
        signal.signal(kill_signal, kill_process)

    try:
        result = process.execute()
    finally:
        # If the `original_handler` is set, that means the `kill_process` was
        # bound, which needs to be reset
        if original_handler:
            signal.signal(signal.SIGINT, original_handler)

    store_provenance = inputs.get('metadata', {}).get('store_provenance', True)
    if not store_provenance:
        process.node._storable = False  # pylint: disable=protected-access
        process.node._unstorable_message = 'cannot store node because it was run with `store_provenance=False`'  # pylint: disable=protected-access

    return result, process.node

def _execute_helper(self, query, timeout=0, statement_params=None,
                    binding_params=None, is_internal=False,
                    _no_results=False, _is_put_get=None):
    del self.messages[:]

    if statement_params is not None and not isinstance(statement_params, dict):
        Error.errorhandler_wrapper(
            self.connection, self, ProgrammingError,
            {
                'msg': "The data type of statement params is invalid. "
                       "It must be dict.",
                'errno': ER_INVALID_VALUE,
            })

    # check if current installation include arrow extension or not,
    # if not, we set statement level query result format to be JSON
    if not CAN_USE_ARROW_RESULT:
        logger.debug("Cannot use arrow result format, fallback to json format")
        if statement_params is None:
            statement_params = {
                PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: 'JSON'}
        else:
            result_format_val = statement_params.get(
                PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT)
            if str(result_format_val).upper() == 'ARROW':
                self.check_can_use_arrow_resultset()
            elif result_format_val is None:
                statement_params[
                    PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT] = 'JSON'

    self._sequence_counter = self._connection._next_sequence_counter()
    self._request_id = uuid.uuid4()

    if logger.getEffectiveLevel() <= logging.DEBUG:
        logger.debug('running query [%s]', self._format_query_for_log(query))
    if _is_put_get is not None:
        # if told the query is PUT or GET, use the information
        self._is_file_transfer = _is_put_get
    else:
        # or detect it.
        self._is_file_transfer = self.PUT_SQL_RE.match(
            query) or self.GET_SQL_RE.match(query)
    logger.debug('is_file_transfer: %s', self._is_file_transfer is not None)

    real_timeout = timeout if timeout and timeout > 0 \
        else self._connection.network_timeout
    if real_timeout is not None:
        self._timebomb = Timer(real_timeout, self.__cancel_query, [query])
        self._timebomb.start()
        logger.debug('started timebomb in %ss', real_timeout)
    else:
        self._timebomb = None

    original_sigint = signal.getsignal(signal.SIGINT)

    def interrupt_handler(*_):
        try:
            signal.signal(signal.SIGINT, exit_handler)
        except (ValueError, TypeError):
            # ignore failures
            pass
        try:
            if self._timebomb is not None:
                self._timebomb.cancel()
                logger.debug('cancelled timebomb in finally')
                self._timebomb = None
            self.__cancel_query(query)
        finally:
            if original_sigint:
                try:
                    signal.signal(signal.SIGINT, original_sigint)
                except (ValueError, TypeError):
                    # ignore failures
                    pass
            raise KeyboardInterrupt

    try:
        if not original_sigint == exit_handler:
            signal.signal(signal.SIGINT, interrupt_handler)
    except ValueError:
        logger.debug(
            'Failed to set SIGINT handler. Not in main thread. Ignored...')

    ret = {'data': {}}
    try:
        ret = self._connection.cmd_query(
            query,
            self._sequence_counter,
            self._request_id,
            binding_params=binding_params,
            is_file_transfer=self._is_file_transfer,
            statement_params=statement_params,
            is_internal=is_internal,
            _no_results=_no_results)
    finally:
        try:
            if original_sigint:
                signal.signal(signal.SIGINT, original_sigint)
        except (ValueError, TypeError):
            logger.debug(
                'Failed to reset SIGINT handler. Not in main thread. '
                'Ignored...')
        except Exception:
            self.connection.incident.report_incident()
            raise
        if self._timebomb is not None:
            self._timebomb.cancel()
            logger.debug('cancelled timebomb in finally')

    if 'data' in ret and 'parameters' in ret['data']:
        for kv in ret['data']['parameters']:
            if 'TIMESTAMP_OUTPUT_FORMAT' in kv['name']:
                self._timestamp_output_format = kv['value']
            if 'TIMESTAMP_NTZ_OUTPUT_FORMAT' in kv['name']:
                self._timestamp_ntz_output_format = kv['value']
            if 'TIMESTAMP_LTZ_OUTPUT_FORMAT' in kv['name']:
                self._timestamp_ltz_output_format = kv['value']
            if 'TIMESTAMP_TZ_OUTPUT_FORMAT' in kv['name']:
                self._timestamp_tz_output_format = kv['value']
            if 'DATE_OUTPUT_FORMAT' in kv['name']:
                self._date_output_format = kv['value']
            if 'TIME_OUTPUT_FORMAT' in kv['name']:
                self._time_output_format = kv['value']
            if 'TIMEZONE' in kv['name']:
                self._timezone = kv['value']
            if 'BINARY_OUTPUT_FORMAT' in kv['name']:
                self._binary_output_format = kv['value']
        self._connection._set_parameters(
            ret, self._connection._session_parameters)

    self._sequence_counter = -1
    return ret

def start_server(self):
    """
    Daemonize if requested. Listen for connections. Run
    do_handshake() method for each connection. If the connection
    is a WebSockets client then call new_websocket_client() method
    (which must be overridden) for each new client connection.
    """

    if self.listen_fd != None:
        lsock = socket.fromfd(self.listen_fd, socket.AF_INET,
                              socket.SOCK_STREAM)
        if sys.hexversion < 0x3000000:
            # For python 2 we have to wrap the "raw" socket into a socket
            # object, otherwise ssl wrap_socket doesn't work.
            lsock = socket.socket(_sock=lsock)
    else:
        lsock = self.socket(self.listen_host, self.listen_port, False,
                            self.prefer_ipv6,
                            tcp_keepalive=self.tcp_keepalive,
                            tcp_keepcnt=self.tcp_keepcnt,
                            tcp_keepidle=self.tcp_keepidle,
                            tcp_keepintvl=self.tcp_keepintvl)

    if self.daemon:
        keepfd = self.get_log_fd()
        keepfd.append(lsock.fileno())
        self.daemonize(keepfd=keepfd, chdir=self.web)

    self.started()  # Some things need to happen after daemonizing

    # Allow override of signals
    original_signals = {
        signal.SIGINT: signal.getsignal(signal.SIGINT),
        signal.SIGTERM: signal.getsignal(signal.SIGTERM),
    }
    if getattr(signal, 'SIGCHLD', None) is not None:
        original_signals[signal.SIGCHLD] = signal.getsignal(signal.SIGCHLD)
    signal.signal(signal.SIGINT, self.do_SIGINT)
    signal.signal(signal.SIGTERM, self.do_SIGTERM)

    # make sure that _cleanup is called when children die
    # by calling active_children on SIGCHLD
    if getattr(signal, 'SIGCHLD', None) is not None:
        signal.signal(signal.SIGCHLD, self.multiprocessing_SIGCHLD)

    last_active_time = self.launch_time
    try:
        while True:
            try:
                try:
                    startsock = None
                    pid = err = 0
                    child_count = 0

                    # Collect zombie child processes
                    child_count = len(multiprocessing.active_children())

                    time_elapsed = time.time() - self.launch_time
                    if self.timeout and time_elapsed > self.timeout:
                        self.msg('listener exit due to --timeout %s'
                                 % self.timeout)
                        break

                    if self.idle_timeout:
                        idle_time = 0
                        if child_count == 0:
                            idle_time = time.time() - last_active_time
                        else:
                            idle_time = 0
                            last_active_time = time.time()

                        if idle_time > self.idle_timeout and child_count == 0:
                            self.msg('listener exit due to --idle-timeout %s'
                                     % self.idle_timeout)
                            break

                    try:
                        self.poll()

                        ready = select.select([lsock], [], [], 1)[0]
                        if lsock in ready:
                            startsock, address = lsock.accept()
                        else:
                            continue
                    except self.Terminate:
                        raise
                    except Exception:
                        _, exc, _ = sys.exc_info()
                        if hasattr(exc, 'errno'):
                            err = exc.errno
                        elif hasattr(exc, 'args'):
                            err = exc.args[0]
                        else:
                            err = exc[0]
                        if err == errno.EINTR:
                            self.vmsg("Ignoring interrupted syscall")
                            continue
                        else:
                            raise

                    if self.run_once:
                        # Run in same process if run_once
                        self.top_new_client(startsock, address)
                        if self.ws_connection:
                            self.msg('%s: exiting due to --run-once'
                                     % address[0])
                            break
                    else:
                        self.vmsg('%s: new handler Process' % address[0])
                        p = multiprocessing.Process(
                            target=self.top_new_client,
                            args=(startsock, address))
                        p.start()
                        # child will not return

                    # parent process
                    self.handler_id += 1

                except (self.Terminate, SystemExit, KeyboardInterrupt):
                    self.msg("In exit")
                    # terminate all child processes
                    if not self.run_once:
                        children = multiprocessing.active_children()
                        for child in children:
                            self.msg("Terminating child %s" % child.pid)
                            child.terminate()
                    break
                except Exception:
                    exc = sys.exc_info()[1]
                    self.msg("handler exception: %s", str(exc))
                    self.vmsg("exception", exc_info=True)
            finally:
                if startsock:
                    startsock.close()
    finally:
        # Close listen port
        self.vmsg("Closing socket listening at %s:%s",
                  self.listen_host, self.listen_port)
        lsock.close()

        # Restore signals
        for sig, func in original_signals.items():
            signal.signal(sig, func)

def run(self):
    """Main loop. Exit on SIGHUP, SIGINT, SIGTERM.

    Return True if SIGHUP was received, False otherwise.
    """
    self._web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
    if self._web_server_addrs is not None:
        self._web_server_addrs = [
            x.strip() for x in self._web_server_addrs.split(',')]

    sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
                         socket.SOCK_STREAM)
    try:
        sock.getpeername()
    except socket.error as exception:
        if exception.args[0] != errno.ENOTCONN:
            raise

    # Set up signal handlers.
    self._keep_going = True
    self._hup_received = False

    supported_signals = [signal.SIGINT, signal.SIGTERM]
    if hasattr(signal, 'SIGHUP'):
        supported_signals.append(signal.SIGHUP)

    old_sigs = [(x, signal.getsignal(x)) for x in supported_signals]

    for sig in supported_signals:
        if hasattr(signal, 'SIGHUP') and sig == signal.SIGHUP:
            signal.signal(sig, self._hup_handler)
        else:
            signal.signal(sig, self._int_handler)

    # Set close-on-exec
    fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # Main loop.
    while self._keep_going:
        try:
            rlist = select.select([sock], [], [], 1.0)[0]
        except select.error as exception:
            if exception.args[0] == errno.EINTR:
                continue
            raise

        if rlist:
            try:
                client_socket, addr = sock.accept()
            except socket.error as exception:
                if exception.args[0] in (errno.EINTR, errno.EAGAIN):
                    continue
                raise

            fcntl.fcntl(client_socket.fileno(), fcntl.F_SETFD,
                        fcntl.FD_CLOEXEC)

            if not (self._web_server_addrs is None or
                    (len(addr) == 2 and addr[0] in self._web_server_addrs)):
                client_socket.close()
                continue

            # Hand off to Connection.
            conn = Connection(client_socket, addr, self)
            if not self._thread_pool.add_job(conn):
                # No thread left, immediately close the socket to hopefully
                # indicate to the web server that we're at our limit...
                # and to prevent having too many opened (and useless)
                # files.
                client_socket.close()

    for signum, handler in old_sigs:
        signal.signal(signum, handler)

    # Return bool based on whether or not SIGHUP was received.
    sock.close()
    self.shutdown()
    return self._hup_received

def launch(self, launch_cmd, get_ip=True, qemuparams=None,
           extra_bootparams=None, env=None):
    # use logfile to determine the recipe-sysroot-native path and
    # then add in the site-packages path components and add that
    # to the python sys.path so qmp.py can be found.
    python_path = os.path.dirname(os.path.dirname(self.logfile))
    python_path += "/recipe-sysroot-native/usr/lib/python3.9/site-packages"
    sys.path.append(python_path)
    importlib.invalidate_caches()
    try:
        qmp = importlib.import_module("qmp")
    except:
        self.logger.error("qemurunner: qmp.py missing, please ensure it's installed")
        return False
    # Path relative to tmpdir used as cwd for qemu below to avoid unix socket path length issues
    qmp_file = "." + next(tempfile._get_candidate_names())
    qmp_param = ' -S -qmp unix:./%s,server,wait' % (qmp_file)
    qmp_port = self.tmpdir + "/" + qmp_file

    try:
        if self.serial_ports >= 2:
            self.threadsock, threadport = self.create_socket()
        self.server_socket, self.serverport = self.create_socket()
    except socket.error as msg:
        self.logger.error("Failed to create listening socket: %s" % msg[1])
        return False

    bootparams = ' printk.time=1'
    if extra_bootparams:
        bootparams = bootparams + ' ' + extra_bootparams

    # Ask QEMU to store the QEMU process PID in file, this way we don't have to parse running processes
    # and analyze descendants in order to determine it.
    if os.path.exists(self.qemu_pidfile):
        os.remove(self.qemu_pidfile)
    self.qemuparams = 'bootparams="{0}" qemuparams="-pidfile {1} {2}"'.format(
        bootparams, self.qemu_pidfile, qmp_param)
    if qemuparams:
        self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'

    if self.serial_ports >= 2:
        launch_cmd += ' tcpserial=%s:%s %s' % (threadport, self.serverport, self.qemuparams)
    else:
        launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams)

    self.origchldhandler = signal.getsignal(signal.SIGCHLD)
    signal.signal(signal.SIGCHLD, self.handleSIGCHLD)

    self.logger.debug('launchcmd=%s' % (launch_cmd))

    # FIXME: We pass in stdin=subprocess.PIPE here to work around stty
    # blocking at the end of the runqemu script when using this within
    # oe-selftest (this makes stty error out immediately). There ought
    # to be a proper fix but this will suffice for now.
    self.runqemu = subprocess.Popen(launch_cmd, shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    stdin=subprocess.PIPE,
                                    preexec_fn=os.setpgrp,
                                    env=env, cwd=self.tmpdir)
    output = self.runqemu.stdout

    #
    # We need the preexec_fn above so that all runqemu processes can easily be killed
    # (by killing their process group). This presents a problem if this controlling
    # process itself is killed however since those processes don't notice the death
    # of the parent and merrily continue on.
    #
    # Rather than hack runqemu to deal with this, we add something here instead.
    # Basically we fork off another process which holds an open pipe to the parent
    # and also is setpgrp. If/when the pipe sees EOF from the parent dying, it kills
    # the process group. This is like pctrl's PDEATHSIG but for a process group
    # rather than a single process.
    #
    r, w = os.pipe()
    self.monitorpid = os.fork()
    if self.monitorpid:
        os.close(r)
        self.monitorpipe = os.fdopen(w, "w")
    else:
        # child process
        os.setpgrp()
        os.close(w)
        r = os.fdopen(r)
        x = r.read()
        os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
        sys.exit(0)

    self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
    self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
                      (self.runqemutime, time.strftime("%D %H:%M:%S")))
    endtime = time.time() + self.runqemutime
    while not self.is_alive() and time.time() < endtime:
        if self.runqemu.poll():
            if self.runqemu_exited:
                self.logger.warning("runqemu during is_alive() test")
                return False
            if self.runqemu.returncode:
                # No point waiting any longer
                self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode)
                self._dump_host()
                self.logger.warning("Output from runqemu:\n%s" % self.getOutput(output))
                self.stop()
                return False
        time.sleep(0.5)

    if self.runqemu_exited:
        self.logger.warning("runqemu after timeout")
        return False

    if self.runqemu.returncode:
        self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode)
        return False

    if not self.is_alive():
        self.logger.warning('is_alive() failed later')
        return False

    # Create the client socket for the QEMU Monitor Control Socket
    # This will allow us to read status from Qemu if the process
    # is still alive
    self.logger.debug("QMP Initializing to %s" % (qmp_port))
    # chdir dance for path length issues with unix sockets
    origpath = os.getcwd()
    try:
        os.chdir(os.path.dirname(qmp_port))
        try:
            self.qmp = qmp.QEMUMonitorProtocol(os.path.basename(qmp_port))
        except OSError as msg:
            self.logger.warning("Failed to initialize qemu monitor socket: %s File: %s" %
                                (msg, msg.filename))
            return False

        self.logger.debug("QMP Connecting to %s" % (qmp_port))
        if not os.path.exists(qmp_port) and self.is_alive():
            self.logger.debug("QMP Port does not exist waiting for it to be created")
            endtime = time.time() + self.runqemutime
            while not os.path.exists(qmp_port) and self.is_alive() and time.time() < endtime:
                self.logger.info("QMP port does not exist yet!")
                time.sleep(0.5)
            if not os.path.exists(qmp_port) and self.is_alive():
                self.logger.warning("QMP Port still does not exist but QEMU is alive")
                return False

        try:
            self.qmp.connect()
        except OSError as msg:
            self.logger.warning("Failed to connect qemu monitor socket: %s File: %s" %
                                (msg, msg.filename))
            return False
        except qmp.QMPConnectError as msg:
            self.logger.warning("Failed to communicate with qemu monitor: %s" % (msg))
            return False
    finally:
        os.chdir(origpath)

    # Release the qemu process to continue running
    self.run_monitor('cont')

    if not self.is_alive():
        self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
                          (self.runqemutime, time.strftime("%D %H:%M:%S")))

        qemu_pid = None
        if os.path.isfile(self.qemu_pidfile):
            with open(self.qemu_pidfile, 'r') as f:
                qemu_pid = f.read().strip()

        self.logger.error("Status information, poll status: %s, pidfile exists: %s, pidfile contents %s, proc pid exists %s"
                          % (self.runqemu.poll(), os.path.isfile(self.qemu_pidfile),
                             str(qemu_pid), os.path.exists("/proc/" + str(qemu_pid))))

        # Dump all processes to help us to figure out what is going on...
        ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,pri,ni,command '],
                              stdout=subprocess.PIPE).communicate()[0]
        processes = ps.decode("utf-8")
        self.logger.debug("Running processes:\n%s" % processes)
        self._dump_host()
        op = self.getOutput(output)
        self.stop()
        if op:
            self.logger.error("Output from runqemu:\n%s" % op)
        else:
            self.logger.error("No output from runqemu.\n")
        return False

    # We are alive: qemu is running
    out = self.getOutput(output)
    netconf = False  # network configuration is not required by default
    self.logger.debug("qemu started in %s seconds - qemu process pid is %s (%s)" %
                      (time.time() - (endtime - self.runqemutime), self.qemupid,
                       time.strftime("%D %H:%M:%S")))
    cmdline = ''
    if get_ip:
        with open('/proc/%s/cmdline' % self.qemupid) as p:
            cmdline = p.read()
            # It is needed to sanitize the data received
            # because is possible to have control characters
            cmdline = re_control_char.sub(' ', cmdline)
        try:
            if self.use_slirp:
                tcp_ports = cmdline.split("hostfwd=tcp::")[1]
                host_port = tcp_ports[:tcp_ports.find('-')]
                self.ip = "localhost:%s" % host_port
            else:
                ips = re.findall(r"((?:[0-9]{1,3}\.){3}[0-9]{1,3})",
                                 cmdline.split("ip=")[1])
                self.ip = ips[0]
                self.server_ip = ips[1]
            self.logger.debug("qemu cmdline used:\n{}".format(cmdline))
        except (IndexError, ValueError):
            # Try to get network configuration from runqemu output
            match = re.match(r'.*Network configuration: (?:ip=)*([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
                             out, re.MULTILINE | re.DOTALL)
            if match:
                self.ip, self.server_ip, self.netmask = match.groups()
                # network configuration is required as we couldn't get it
                # from the runqemu command line, so qemu doesn't run kernel
                # and guest networking is not configured
                netconf = True
            else:
                self.logger.error("Couldn't get ip from qemu command line and runqemu output! "
                                  "Here is the qemu command line used:\n%s\n"
                                  "and output from runqemu:\n%s" % (cmdline, out))
                self._dump_host()
                self.stop()
                return False

    self.logger.debug("Target IP: %s" % self.ip)
    self.logger.debug("Server IP: %s" % self.server_ip)

    if self.serial_ports >= 2:
        self.thread = LoggingThread(self.log, self.threadsock, self.logger)
        self.thread.start()
        if not self.thread.connection_established.wait(self.boottime):
            self.logger.error("Didn't receive a console connection from qemu. "
                              "Here is the qemu command line used:\n%s\nand "
                              "output from runqemu:\n%s" % (cmdline, out))
            self.stop_thread()
            return False

    self.logger.debug("Output from runqemu:\n%s", out)
    self.logger.debug("Waiting at most %d seconds for login banner (%s)" %
                      (self.boottime, time.strftime("%D %H:%M:%S")))
    endtime = time.time() + self.boottime
    socklist = [self.server_socket]
    reachedlogin = False
    stopread = False
    qemusock = None
    bootlog = b''
    data = b''
    while time.time() < endtime and not stopread:
        try:
            sread, swrite, serror = select.select(socklist, [], [], 5)
        except InterruptedError:
            continue
        for sock in sread:
            if sock is self.server_socket:
                qemusock, addr = self.server_socket.accept()
                qemusock.setblocking(0)
                socklist.append(qemusock)
                socklist.remove(self.server_socket)
                self.logger.debug("Connection from %s:%s" % addr)
            else:
                data = data + sock.recv(1024)
                if data:
                    bootlog += data
                    if self.serial_ports < 2:
                        # this socket has mixed console/kernel data, log it to logfile
                        self.log(data)
                    data = b''
                    if self.boot_patterns['search_reached_prompt'] in bootlog:
                        self.server_socket = qemusock
                        stopread = True
                        reachedlogin = True
                        self.logger.debug("Reached login banner in %s seconds (%s)" %
                                          (time.time() - (endtime - self.boottime),
                                           time.strftime("%D %H:%M:%S")))
                else:
                    # no need to check if reachedlogin unless we support multiple connections
                    self.logger.debug("QEMU socket disconnected before login banner reached. (%s)" %
                                      time.strftime("%D %H:%M:%S"))
                    socklist.remove(sock)
                    sock.close()
                    stopread = True

    if not reachedlogin:
        if time.time() >= endtime:
            self.logger.warning("Target didn't reach login banner in %d seconds (%s)" %
                                (self.boottime, time.strftime("%D %H:%M:%S")))
        tail = lambda l: "\n".join(l.splitlines()[-25:])
        bootlog = bootlog.decode("utf-8")
        # in case bootlog is empty, use tail qemu log store at self.msg
        lines = tail(bootlog if bootlog else self.msg)
        self.logger.warning("Last 25 lines of text:\n%s" % lines)
        self.logger.warning("Check full boot log: %s" % self.logfile)
        self._dump_host()
        self.stop()
        return False

    # If we are not able to login the tests can continue
    try:
        (status, output) = self.run_serial(self.boot_patterns['send_login_user'],
                                           raw=True, timeout=120)
        if re.search(self.boot_patterns['search_login_succeeded'], output):
            self.logged = True
            self.logger.debug("Logged as root in serial console")
            if netconf:
                # configure guest networking
                cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask)
                output = self.run_serial(cmd, raw=True)[1]
                if re.search(r"root@[a-zA-Z0-9\-]+:~#", output):
                    self.logger.debug("configured ip address %s", self.ip)
                else:
                    self.logger.debug("Couldn't configure guest networking")
        else:
            self.logger.warning("Couldn't login into serial console"
                                " as root using blank password")
            self.logger.warning("The output:\n%s" % output)
    except:
        self.logger.warning("Serial console failed while trying to login")

    return True

async def inner():
    assert signal.getsignal(signal.SIGINT) != signal.default_int_handler

def start(self, argv):
    # Install signal handlers
    _prev_signals = {}
    if _thread_name() == '_MainThread':
        for s in (signal.SIGTERM, signal.SIGINT):
            _prev_signals[s] = signal.getsignal(s)
            signal.signal(s, self.__sigTERMhandler)
    try:
        # Command line options
        if self._argv is None:
            ret = self.initCmdLine(argv)
            if ret is not None:
                if ret:
                    return True
                raise ServerExecutionException("Init of command line failed")

        # Commands
        args = self._args

        # Interactive mode
        if self._conf.get("interactive", False):
            try:
                import readline
            except ImportError:
                raise ServerExecutionException("Readline not available")
            try:
                ret = True
                if len(args) > 0:
                    ret = self.__processCommand(args)
                if ret:
                    readline.parse_and_bind("tab: complete")
                    self.dispInteractive()
                    while True:
                        cmd = input_command()
                        if cmd == "exit" or cmd == "quit":
                            # Exit
                            return True
                        if cmd == "help":
                            self.dispUsage()
                        elif not cmd == "":
                            try:
                                self.__processCommand(shlex.split(cmd))
                            except Exception as e:  # pragma: no cover
                                if self._conf["verbose"] > 1:
                                    logSys.exception(e)
                                else:
                                    logSys.error(e)
            except (EOFError, KeyboardInterrupt):  # pragma: no cover
                output("")
                raise
        # Single command mode
        else:
            if len(args) < 1:
                self.dispUsage()
                return False
            return self.__processCommand(args)
    except Exception as e:
        if self._conf["verbose"] > 1:
            logSys.exception(e)
        else:
            logSys.error(e)
        return False
    finally:
        self._alive = False
        for s, sh in _prev_signals.iteritems():
            signal.signal(s, sh)

def main():
    global STATS
    global VERSION_KEY
    global CHANGEDCT
    global shutdown_event

    def signal_handler(signum, frame):
        signal.signal(signal.SIGINT, SIGINT_ORIG)
        sys.stdout.write("\rCleaning Up ... Please Wait ...\n")
        shutdown_event.set()

        # Let the current workload finish
        sys.stdout.write("\tStopping Workers\n")
        for t in threads:
            t.join(1)
        insert_queue.put("finished")

        # Give the Mongo process 5 seconds to exit
        mongo_worker_thread.join(5)

        # If it's still alive, terminate it
        if mongo_worker_thread.is_alive():
            try:
                mongo_worker_thread.terminate()
            except:
                pass

        # Attempt to update the stats
        try:
            meta.update({'metadata': options.identifier}, {
                '$set': {
                    'total': STATS['total'],
                    'new': STATS['new'],
                    'updated': STATS['updated'],
                    'unchanged': STATS['unchanged'],
                    'duplicates': STATS['duplicates'],
                    'changed_stats': CHANGEDCT
                }
            })
        except:
            pass

        sys.stdout.write("... Done\n")
        sys.exit(0)

    optparser = OptionParser(usage='usage: %prog [options]')
    optparser.add_option("-f", "--file", action="store", dest="file",
                         default=None, help="Input CSV file")
    optparser.add_option("-d", "--directory", action="store", dest="directory",
                         default=None,
                         help="Directory to recursively search for CSV files - prioritized over 'file'")
    optparser.add_option("-e", "--extension", action="store", dest="extension",
                         default='csv',
                         help="When scanning for CSV files only parse files with given extension (default: 'csv')")
    optparser.add_option("-i", "--identifier", action="store", dest="identifier",
                         type="int", default=None,
                         help="Numerical identifier to use in update to signify version (e.g., '8' or '20140120')")
    optparser.add_option("-m", "--mongo-host", action="store", dest="mongo_host",
                         default='localhost', help="Location of mongo db/cluster")
    optparser.add_option("-p", "--mongo-port", action="store", dest="mongo_port",
                         type="int", default=27017, help="Port of mongo db/cluster")
    optparser.add_option("-b", "--database", action="store", dest="database",
                         default='whois', help="Name of database to use (default: 'whois')")
    optparser.add_option("-c", "--collection", action="store", dest="collection",
                         default='whois', help="Name of collection to use (default: 'whois')")
    optparser.add_option("-t", "--threads", action="store", dest="threads",
                         type="int", default=multiprocessing.cpu_count(),
                         help="Number of worker threads")
    optparser.add_option("-B", "--bulk-size", action="store", dest="bulk_size",
                         type="int", default=1000, help="Size of bulk insert requests")
    optparser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                         default=False, help="Be verbose")
    optparser.add_option("--vverbose", action="store_true", dest="vverbose",
                         default=False,
                         help="Be very verbose (prints the status of every domain parsed, very noisy)")
    optparser.add_option("-s", "--stats", action="store_true", dest="stats",
                         default=False, help="Print out stats after running")
    optparser.add_option("-x", "--exclude", action="store", dest="exclude",
                         default="",
                         help="Comma-separated list of keys to exclude if updating entry")
    optparser.add_option("-n", "--include", action="store", dest="include",
                         default="",
                         help="Comma-separated list of keys to include if updating entry (mutually exclusive with -x)")
    optparser.add_option("-o", "--comment", action="store", dest="comment",
                         default="", help="Comment to store with metadata")
    optparser.add_option("-r", "--redo", action="store_true", dest="redo",
                         default=False,
                         help="Attempt to re-import a failed import or import more data, "
                              "uses stored metadata from the previous import "
                              "(-o and -x not required and will be ignored!!)")

    (options, args) = optparser.parse_args()

    if options.vverbose:
        options.verbose = True

    threads = []
    work_queue = Queue.Queue(maxsize=options.bulk_size)
    insert_queue = mpQueue(maxsize=options.bulk_size)

    client = MongoClient(host=options.mongo_host, port=options.mongo_port)
    whodb = client[options.database]
    collection = whodb[options.collection]
    meta = whodb[options.collection + '_meta']

    if options.identifier is None and options.redo is False:
        print "Identifier required"
        sys.exit(1)
    elif options.identifier is not None and options.redo is True:
        print "Redo requested and Identifier specified. Please choose one or the other"
        sys.exit(1)
    elif options.exclude != "" and options.include != "":
        print "Options include and exclude are mutually exclusive, choose only one"
        sys.exit(1)

    metadata = meta.find_one({'metadata': 0})
    meta_id = None
    if metadata is None:  # Doesn't exist
        if options.redo is False:
            md = {
                'metadata': 0,
                'firstVersion': options.identifier,
                'lastVersion': options.identifier,
            }
            meta_id = meta.insert(md)
            metadata = meta.find_one({'_id': meta_id})

            # Setup indexes
            collection.ensure_index(UNIQUE_KEY, background=True, unique=True)
            collection.ensure_index(VERSION_KEY, background=True)
            collection.ensure_index('domainName', background=True)
            collection.ensure_index([('domainName', pymongo.ASCENDING),
                                     (VERSION_KEY, pymongo.ASCENDING)],
                                    background=True)
            collection.ensure_index('details.contactEmail', background=True)
            collection.ensure_index('details.registrant_name', background=True)
            collection.ensure_index('details.registrant_telephone', background=True)
        else:
            print "Cannot redo when no initial import exists"
            sys.exit(1)
    else:
        if options.redo is False:  # On redo the identifier is auto-pulled from the db, no need to check
            if options.identifier < 1:
                print "Identifier must be greater than 0"
                sys.exit(1)
            if metadata['lastVersion'] >= options.identifier:
                print "Identifier must be 'greater than' previous identifier"
                sys.exit(1)
        meta_id = metadata['_id']

    if options.redo is False:
        if options.exclude != "":
            options.exclude = options.exclude.split(',')
        else:
            options.exclude = None

        if options.include != "":
            options.include = options.include.split(',')
        else:
            options.include = None

        # Start worker threads
        if options.verbose:
            print "Starting %i worker threads" % options.threads

        for i in range(options.threads):
            t = Thread(target=process_worker,
                       args=(work_queue, insert_queue, collection, options),
                       name='Worker %i' % i)
            t.daemon = True
            t.start()
            threads.append(t)

        # Update the lastVersion in the metadata
        meta.update({'_id': meta_id},
                    {'$set': {'lastVersion': options.identifier}})

        # Create the entry for this import
        meta_struct = {
            'metadata': options.identifier,
            'comment': options.comment,
            'total': 0,
            'new': 0,
            'updated': 0,
            'unchanged': 0,
            'duplicates': 0,
            'changed_stats': {}
        }

        if options.exclude is not None:
            meta_struct['excluded_keys'] = options.exclude
        elif options.include is not None:
            meta_struct['included_keys'] = options.include

        meta.insert(meta_struct)
    else:  # redo is True
        # Get the record for the attempted import
        options.identifier = int(metadata['lastVersion'])
        redo_record = meta.find_one({'metadata': options.identifier})

        if 'excluded_keys' in redo_record:
            options.exclude = redo_record['excluded_keys']
        else:
            options.exclude = None

        if 'included_keys' in redo_record:
            options.include = redo_record['included_keys']
        else:
            options.include = None

        options.comment = redo_record['comment']
        STATS['total'] = int(redo_record['total'])
        STATS['new'] = int(redo_record['new'])
        STATS['updated'] = int(redo_record['updated'])
        STATS['unchanged'] = int(redo_record['unchanged'])
        STATS['duplicates'] = int(redo_record['duplicates'])
        CHANGEDCT = redo_record['changed_stats']

        if options.verbose:
            print "Re-importing for: \n\tIdentifier: %s\n\tComment: %s" % (
                options.identifier, options.comment)

        for ch in CHANGEDCT.keys():
            CHANGEDCT[ch] = int(CHANGEDCT[ch])

        # Start the reworker threads
        if options.verbose:
            print "Starting %i reworker threads" % options.threads

        for i in range(options.threads):
            t = Thread(target=process_reworker,
                       args=(work_queue, insert_queue, collection, options),
                       name='Worker %i' % i)
            t.daemon = True
            t.start()
            threads.append(t)
        # No need to update lastVersion or create a metadata entry on redo

    # Start up the Mongo bulk processor
    mongo_worker_thread = Process(target=mongo_worker,
                                  args=(insert_queue, options))
    mongo_worker_thread.daemon = True
    mongo_worker_thread.start()

    # Set up the signal handler before we go into the real work
    SIGINT_ORIG = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal_handler)

    # Start up the reader thread
    reader_thread = Thread(target=reader_worker,
                           args=(work_queue, collection, options),
                           name='Reader')
    reader_thread.daemon = True
    reader_thread.start()

    while True:
        reader_thread.join(.1)
        if not reader_thread.isAlive():
            break
        time.sleep(.1)

    while not work_queue.empty():
        time.sleep(.01)

    work_queue.join()
    insert_queue.put("finished")
    mongo_worker_thread.join()

    # Update the stats
    meta.update({'metadata': options.identifier}, {
        '$set': {
            'total': STATS['total'],
            'new': STATS['new'],
            'updated': STATS['updated'],
            'unchanged': STATS['unchanged'],
            'duplicates': STATS['duplicates'],
            'changed_stats': CHANGEDCT
        }
    })

    if options.stats:
        print "Stats:"
        print "Total Entries:\t\t %d" % STATS['total']
        print "New Entries:\t\t %d" % STATS['new']
        print "Updated Entries:\t %d" % STATS['updated']
        print "Duplicate Entries:\t %d" % STATS['duplicates']
        print "Unchanged Entries:\t %d" % STATS['unchanged']
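# The signal_handler above restores the saved SIGINT handler before doing any
# cleanup, so a second Ctrl+C falls through to the original behavior instead
# of re-entering the cleanup code. A minimal, self-contained sketch of that
# pattern follows; install_cleanup_handler is a hypothetical name, not part of
# the script above.
import signal
import sys


def install_cleanup_handler(cleanup):
    original = signal.getsignal(signal.SIGINT)

    def handler(signum, frame):
        # Restore first: a repeated Ctrl+C now gets the original handler.
        signal.signal(signal.SIGINT, original)
        cleanup()
        sys.exit(0)

    signal.signal(signal.SIGINT, handler)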
def updateFusionIO(patchResourceDict, **kwargs):
    firmwareUpdateRequired = kwargs['firmwareUpdateRequired']
    logger = logging.getLogger("patchLogger")

    if firmwareUpdateRequired == 'yes':
        logger.info("Updating the FusionIO firmware and software.")
        busList = (kwargs['busList']).split()
    else:
        logger.info("Updating the FusionIO software.")

    try:
        patchBaseDir = (re.sub(r'\s+', '', patchResourceDict['patchBaseDir'])).rstrip('/')
        fusionIOSubDir = re.sub(r'\s+', '', patchResourceDict['fusionIOSubDir'])
        fusionPatchDir = patchBaseDir + '/' + fusionIOSubDir
        fusionSourceDir = fusionPatchDir + '/src/'
        fusionIODriverSrcRPM = re.sub(r'\s+', '', patchResourceDict['fusionIODriverSrcRPM'])
    except KeyError as err:
        logger.error("The resource key (" + str(err) + ") was not present in the resource file.")
        print RED + "A resource key was not present in the resource file; check the log file for errors; the FusionIO firmware and software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'

    # Get the currently used kernel and processor type, which are used as part of the driver RPM name.
    command = 'uname -r'
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = result.communicate()

    logger.debug("The output of the command (" + command + ") used to get the currently used kernel was: " + out.strip())

    if result.returncode != 0:
        logger.error("Unable to get the system's current kernel information.\n" + err)
        print RED + "Unable to get the system's current kernel information; check the log file for errors; the FusionIO firmware and software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'
    else:
        kernel = out.strip()

    command = 'uname -p'
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = result.communicate()

    logger.debug("The output of the command (" + command + ") used to get the system's processor type was: " + out.strip())

    if result.returncode != 0:
        logger.error("Unable to get the system's processor type.\n" + err)
        print RED + "Unable to get the system's processor type; check the log file for errors; the FusionIO firmware and software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'
    else:
        processorType = out.strip()

    '''
    This strips off iomemory from the RPM name, since it will not be needed in the regex match.
    Additionally, the source RPM is renamed to the driver RPM's name, which includes the current
    kernel and processor type in its name.
    '''
    fusionIODriverRPM = (fusionIODriverSrcRPM.replace('iomemory-vsl', '-vsl-' + kernel)).replace('src', processorType)

    # Update the FusionIO firmware if it was determined that it is out of date.
    if firmwareUpdateRequired == 'yes':
        # Set traps so that the firmware update is not interrupted by the user.
        original_sigint_handler = signal.getsignal(signal.SIGINT)
        original_sigquit_handler = signal.getsignal(signal.SIGQUIT)
        signal.signal(signal.SIGINT, firmware_signal_handler)
        signal.signal(signal.SIGQUIT, firmware_signal_handler)

        for bus in busList:
            time.sleep(2)
            timeFeedbackThread = TimeFeedbackThread("Updating ioDIMM in slot", bus)
            timeFeedbackThread.start()

            command = "fio-update-iodrive -y -f -s " + bus + ' ' + fusionPatchDir + '/' + "*.fff"
            result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            out, err = result.communicate()

            logger.debug("The output of the command (" + command + ") used to update the FusionIO firmware was: " + out.strip())

            timeFeedbackThread.stopTimer()
            timeFeedbackThread.join()

            if result.returncode != 0:
                logger.error("Failed to upgrade the FusionIO firmware:\n" + err)
                print RED + "Failed to upgrade the FusionIO firmware; check the log file for errors; the FusionIO firmware and software/driver will have to be updated manually." + RESETCOLORS
                signal.signal(signal.SIGINT, original_sigint_handler)
                signal.signal(signal.SIGQUIT, original_sigquit_handler)
                return 'Failure'

        # Restore the signals back to their original handlers.
        signal.signal(signal.SIGINT, original_sigint_handler)
        signal.signal(signal.SIGQUIT, original_sigquit_handler)

    # Remove the fio-util package before updating the software, since it is no longer needed for any firmware updates.
    command = "rpm -e fio-util"
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = result.communicate()

    logger.debug("The output of the command (" + command + ") used to remove the fio-util package before updating the FusionIO software was: " + out.strip())

    if result.returncode != 0:
        logger.error("Failed to remove the fio-util package:\n" + err)
        print RED + "Failed to remove the fio-util package; check the log file for errors; the FusionIO software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'

    # Build the driver for the new kernel.
    timeFeedbackThread = TimeFeedbackThread("Updating the FusionIO driver and software")
    timeFeedbackThread.start()

    command = "rpmbuild --rebuild " + fusionSourceDir + "*.rpm"
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = result.communicate()

    logger.debug("The output of the command (" + command + ") used to build the FusionIO driver was: " + out.strip())

    if result.returncode != 0:
        timeFeedbackThread.stopTimer()
        timeFeedbackThread.join()
        logger.error("Failed to build the FusionIO driver:\n" + err)
        print RED + "Failed to build the FusionIO driver; check the log file for errors; the FusionIO software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'

    out = out.strip()

    # Compile the regex that will be used to get the driver RPM location.
    fusionIODriverPattern = re.compile(r'.*Wrote:\s+((/[0-9a-zA-Z_]+)+' + fusionIODriverRPM + ')', re.DOTALL)

    logger.debug("The regex used to get the FusionIO driver RPM location was: " + fusionIODriverPattern.pattern)

    driverRPM = re.match(fusionIODriverPattern, out).group(1)

    logger.debug("The FusionIO driver was determined to be: " + driverRPM)

    # Now copy the new driver RPM to the FusionIO patch directory so that it gets installed with the rest of the RPMs.
    try:
        shutil.copy2(driverRPM, fusionPatchDir)
    except IOError as err:
        timeFeedbackThread.stopTimer()
        timeFeedbackThread.join()
        logger.error("Unable to retrieve the driver RPM.\n" + str(err))
        print RED + "Unable to retrieve the driver RPM; check the log file for errors; the FusionIO firmware and software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'

    # Update the FusionIO software.
    command = "rpm -ivh " + fusionPatchDir + '/' + "*.rpm"
    result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out, err = result.communicate()

    logger.debug("The output of the command (" + command + ") used to update the FusionIO software was: " + out.strip())

    if result.returncode != 0:
        timeFeedbackThread.stopTimer()
        timeFeedbackThread.join()
        logger.error("Failed to update the FusionIO software:\n" + err)
        print RED + "Failed to update the FusionIO software; check the log file for errors; the FusionIO software/driver will have to be updated manually." + RESETCOLORS
        return 'Failure'

    if firmwareUpdateRequired == 'yes':
        logger.info("Done updating the FusionIO firmware and software.")
    else:
        logger.info("Done updating the FusionIO software.")

    timeFeedbackThread.stopTimer()
    timeFeedbackThread.join()

    return 'Success'
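# updateFusionIO guards its firmware loop by saving the SIGINT/SIGQUIT
# handlers with getsignal() and re-installing them on every exit path. The
# same idea can be written once as a context manager, so the handlers are
# restored in a single place even if the body raises. A sketch, assuming
# nothing beyond the standard library (guard_signals is our name, not part of
# the code above):
import signal
from contextlib import contextmanager


@contextmanager
def guard_signals(handler, *signums):
    # Remember the current handlers, then install the guard handler.
    saved = {s: signal.getsignal(s) for s in signums}
    for s in signums:
        signal.signal(s, handler)
    try:
        yield
    finally:
        # Restore the saved handlers exactly once, on any exit path.
        for s, h in saved.items():
            signal.signal(s, h)

# Usage (hypothetical): with guard_signals(firmware_signal_handler,
# signal.SIGINT, signal.SIGQUIT): run the firmware update loop.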
def __init__(self, sig=signal.SIGINT):
    self.sig = sig
    self.interrupted = False
    self.released = False
    self.original_handler = signal.getsignal(self.sig)
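# Only __init__ appears above; the attributes match the familiar "graceful
# interrupt handler" context-manager recipe. A sketch of how such a class is
# typically completed (these method bodies are our assumption, not the
# original author's code):
def __enter__(self):
    def handler(signum, frame):
        # Restore the original handler before recording the interrupt,
        # so a second delivery is not swallowed.
        self.release()
        self.interrupted = True

    signal.signal(self.sig, handler)
    return self


def __exit__(self, exc_type, exc_value, traceback):
    self.release()


def release(self):
    if self.released:
        return False
    signal.signal(self.sig, self.original_handler)
    self.released = True
    return True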
def process_all_files_in_subdir(self, subdir):
    files = natsorted(subdir.glob(self.glob_pattern), key=lambda x: x.name)
    outpath0 = self.outdir.joinpath(subdir.name,
                                    subdir.name + "_filtered.fasta.gz")
    outpath0.parent.mkdir(parents=True, exist_ok=True)
    resdir = self.outdir.joinpath("result_summary", "variant_filter",
                                  subdir.name)
    resdir.mkdir(parents=True, exist_ok=True)
    self.tmpdir = Path(tempfile.mkdtemp())
    if not self.quiet:
        print("." * shutil.get_terminal_size()[0])
        print("Processing {} ({}): ".format(subdir.name, time.ctime()))
    self.overwrite = False
    self.uniq_seqs = set()
    if outpath0.exists():
        msg = "Output file '{}' already exists. Overwrite?"
        if ask_user(msg.format(str(outpath0)), "n"):
            outpath0.unlink()
            self.overwrite = True
        else:
            self.uniq_seqs.update(
                [str(x.seq) for x in read_fastx(outpath0)])
    if self.force_visual_check:
        for f in files:
            self.visual_check_force(f)
    else:
        self.queue = []
        if self.n_cpu == 1:
            if self.quiet:
                res = [self.filter(f) for f in files]
            else:
                res = [self.filter(f)
                       for f in tqdm(files, total=len(files))]
        else:
            with Manager() as manager:
                self.mp_queue = manager.list()
                # Have the pool workers inherit SIG_IGN for SIGINT so that
                # Ctrl+C is delivered only to the parent, then restore the
                # original handler in the parent.
                original_sigint_handler = signal.signal(
                    signal.SIGINT, signal.SIG_IGN)
                pool = Pool(self.n_cpu)
                signal.signal(signal.SIGINT, original_sigint_handler)
                try:
                    if self.quiet:
                        res = list(pool.imap(self.filter, files))
                    else:
                        res = list(
                            tqdm(pool.imap(self.filter, files),
                                 total=len(files)))
                except KeyboardInterrupt:
                    print("Caught KeyboardInterrupt, terminating workers")
                    pool.terminate()
                    pool.join()
                    shutil.rmtree(str(self.tmpdir))
                    raise KeyboardInterrupt
                else:
                    pool.close()
                    pool.join()
                while self.mp_queue:
                    self.queue.append(self.mp_queue.pop())
        if np.sum(res) > 0:
            self.queue = self.queue[::-1]
            if not self.quiet:
                print("\nStart visual-checking procedure ...")
            while self.queue:
                self.visual_check(*self.queue.pop())
    # Append every temporary FASTA into the gzipped output; the file is
    # opened once in append mode so earlier chunks are not truncated.
    with gzip.open(outpath0, "ab") as outfile:
        for ftmp in self.tmpdir.glob("tmp*.fasta"):
            with ftmp.open(mode="rb") as tmp:
                shutil.copyfileobj(tmp, outfile, 1024 * 1024 * 10)
    shutil.rmtree(str(self.tmpdir))
    if not self.quiet and subdir != self.subdirs[-1] and np.sum(res) > 0:
        if not ask_user("Done. Proceed to the next?", default="y",
                        quit=False):
            sys.exit()
async def test_open_signal_receiver_restore_handler_after_duplicate_signal():
    orig = signal.getsignal(signal.SIGILL)
    with open_signal_receiver(signal.SIGILL, signal.SIGILL):
        pass
    # Still restored correctly
    assert signal.getsignal(signal.SIGILL) is orig
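# A minimal usage sketch for open_signal_receiver as the test above exercises
# it: under trio, the receiver is a regular context manager whose value is an
# async iterable of delivered signal numbers.
import signal

import trio


async def print_signals():
    with trio.open_signal_receiver(signal.SIGHUP, signal.SIGINT) as source:
        async for signum in source:
            print("received signal", signum)
            if signum == signal.SIGINT:
                break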
def _mock_subprocess_run(*args, **kwargs):
    handler = signal.getsignal(signal.SIGINT)
    self.assertEqual(handler, signal.SIG_IGN)
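# The mocked test above asserts that SIGINT is ignored in the parent while the
# child runs. A sketch of the idiom being tested (run_uninterruptible is a
# hypothetical name): signal.signal() returns the previous handler, so the
# save and the swap can be one call.
import signal
import subprocess


def run_uninterruptible(cmd):
    original = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        # Ctrl+C is ignored by this process until the child finishes.
        return subprocess.run(cmd)
    finally:
        signal.signal(signal.SIGINT, original)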
""" SIG_TRIGGER = False def kill_handler(sig, frame): print() exit(0) def sig_handler(sig, frame): global SIG_TRIGGER SIG_TRIGGER = True signal.signal(signal.SIGINT, kill_handler) original_sigint_handler = signal.getsignal(signal.SIGINT) signal.signal(signal.SIGINT, sig_handler) def perform_installation(device, boot_partition, language, mirrors): """ Performs the installation steps on a block device. Only requirement is that the block devices are formatted and setup prior to entering this function. """ with archinstall.Installer(device, boot_partition=boot_partition, hostname=archinstall.storage['_guided'] ['hostname']) as installation: ## if len(mirrors): # Certain services might be running that affects the system during installation.
from RMS.UploadManager import UploadManager


# Flag indicating that capturing should be stopped
STOP_CAPTURE = False


def breakHandler(signum, frame):
    """ Handles what happens when Ctrl+C is pressed. """
    global STOP_CAPTURE

    # Set the flag to stop capturing video
    STOP_CAPTURE = True


# Save the original handler for the Ctrl+C event
ORIGINAL_BREAK_HANDLE = signal.getsignal(signal.SIGINT)


def setSIGINT():
    """ Set breakHandler as the handler for the SIGINT signal; it will be called when Ctrl+C is
        pressed. """
    signal.signal(signal.SIGINT, breakHandler)


def resetSIGINT():
    """ Restore the original Ctrl+C action. """
    signal.signal(signal.SIGINT, ORIGINAL_BREAK_HANDLE)
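# A minimal sketch of how setSIGINT/resetSIGINT are meant to bracket a capture
# loop (the loop body here is hypothetical, not part of the module above):
import time

setSIGINT()
try:
    while not STOP_CAPTURE:
        time.sleep(0.1)  # capture work would happen here
finally:
    resetSIGINT()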
def __enter__(self):
    self.orig_sigint_handler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, self.handler)
def _swap_handler(self, signum, signal_handler):
    self._old_handlers[signum] = signal.getsignal(signum)
    signal.signal(signum, signal_handler)
    if not platform._is_win:
        signal.siginterrupt(signum, False)
logger = logging.getLogger(__name__)

# _SIGNALS stores the signals that will be handled by the ErrorHandler. These
# signals were chosen as their default handler terminates the process and could
# potentially occur from inside Python. Signals such as SIGILL were not
# included as they could be a sign of something devious and we should terminate
# immediately.
if os.name != "nt":
    _SIGNALS = [signal.SIGTERM]
    for signal_code in [signal.SIGHUP, signal.SIGQUIT,
                        signal.SIGXCPU, signal.SIGXFSZ]:
        # Add only those signals whose default action is not to ignore them.
        # This is platform-dependent, so we check it dynamically.
        if signal.getsignal(signal_code) != signal.SIG_IGN:
            _SIGNALS.append(signal_code)
else:
    # POSIX signals are not implemented natively in Windows, but are emulated by the C runtime.
    # As consumed by CPython, most handlers for these signals are useless, in particular
    # SIGTERM: for instance, os.kill(pid, signal.SIGTERM) will call TerminateProcess, which stops
    # the process immediately without calling the attached handler. Besides, non-POSIX signals
    # (CTRL_C_EVENT and CTRL_BREAK_EVENT) are implemented in a console context to deliver the
    # CTRL+C event to a process launched from the console. Only CTRL_C_EVENT has reliable
    # behavior in fact, and it maps the handler to SIGINT. However, in that case a
    # KeyboardInterrupt is raised, which will be handled by ErrorHandler through the context
    # manager protocol. Finally, no signal on Windows is eligible to be handled using ErrorHandler.
    #
    # Refs: https://stackoverflow.com/a/35792192, https://maruel.ca/post/python_windows_signal,
    # https://docs.python.org/2/library/os.html#os.kill,
    # https://www.reddit.com/r/Python/comments/1dsblt/windows_command_line_automation_ctrlc_question
    _SIGNALS = []
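# A sketch of what an ErrorHandler-style class does with a signal list such as
# _SIGNALS: remember each previous handler via getsignal() before installing
# its own, and reinstall the remembered handlers afterwards (the function
# names here are ours for illustration, not certbot's API):
import signal


def set_handlers(signums, handler):
    previous = {}
    for signum in signums:
        previous[signum] = signal.getsignal(signum)
        signal.signal(signum, handler)
    return previous


def reset_handlers(previous):
    for signum, old_handler in previous.items():
        signal.signal(signum, old_handler)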
def setUp(self):
    self._default_handler = signal.getsignal(signal.SIGINT)
    if self.int_handler is not None:
        signal.signal(signal.SIGINT, self.int_handler)