def reset_itimer(self): """ Starts a signal timer to signal the current process after C{max_delay} seconds. If this process gets signaled, that means that the reactor failed to cancel the alarm, which means that the reactor has hung. """ signal.setitimer(signal.ITIMER_REAL, self.max_delay)
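# A minimal sketch of the watchdog pattern reset_itimer() belongs to, assuming a
# SIGALRM handler and a cancel step that the snippet above only alludes to
# (class, handler and method names below are illustrative, not from the original source):
import signal

class ReactorWatchdog(object):
    def __init__(self, max_delay=5.0):
        self.max_delay = max_delay
        signal.signal(signal.SIGALRM, self._hung)

    def reset_itimer(self):
        # Re-arm the one-shot timer; if the reactor services our cancel callback
        # in time, cancel_itimer() runs first and SIGALRM never fires.
        signal.setitimer(signal.ITIMER_REAL, self.max_delay)

    def cancel_itimer(self):
        signal.setitimer(signal.ITIMER_REAL, 0)

    def _hung(self, signum, frame):
        raise RuntimeError("reactor appears hung: the alarm was not cancelled "
                           "within %s seconds" % self.max_delay)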
def waitall(self, timeout=False, killem=False): if killem and timeout is False: raise Exception("Wrong arguments") if timeout is not False: assert timeout > 0, "timeout should be positive" if timeout: signal.signal(signal.SIGALRM, lambda x,y: None) signal.setitimer(signal.ITIMER_REAL, timeout) interrupted = False for pid in self.get_pids(): try: #print("waiting", pid) os.waitpid(pid, 0) #print("pid", pid, "gone") except ChildProcessError: pass except InterruptedError: # ALARM raised interrupted = True if killem: self.killall(sig=signal.SIGKILL) return self.waitall(timeout=10) # wait again until all processes have died raise Exception("timeout") else: # All stopped if timeout: signal.setitimer(signal.ITIMER_REAL, 0) # disable timer
def test_eintr_zero_timeout(wfs, spair): a, b = spair interrupt_count = [0] def handler(sig, frame): assert sig == signal.SIGALRM interrupt_count[0] += 1 old_handler = signal.signal(signal.SIGALRM, handler) try: assert not wfs(a, read=True, timeout=0) try: # Start delivering SIGALRM 1000 times per second, # to trigger race conditions such as # https://github.com/urllib3/urllib3/issues/1396. signal.setitimer(signal.ITIMER_REAL, 0.001, 0.001) # Hammer the system call for a while to trigger the # race. for i in range(100000): wfs(a, read=True, timeout=0) finally: # Stop delivering SIGALRM signal.setitimer(signal.ITIMER_REAL, 0) finally: signal.signal(signal.SIGALRM, old_handler) assert interrupt_count[0] > 0
def getch(timeout=0.1): """Returns single character of raw input or '' if nothing after timeout""" def _handle_timeout(signum, frame): raise TimeoutError() def _getch(): try: fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch except TimeoutError: return '' signal.signal(signal.SIGALRM, _handle_timeout) signal.setitimer(signal.ITIMER_REAL, timeout) try: result = _getch() finally: signal.alarm(0) return result
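# Illustrative use of the getch() helper above: poll for single keypresses
# without blocking the loop for more than the timeout (assumes a POSIX tty;
# the 'q'-to-quit convention is just for the example):
while True:
    ch = getch(timeout=0.1)
    if ch == 'q':
        break
    elif ch:
        print("you pressed %r" % ch)
    # otherwise the read timed out; do other periodic work here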
def test_eintr_infinite_timeout(wfs, spair): a, b = spair interrupt_count = [0] def handler(sig, frame): assert sig == signal.SIGALRM interrupt_count[0] += 1 def make_a_readable_after_one_second(): time.sleep(1) b.send(b"x") old_handler = signal.signal(signal.SIGALRM, handler) try: assert not wfs(a, read=True, timeout=0) start = monotonic() try: # Start delivering SIGALRM 10 times per second signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1) # Sleep for 1 second (we hope!) thread = threading.Thread(target=make_a_readable_after_one_second) thread.start() wfs(a, read=True) finally: # Stop delivering SIGALRM signal.setitimer(signal.ITIMER_REAL, 0) thread.join() end = monotonic() dur = end - start assert 0.9 < dur < 3 finally: signal.signal(signal.SIGALRM, old_handler) assert interrupt_count[0] > 0
def _reload(): global _reload_attempted _reload_attempted = True for fn in _reload_hooks: fn() if hasattr(signal, "setitimer"): # Clear the alarm signal set by # ioloop.set_blocking_log_threshold so it doesn't fire # after the exec. signal.setitimer(signal.ITIMER_REAL, 0, 0) if sys.platform == 'win32': # os.execv is broken on Windows and can't properly parse command line # arguments and executable name if they contain whitespaces. subprocess # fixes that behavior. subprocess.Popen([sys.executable] + sys.argv) sys.exit(0) else: try: os.execv(sys.executable, [sys.executable] + sys.argv) except OSError: # Mac OS X versions prior to 10.6 do not support execv in # a process that contains multiple threads. Instead of # re-executing in the current process, start a new one # and cause the current process to exit. This isn't # ideal since the new process is detached from the parent # terminal and thus cannot easily be killed with ctrl-C, # but it's better than not being able to autoreload at # all. # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + sys.argv) sys.exit(0)
def test_eintr(wfs, spair): a, b = spair interrupt_count = [0] def handler(sig, frame): assert sig == signal.SIGALRM interrupt_count[0] += 1 old_handler = signal.signal(signal.SIGALRM, handler) try: assert not wfs(a, read=True, timeout=0) start = monotonic() try: # Start delivering SIGALRM 10 times per second signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1) # Sleep for 1 second (we hope!) wfs(a, read=True, timeout=1) finally: # Stop delivering SIGALRM signal.setitimer(signal.ITIMER_REAL, 0) end = monotonic() dur = end - start assert 0.9 < dur < 3 finally: signal.signal(signal.SIGALRM, old_handler) assert interrupt_count[0] > 0
def handler(signum=None, frame=None): if len(times) < N: times.append(time.perf_counter()) # 1 µs is the smallest possible timer interval, # we want to measure what the concrete duration # will be on this platform signal.setitimer(signal.ITIMER_REAL, 1e-6)
def first_handler(signum, frame): # 1e-6 is the minimum non-zero value for `setitimer()`. # Choose a random delay so as to improve chances of # triggering a race condition. Ideally the signal is received # when inside critical signal-handling routines such as # Py_MakePendingCalls(). signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def _send(self, command, retries=5, timeout=100): fd = self._fd if len(command) != 33: raise ValueError("command must be 33 bytes long") handler = signal.signal(signal.SIGALRM, _TimeoutError.timeout) for attempt in range(retries): signal.setitimer(signal.ITIMER_REAL, timeout/1000.0) try: if LOG.isEnabledFor(logging.DEBUG): LOG.debug("Write: {}", hexlify(command[1:])) fd.write(command) fd.flush() reply = bytearray(fd.read(32)) if LOG.isEnabledFor(logging.DEBUG): LOG.debug("Recv: {}", hexlify(reply)) signal.setitimer(signal.ITIMER_REAL, 0) if reply[0] != command[1]: msg = "Expected msg type {} but got {}" raise IOError(msg.format(command[1], reply[0])) return reply[1:] except _TimeoutError: print("IO timed out, try #%d." % attempt) # time.sleep(0.000001) finally: signal.signal(signal.SIGALRM, handler) msg = "Giving up on PlasmaTrim {}" raise IOError(msg.format(self))
def start(self): segment = self.segment or AUDIO_SEGMENT_LENGTH self.num_frames = int(RATE / FRAMES_PER_BUFFER * segment) if self.seconds: signal.setitimer(signal.ITIMER_REAL, self.seconds) if self.verbose: self._timer = time.time() if self.collect: print 'Collecting RMS values...' if self.action: # Interpret threshold self.get_threshold() try: self.is_running = True while not self._graceful: self.record() # Record stream in `AUDIO_SEGMENT_LENGTH' long data = self.output.getvalue() segment = pydub.AudioSegment(data) rms = segment.rms if self.collect: self.collect_rms(rms) self.meter(rms) if self.action: if self.is_triggered(rms): self.execute() self.monitor(rms) self.is_running = False self.stop() except self.__class__.StopException: self.is_running = False self.stop()
def main(): pipe_r, pipe_w = os.pipe() flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0) flags = flags | os.O_NONBLOCK fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags) signal.signal(signal.SIGCHLD, lambda x,y: None) signal.signal(signal.SIGALRM, lambda x,y: None) signal.siginterrupt(signal.SIGCHLD,False) #makes no difference signal.siginterrupt(signal.SIGALRM,False) #makes no difference signal.set_wakeup_fd(pipe_w) signal.setitimer(signal.ITIMER_REAL, 2, 2) poller = select.epoll() poller.register(pipe_r, select.EPOLLIN) poller.register(sys.stdin, select.EPOLLIN) print "Main screen turn on" while True: events=[] try: events = poller.poll() try: for fd, flags in events: ch=os.read(fd, 1) if fd==pipe_r: sys.stdout.write( "We get Signal" ) if fd==sys.stdin.fileno(): sys.stdout.write( ch ) sys.stdout.flush() except IOError as e: print "exception loop" + str(e) except IOError as e: print "exception poll" + str(e)
def profile_signal_handler(signum, frame): if state.profile_level > 0: state.accumulate_time(clock()) sample_stack_procs(frame) signal.setitimer(signal.ITIMER_PROF, state.sample_interval, 0.0) state.last_start_time = clock()
def main(): global options if os.geteuid() != 0: sys.stderr.write("ping uses RAW SOCKETs and therefore must be run with root privileges\n") sys.exit(-1) options = parse_options(sys.argv[1:]) signal.signal(signal.SIGALRM, signanl_handler) try: sockfd = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW) sockfd.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, True) sockfd_r = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) options.sockfd = sockfd options.packet_id = os.getpid() % 0xFFFFFF options.destination = socket.gethostbyname(options.destination) options.sequence_number = 0 print("PING %s, interval %d, packet identifier %d" % (options.destination, options.i, options.packet_id)) signal.setitimer(signal.ITIMER_REAL, options.i, options.i) while True: receive_ip_packet(sockfd_r, options.packet_id) except socket.error as e: sys.stderr.write("Exception: " + e.strerror + '\n')
def setUpClass(cls): cls.orig_handler = signal.signal(signal.SIGALRM, lambda *args: None) signal.setitimer(signal.ITIMER_REAL, cls.signal_delay, cls.signal_period) # Issue #25277: Use faulthandler to try to debug a hang on FreeBSD faulthandler.dump_traceback_later(10 * 60, exit=True)
def play(self,pBoard,pDue): pBoard.print_out() print("value: %f, turn: %s" % (pBoard.evaluate(),"we" if pBoard.player() == CELL_OWN else "they")) self.free_unused_state_dicts(pBoard.numberOfPieces()) print("Length of state dict list: %d" % len(self._states)) self.max_depth = 1 move = self.a_b_search(pBoard) signal.signal(signal.SIGALRM, signal_handler) signal.setitimer(signal.ITIMER_REAL, pDue-time.time()) ultimate_max_depth = 500 continue_search = True try: for self.max_depth in range(2,ultimate_max_depth): move, continue_search = self.a_b_search(pBoard) if not continue_search: break signal.setitimer(signal.ITIMER_REAL,0) except Exception as e: print("Interrupted: " + str(e)) return move
def sleep(seconds): ''' Delay execution for a given number of seconds, or until it is requested that we stop sleeping. @param seconds:float The number of seconds to sleep ''' # Sleep only if the sleep duration is non-zero if not seconds == 0: try: with sleep_condition: # Set a sleep timer, signal.setitimer(signal.ITIMER_REAL, seconds) # and wait for it, or for something else, to # request that we stop sleeping. sleep_condition.wait() except KeyboardInterrupt: # Emulate `kill -TERM` on Control+c signal_SIGTERM(0, None) except: try: # setitimer may not be supported, # in that case, use a regular sleep time.sleep(seconds) except KeyboardInterrupt: # Emulate `kill -TERM` on Control+c signal_SIGTERM(0, None)
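# sleep() above arms ITIMER_REAL and then blocks on sleep_condition, which
# implies that a SIGALRM handler elsewhere notifies that condition. A minimal
# sketch of that missing half, assuming sleep_condition is a module-level
# threading.Condition (the handler name is illustrative):
import signal
import threading

sleep_condition = threading.Condition()

def _on_alarm(signum, frame):
    # Wake whoever is blocked in sleep() once the interval timer expires.
    with sleep_condition:
        sleep_condition.notify_all()

signal.signal(signal.SIGALRM, _on_alarm)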
def acknowledgement_handler(): # print("acknowledgement_handler started") global window_ceil, window_floor, packets_length, piped_packet, ACK, completed # Create a UDP server socket to listen for ACKs from the FTP server ack_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) ack_sock.bind((ACK_HOST, ACK_PORT)) # Listen forever, until the final ACK is received while True: # Receive a ACK message from the server server_message = ack_sock.recv(65535) # print("server message", server_message) fields = pickle.loads(server_message) # print("ACK", fields) # Check if the type of message is ACK if fields[2] == TYPE_ACK: # Get the ACK number from fields[0] ACK = fields[0] # If ACK is a valid field # print("ack num:", ACK) if ACK > -1: # synchronize thread_lock.acquire() # If received ACK is higher than the number of packets, implies all transferred if ACK == packets_length: # print("if 1") eof_pkt_list = ["0", "0", TYPE_EOF, "0"] eof_pkt = pickle.dumps(eof_pkt_list) client_socket.sendto(eof_pkt, (SERVER_HOST, SERVER_PORT)) # print("All packets sent!") thread_lock.release() completed = True break # Check the ACK limits elif packets_length > ACK >= window_floor: # print("if 2") # Reset the timer signal.alarm(0) signal.setitimer(signal.ITIMER_REAL, RTT) # Number of ACKed packets number_of_acked = ACK - window_floor window_floor = ACK # print("num of ack ", number_of_acked) # Update the window ceiling outdate_ceil = window_ceil window_ceil = min(window_ceil + number_of_acked, packets_length) # print("if 2a ", window_floor, outdate_ceil, window_ceil, piped_packet) # Send out new packets that have seq number between old_ceil and new window_ceil # Using window_ceil - outdate_ceil because the number_of_acked doesn't always give the right remaining packets for i in range(window_ceil - outdate_ceil): # print("piped", piped_packet) send_packet(piped_packet) if piped_packet < packets_length - 1: piped_packet += 1 thread_lock.release()
def __setup_log_status_timer(self): def __timer_handler(signum, frame): logging.info(self.__arm_info.get_summary()) signal.signal(signal.SIGALRM, __timer_handler) interval = self.__status_log_interval signal.setitimer(signal.ITIMER_REAL, interval, interval)
def __exit__(self, unused_type=None, unused_value=True, unused_traceback=None): if self._autotick: signal.setitimer(signal.ITIMER_VIRTUAL, *self._old_itimer) signal.signal(signal.SIGVTALRM, self._old_handler) if not self.TimedOut(): self._WriteMark(' ')
def reload_server(io_loop): # wait for other files to be written time.sleep(0.5) for fd in io_loop._handlers.keys(): try: os.close(fd) except: pass if hasattr(signal, "setitimer"): # Clear the alarm signal set by # ioloop.set_blocking_log_threshold so it doesn't fire # after the exec. signal.setitimer(signal.ITIMER_REAL, 0, 0) try: os.execv(sys.executable, [sys.executable] + sys.argv) except OSError: # Mac OS X versions prior to 10.6 do not support execv in # a process that contains multiple threads. Instead of # re-executing in the current process, start a new one # and cause the current process to exit. This isn't # ideal since the new process is detached from the parent # terminal and thus cannot easily be killed with ctrl-C, # but it's better than not being able to autoreload at # all. # Unfortunately the errno returned in this case does not # appear to be consistent, so we can't easily check for # this error specifically. os.spawnv(os.P_NOWAIT, sys.executable, [sys.executable] + sys.argv) sys.exit(0)
def run(self): """When the consumer is ready to start running, kick off all of our consumers and then loop while we process messages. """ self.set_state(self.STATE_ACTIVE) self.setup_consumers() # Set the SIGCHLD handler for child creation errors signal.signal(signal.SIGCHLD, self.on_sigchld) # Set the SIGALRM handler for poll interval signal.signal(signal.SIGALRM, self.on_timer) # Kick off the poll timer signal.setitimer(signal.ITIMER_REAL, self.poll_interval, 0) # Loop for the lifetime of the app, pausing for a signal to pop up while self.is_running: if not self.is_sleeping: self.set_state(self.STATE_SLEEPING) signal.pause() # Note we're exiting run LOGGER.info('Exiting Master Control Program')
def stop(self): """Shutdown the MCP and child processes cleanly""" LOGGER.info('Shutting down controller') self.set_state(self.STATE_STOP_REQUESTED) # Clear out the poll timer (armed on ITIMER_REAL in run) signal.setitimer(signal.ITIMER_REAL, 0, 0) self._mcp.stop_processes() if self._mcp.is_running: LOGGER.info('Waiting up to 3 seconds for MCP to shut things down') signal.setitimer(signal.ITIMER_REAL, 3, 0) signal.pause() LOGGER.info('Post pause') # Force MCP to stop if self._mcp.is_running: LOGGER.warning('MCP is taking too long, requesting process kills') self._mcp.stop_processes() del self._mcp else: LOGGER.info('MCP exited cleanly') # Change our state self._stopped() LOGGER.info('Shutdown complete')
def read(judge): nextstep = judge.nextstep; p = judge.popen[nextstep]; #test if the program was initialized successfully if (p == None): print "read: can't find program, stop reading" return None; if (judge.info[nextstep] == None): info = ""; else: info = judge.info[nextstep]; success = True; print info #try to communicate with the program try: signal.signal(signal.SIGALRM, read); signal.setitimer(signal.ITIMER_REAL, judge.runtime); p.stdin.write(info); p.stdin.flush(); s = p.stdout.readline(), print "read: success" except: success = False; print "read: timeout try:", finally: signal.setitimer(signal.ITIMER_REAL, 0); #if the program can't return an answer #the program will be killed and restarted if (success == False): p.kill(); judge.popen[nextstep] = open(judge.name[nextstep], judge.inittime); return None; return s;
def start(self): 'start sampling interrupts' if self.started: return self.started = True self.rollback = signal.signal(self.signal, self._resample) signal.setitimer(self.which, 0.01 * (1 + random.random()))
def setup_reporter(processor, arguments): if arguments['--no-follow']: return global LOGGING_SAMPLES if LOGGING_SAMPLES is None: scr = curses.initscr() atexit.register(curses.endwin) def print_report(sig, frame): global LOGGING_SAMPLES output = processor.report() if LOGGING_SAMPLES is None: scr.erase() try: scr.addstr(output) except curses.error: pass scr.refresh() else: print(output) LOGGING_SAMPLES -= 1 if LOGGING_SAMPLES == 0: sys.exit(0) signal.signal(signal.SIGALRM, print_report) interval = float(arguments['--interval']) signal.setitimer(signal.ITIMER_REAL, 0.1, interval)
def _sample(self, _, _2): for frame in sys._current_frames().values(): formatted_stack = self._format_stack(frame) if formatted_stack: self.stack_counts[formatted_stack] += 1 if self._started: signal.setitimer(signal.ITIMER_PROF, self.interval, 0)
def run(self): ctx = zmq.Context() sock = ctx.socket(zmq.PAIR) sock.connect(IPC_ADDR) # Set up interval timer with a dummy handler signal.signal(signal.SIGALRM, self._dummyHandler) signal.setitimer(signal.ITIMER_REAL, self.interval, self.interval) while True: # Wait for the timer signal.pause() try: # Non-blocking recv (comm, data) = sock.recv_multipart(flags=zmq.NOBLOCK) except zmq.Again: # No data ready, go back to wait for the timer continue # Handling command if comm == b"say": print("You want me to say: {}".format(data)) elif comm == b"quit": print("Goodbye!") break
def open(name, inittime): #build the command based on the type of the program execmd = ""; if (name[-1] == "y"): execmd = "python " + name; if (name[-1] == "e"): execmd = "./" + name; if (name[-1] == 's'): execmd = "java " + name; #try to init the popen of the program try: p = subprocess.Popen(execmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False); except: print "init: can't find program" return None; #try to run the program; if it times out, close the program success = True; try: signal.signal(signal.SIGALRM, open); signal.setitimer(signal.ITIMER_REAL, inittime); p.stdout.readline(), print "init success" except: success = False; print "init timeout" finally: signal.setitimer(signal.ITIMER_REAL, 0); if (success == False): p.kill(); return None; else: return p;
def __move_straight(self): RPIO.setup(CONST_STRAIGHT, RPIO.OUT, initial=RPIO.LOW) signal.setitimer(signal.ITIMER_REAL, 0.01, 0.5)
def tearDown(self): signal.signal(signal.SIGALRM, self.old_alarm) if self.itimer is not None: # test_itimer_exc doesn't change this attr # just ensure that itimer is stopped signal.setitimer(self.itimer, 0)
def handler(signum, frame): global sheet1 global book global x global now if GPIO.input(rf) == 0: print("connect your seatbelt") cb3.select() T.delete('1.0', END) T.insert(END, "apply seat belt") else: if GPIO.input(alco) == 0: print("Dont drive the car") cb3.select() T.delete('1.0', END) T.insert(END, "alcohol detected") else: #print("alco not detected") cb3.deselect() cb2.deselect() cb1.select() global c global count1, count2, count3, count4, count5, count6 T.delete('1.0', END) a = c.query(rp, force=True) i = c.query(sp, force=True) aa = c.query(el, force=True) ab = c.query(tem, force=True) ac = c.query(alt_vtg, force=True) ad = c.query(dwm, force=True) sheet1.write(x, 0, str(a)) sheet1.write(x, 1, str(i)) sheet1.write(x, 2, str(aa)) sheet1.write(x, 3, str(ab)) sheet1.write(x, 4, str(ac)) sheet1.write(x, 5, str(ad)) #print 'rpm =',a.value #print 'speed=',i.value #print 'engine_load=',aa.value #print 'temp=',ab.value #print 'alt_vtg=',ac.value #print 'dwm=',ad.value T1.delete('1.0', END) T1.insert(END, str(i)) T2.delete('1.0', END) T2.insert(END, str(a)) #print x if GPIO.input(gear1) == 1: T3.delete('1.0', END) T3.insert(END, "1") sheet1.write(x, 6, 'Gear 1') if i.value > 15: T.insert(END, "Speed is exceeding change the gear\n") sheet1.write(x, 12, "Speed is exceeding") count1 = count1 + 1 cb2.select() elif GPIO.input(gear2) == 1: T3.delete('1.0', END) T3.insert(END, "2") sheet1.write(x, 6, 'Gear 2') if i.value > 30: T.insert(END, "Speed is exceeding change the gear\n") sheet1.write(x, 12, "Speed is exceeding") count1 = count1 + 1 cb2.select() elif GPIO.input(gear3) == 1: T3.delete('1.0', END) T3.insert(END, "3") sheet1.write(x, 6, 'Gear 3') if i.value > 40: T.insert(END, "Speed is exceeding change the gear\n") sheet1.write(x, 12, "Speed is exceeding") count1 = count1 + 1 cb2.select() elif GPIO.input(gear4) == 1: T3.delete('1.0', END) T3.insert(END, "4") sheet1.write(x, 6, 'Gear 4') if i.value > 60: T.insert(END, "Speed is exceeding change the gear\n") sheet1.write(x, 12, "Speed is exceeding") count1 = count1 + 1 cb2.select() elif GPIO.input(gear5) == 1: T3.delete('1.0', END) T3.insert(END, "5") sheet1.write(x, 6, 'Gear 5') if i.value > 80: T.insert(END, "Speed is exceeding\n") sheet1.write(x, 12, "Speed is exceeding") count1 = count1 + 1 cb2.select() elif GPIO.input(rev) == 1: T3.delete('1.0', END) T3.insert(END, "R") sheet1.write(x, 6, 'Reverse Gear') if i.value > 30: T.insert(END, "Speed is exceeding\n") sheet1.write(x, 12, "Speed is exceeding") count1 = count1 + 1 cb2.select() else: T3.delete('1.0', END) T3.insert(END, "N") sheet1.write(x, 6, 'Neutral') if a.value > 2200: #print 'RPM IS HIGHER AND AFFECTING FUEL ECONOMY' T.insert(END, "RPM IS HIGHER AND AFFECTING FUEL ECONOMY\n") sheet1.write(x, 11, 'RPM IS HIGHER') count2 = count2 + 1 cb2.select() if aa.value < 30: if a.value > 2200: #print 'Release the clutch or accelerator pedal ' T.insert(END, "Release the clutch or accelerator pedal\n") count3 = count3 + 1 sheet1.write(x, 8, 'release the cutch or acclerator') cb2.select() if ac.value > 14: #print 'Alternator voltage is greater ,and it is affecting battery' T.insert( END, "Alternator voltage is greater ,and it is affecting battery\n" ) count4 = count4 + 1 cb3.select() sheet1.write(x, 9, 'Alternator voltage is greater') if ac.value < 14: #print 'Alternator voltage is lower ,and it is not charging battery' T.insert( END, "Alternator voltage is lower ,and it is not charging battery\n" ) count5 = count5 + 1 sheet1.write(x, 10, 'Alternator voltage is lower') cb3.select() if ab.value > 
98: #& i.value == 0 : #print 'check the coolant and visit to service centre' T.insert(END, "Check the coolant and visit to service centre\n") count6 = count6 + 1 sheet1.write(x, 7, 'check the coolant') cb3.select() book.save("/home/pi/Desktop/program1/" + now + ".xls") x = x + 1 signal.signal(signal.SIGALRM, handler) signal.setitimer(signal.ITIMER_REAL, 0.001)
def stopProgram(): global count1, count2, count3, count4, count5, count6 signal.setitimer(signal.ITIMER_REAL, 0) print('gear error=', count1) print('RPM error=', count2) print('clutch error=', count3) print('alternator voltage greater=', count4) print('battery not charging=', count5) print('coolant temp=', count6) sheet1.write(1, 13, str(count1)) sheet1.write(1, 14, str(count2)) sheet1.write(1, 15, str(count3)) sheet1.write(1, 16, str(count4)) sheet1.write(1, 17, str(count5)) sheet1.write(1, 18, str(count6)) book.save("/home/pi/Desktop/program1/" + now + ".xls") root = Tk() root.title("Bar Graph") count4 = count4 + count5 z_width = 320 z_height = 180 z = Canvas(root, width=z_width, height=z_height) z.pack() if ((count1 > 400) | (count2 > 400) | (count3 > 400) | (count4 > 400) | (count6 > 400)): q = 6 z.create_text(20, 127, text="200") z.create_text(20, 93, text="400") z.create_text(20, 60, text="600") z.create_text(20, 27, text="800") z.create_text(20, 10, text="Count") else: q = 3 z.create_text(20, 127, text="100") z.create_text(20, 93, text="200") z.create_text(20, 60, text="300") z.create_text(20, 27, text="400") z.create_text(20, 10, text="Count") speed1 = 160 - (count1 / q) rpm1 = 160 - (count2 / q) C_T1 = 160 - (count6 / q) Alt_Vtg1 = 160 - (count4 / q) Clutch1 = 160 - (count3 / q) z.create_rectangle(60, speed1, 90, 160, fill="red") z.create_text(75, 168, text="speed") z.create_rectangle(100, rpm1, 130, 160, fill="blue") z.create_text(115, 168, text="rpm") z.create_rectangle(140, C_T1, 170, 160, fill="green") z.create_text(155, 168, text="C_T") z.create_rectangle(180, Alt_Vtg1, 210, 160, fill="orange") z.create_text(195, 168, text="Alt_vtg") z.create_rectangle(220, Clutch1, 250, 160, fill="yellow") z.create_text(255, 168, text="clutch error") z.create_line(0, 160, 500, 160) z.create_line(40, 0, 40, 500) z.create_text(75, (speed1 - 10), text=count1) z.create_text(115, (rpm1 - 10), text=count2) z.create_text(155, (C_T1 - 10), text=count6) z.create_text(195, (Alt_Vtg1 - 10), text=count4) z.create_text(235, (Clutch1 - 10), text=count3) root.after(30000, lambda: root.destroy()) root.mainloop()
def alarm_itimer(seconds): signal.setitimer(signal.ITIMER_REAL, seconds)
def test_itimer_real(self): self.itimer = signal.ITIMER_REAL signal.setitimer(self.itimer, 1.0) signal.pause() self.assertEqual(self.hndl_called, True)
def sig_prof(self, *args): self.hndl_called = True signal.setitimer(signal.ITIMER_PROF, 0)
def sig_prof(self, *args): self.hndl_called = True signal.setitimer(signal.ITIMER_PROF, 0) if support.verbose: print("SIGPROF handler invoked", args)
def stop_alarm(cls): signal.setitimer(signal.ITIMER_REAL, 0, 0)
def __enter__(self): if self._autotick: self._old_handler = signal.signal(signal.SIGVTALRM, self._Spin) self._old_itimer = signal.setitimer( signal.ITIMER_VIRTUAL, self._tick_delay, self._tick_delay) return self
def cleanup(): if hasattr(signal, "setitimer"): signal.setitimer(signal.ITIMER_REAL, 0) else: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler)
buf = f.read() buf_b64e = base64.b64encode(buf) URL = URL_PREFIX + buf_b64e q.put((time_stamp, URL)) # with open("URL.txt", "w") as fw: # fw.write(URL) # system("firefox " + URL.replace(';', '\;') + buf_b64e) def cyclic_task(signum, frame): do_capture() def sigint_handler(signum, frame): global cap print("[*] terminating webcam client...") cap.release() exit() signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGALRM, cyclic_task) signal.setitimer(signal.ITIMER_REAL, 1, 1) s = socket(AF_INET) s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) print("[*] connecting to %s:%s" % (HOST, PORT)) s.connect((HOST, PORT)) while True: sleep(1) if not q.empty(): time_stamp, capture = q.get() s.sendall(' '.join([time_stamp, capture]) + "\n") s.close()
def alarm(secs): if hasattr(signal, "setitimer"): signal.setitimer(signal.ITIMER_REAL, secs) else: signal.alarm(math.ceil(secs))
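# How a portability helper like alarm() above is typically used: pair it with a
# SIGALRM handler that raises, so a blocking call is bounded with sub-second
# granularity where setitimer() exists and whole seconds otherwise (a sketch;
# the handler and wrapper names are assumptions, not part of the snippet):
import signal

def _timed_out(signum, frame):
    raise TimeoutError("operation exceeded the allotted time")

def run_with_deadline(fn, secs):
    old = signal.signal(signal.SIGALRM, _timed_out)
    alarm(secs)                  # arms ITIMER_REAL, or falls back to signal.alarm()
    try:
        return fn()
    finally:
        alarm(0)                 # disarm before restoring the previous handler
        signal.signal(signal.SIGALRM, old)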
def __enter__(self): signal.signal(signal.SIGALRM, self.timeout) signal.setitimer(signal.ITIMER_REAL, self.seconds) return self
global loop, timer_triggered LogMsg("Got triggered by SIGALRM.") if loop: timer_triggered = True UpdateSNMPObjsAsync() signal.signal(signal.SIGALRM, AlarmHandler) signal.setitimer(signal.ITIMER_REAL, float(options.interval)) msg = "Installing SIGALRM handler triggered every {0} seconds." msg = msg.format(options.interval) LogMsg(msg) signal.signal(signal.SIGALRM, AlarmHandler) signal.setitimer(signal.ITIMER_REAL, float(options.interval)) # The threading agent's main loop. We loop endlessly until our signal # handler above changes the "loop" variable. LogMsg("Now serving SNMP requests, press ^C to terminate.") loop = True while loop: # Block until something happened (signal arrived, SNMP packets processed) timer_triggered = False res = agent.check_and_process() if res == -1 and not timer_triggered and loop: loop = False LogMsg("Error {0} in SNMP packet processing!".format(res)) elif loop and timer_triggered: LogMsg("net-snmp's check_and_process() returned due to SIGALRM (res={0}), doing another loop.".format(res))
def __exit__(self, exc_type, exc_val, exc_tbf): """Disables statistical profiler.""" self.run_time = time.time() - self._start_time signal.setitimer(signal.ITIMER_PROF, 0)
def __exit__(self, type, value, traceback): signal.setitimer(signal.ITIMER_REAL, 0) return False
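# Several snippets here split a SIGALRM timeout across __enter__/__exit__; this
# is a self-contained sketch of that context-manager pattern, assuming a handler
# that raises the built-in TimeoutError (the class name is illustrative):
import signal

class Deadline(object):
    def __init__(self, seconds):
        self.seconds = seconds

    def _timeout(self, signum, frame):
        raise TimeoutError("timed out after %s seconds" % self.seconds)

    def __enter__(self):
        self._old = signal.signal(signal.SIGALRM, self._timeout)
        signal.setitimer(signal.ITIMER_REAL, self.seconds)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        signal.setitimer(signal.ITIMER_REAL, 0)   # disarm the timer
        signal.signal(signal.SIGALRM, self._old)  # restore the previous handler
        return False

# Usage: bound a blocking call to 2.5 seconds of wall-clock time.
# with Deadline(2.5):
#     do_something_blocking()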
def arm_alarm(self, seconds): _signal.setitimer(_signal.ITIMER_REAL, seconds)
def do_work(io_ratio): workload = [do_io if random.random() < io_ratio else do_compute for _ in range(10)] signal.setitimer(signal.ITIMER_VIRTUAL, 0.1, 0.1) for work in workload: work() signal.setitimer(signal.ITIMER_VIRTUAL, 0, 0)
elif firstInWindow <= ackNum <= lastInWindow: sendBuffer[ackNum % windowSize] = None timeoutTimers[ackNum % windowSize] = 0 numAcked += 1 if sendComplete and numAcked >= lastInWindow: print ("bum") ackedComplete = True # Start thread looking for acknowledgements threadForAck = threading.Thread(target=LookforACKs, args=()) threadForAck.start() signal.signal(signal.SIGALRM, Signalhandler) signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01) firstInWindow = 0 # Send packets while not sendComplete: toSend = lastInWindow + 1 msg = GetMessage() header = int('0101010101010101', 2) cs = pack('IH' + str(len(msg)) + 's', seqNum, header, msg) checksum = CalculateChecksum(cs) packet = pack('IHH' + str(len(msg))+'s', seqNum, checksum, header, msg) if toSend < windowSize: sendBuffer.append(packet) timeoutTimers.append(TIMEOUT)
def __enter__(self): """Enables statistical profiler.""" signal.signal(signal.SIGPROF, self.sample) signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL) self._start_time = time.time() return self
except (OSError, IOError), e: if e.args[0] == errno.EPIPE: # Happens when the client closes the connection pass else: logging.error("Exception in I/O handler for fd %d", fd, exc_info=True) except Exception: logging.error("Exception in I/O handler for fd %d", fd, exc_info=True) # reset the stopped flag so another start/stop pair can be issued self._stopped = False if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, 0, 0) def stop(self): """Stop the loop after the current event loop iteration is complete. If the event loop is not currently running, the next call to start() will return immediately. To use asynchronous methods from otherwise-synchronous code (such as unit tests), you can start and stop the event loop like this:: ioloop = IOLoop() async_method(ioloop=ioloop, callback=ioloop.stop) ioloop.start() ioloop.start() will return after async_method has run its callback, whether that callback was invoked before or after ioloop.start.
def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True, stdin = None): """ Initializes a tor process. This blocks until initialization completes or we error out. If tor's data directory is missing or stale then bootstrapping will include making several requests to the directory authorities which can take a little while. Usually this is done in 50 seconds or so, but occasionally calls seem to get stuck, taking well over the default timeout. **To work, tor must log at NOTICE runlevel to stdout.** It does this by default, but if you have a 'Log' entry in your torrc then you'll also need 'Log NOTICE stdout'. Note: The timeout argument does not work on Windows or when outside the main thread, and relies on the global state of the signal module. .. versionchanged:: 1.6.0 Allowing the timeout argument to be a float. .. versionchanged:: 1.7.0 Added the **close_output** argument. :param str tor_cmd: command for starting tor :param list args: additional arguments for tor :param str torrc_path: location of the torrc for us to use :param int completion_percent: percent of bootstrap completion at which this'll return :param functor init_msg_handler: optional functor that will be provided with tor's initialization stdout as we get it :param int timeout: time after which the attempt to start tor is aborted, no timeouts are applied if **None** :param bool take_ownership: asserts ownership over the tor process so it aborts if this python process terminates or a :class:`~stem.control.Controller` we establish to it disconnects :param bool close_output: closes tor's stdout and stderr streams when bootstrapping is complete if true :param str stdin: content to provide on stdin :returns: **subprocess.Popen** instance for the tor subprocess :raises: **OSError** if we either fail to create the tor process or reached a timeout without success """ if stem.util.system.is_windows(): if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT: raise OSError('You cannot launch tor with a timeout on Windows') timeout = None elif threading.current_thread().__class__.__name__ != '_MainThread': if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT: raise OSError('Launching tor with a timeout can only be done in the main thread') timeout = None # sanity check that we got a tor binary if os.path.sep in tor_cmd: # got a path (either relative or absolute), check what it leads to if os.path.isdir(tor_cmd): raise OSError("'%s' is a directory, not the tor executable" % tor_cmd) elif not os.path.isfile(tor_cmd): raise OSError("'%s' doesn't exist" % tor_cmd) elif not stem.util.system.is_available(tor_cmd): raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?"
% tor_cmd) # double check that we have a torrc to work with if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path): raise OSError("torrc doesn't exist (%s)" % torrc_path) # starts a tor subprocess, raising an OSError if it fails runtime_args, temp_file = [tor_cmd], None if args: runtime_args += args if torrc_path: if torrc_path == NO_TORRC: temp_file = tempfile.mkstemp(prefix = 'empty-torrc-', text = True)[1] runtime_args += ['-f', temp_file] else: runtime_args += ['-f', torrc_path] if take_ownership: runtime_args += ['__OwningControllerProcess', str(os.getpid())] tor_process = None try: tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE) if stdin: tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin)) tor_process.stdin.close() if timeout: def timeout_handler(signum, frame): raise OSError('reached a %i second timeout without success' % timeout) signal.signal(signal.SIGALRM, timeout_handler) signal.setitimer(signal.ITIMER_REAL, timeout) bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ') problem_line = re.compile('\[(warn|err)\] (.*)$') last_problem = 'Timed out' while True: # Tor's stdout will be read as ASCII bytes. This is fine for python 2, but # in python 3 that means it'll mismatch with other operations (for instance # the bootstrap_line.search() call later will fail). # # It seems like python 2.x is perfectly happy for this to be unicode, so # normalizing to that. init_line = tor_process.stdout.readline().decode('utf-8', 'replace').strip() # this will provide empty results if the process is terminated if not init_line: raise OSError('Process terminated: %s' % last_problem) # provide the caller with the initialization message if they want it if init_msg_handler: init_msg_handler(init_line) # return the process if we're done with bootstrapping bootstrap_match = bootstrap_line.search(init_line) problem_match = problem_line.search(init_line) if bootstrap_match and int(bootstrap_match.group(1)) >= completion_percent: return tor_process elif problem_match: runlevel, msg = problem_match.groups() if 'see warnings above' not in msg: if ': ' in msg: msg = msg.split(': ')[-1].strip() last_problem = msg except: if tor_process: tor_process.kill() # don't leave a lingering process tor_process.wait() raise finally: if timeout: signal.alarm(0) # stop alarm if tor_process and close_output: if tor_process.stdout: tor_process.stdout.close() if tor_process.stderr: tor_process.stderr.close() if temp_file: try: os.remove(temp_file) except: pass
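# A hedged usage sketch for the launcher defined above, assuming it is imported
# as stem.process.launch_tor; the bootstrap-printing callback is illustrative:
import stem.process

def print_bootstrap_lines(line):
    if 'Bootstrapped ' in line:
        print(line)

tor_process = stem.process.launch_tor(
    timeout=90,                             # abort if bootstrapping stalls
    take_ownership=True,                    # tor exits when this process does
    init_msg_handler=print_bootstrap_lines,
)
# ... use the running tor instance ...
tor_process.kill()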
def start(handler, interval=1): signal.signal(signal.SIGPROF, handler) signal.setitimer(signal.ITIMER_PROF, interval, interval) atexit.register(lambda: signal.setitimer(signal.ITIMER_PROF, 0))
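# A sampling handler that could be passed to the start() helper above: on each
# SIGPROF it records the current stack of every thread, in the spirit of the
# other ITIMER_PROF snippets in this collection (the Counter and the frame
# formatting are illustrative assumptions):
import collections
import sys
import traceback

stack_counts = collections.Counter()

def sample_stacks(signum, frame):
    # Invoked roughly every `interval` seconds of process CPU time.
    for thread_frame in sys._current_frames().values():
        stack = ';'.join(
            '%s:%d:%s' % (fs.filename, fs.lineno, fs.name)
            for fs in traceback.extract_stack(thread_frame)
        )
        stack_counts[stack] += 1

# start(sample_stacks, interval=0.01)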
def start(self): """Starts the I/O loop. The loop will run until one of the I/O handlers calls stop(), which will make the loop stop after the current event iteration completes. """ if self._stopped: self._stopped = False return self._thread_ident = thread.get_ident() self._running = True while True: # Never use an infinite timeout here - it can stall epoll poll_timeout = 0.2 # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. with self._callback_lock: callbacks = self._callbacks self._callbacks = [] for callback in callbacks: self._run_callback(callback) if self._timeouts: now = time.time() while self._timeouts: if self._timeouts[0].callback is None: # the timeout was cancelled heapq.heappop(self._timeouts) elif self._timeouts[0].deadline <= now: timeout = heapq.heappop(self._timeouts) self._run_callback(timeout.callback) else: milliseconds = self._timeouts[0].deadline - now poll_timeout = min(milliseconds, poll_timeout) break if self._callbacks: # If any callbacks or timeouts called add_callback, # we don't want to wait in poll() before we run them. poll_timeout = 0.0 if not self._running: break if self._blocking_signal_threshold is not None: # clear alarm so it doesn't fire while poll is waiting for # events. signal.setitimer(signal.ITIMER_REAL, 0, 0) try: event_pairs = self._impl.poll(poll_timeout) except Exception, e: # Depending on python version and IOLoop implementation, # different exception types may be thrown and there are # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') if (getattr(e, 'errno', None) == errno.EINTR or (isinstance(getattr(e, 'args', None), tuple) and len(e.args) == 2 and e.args[0] == errno.EINTR)): continue else: raise if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, self._blocking_signal_threshold, 0) # Pop one fd at a time from the set of pending fds and run # its handler. Since that handler may perform actions on # other file descriptors, there may be reentrant calls to # this IOLoop that update self._events self._events.update(event_pairs) while self._events: fd, events = self._events.popitem() try: self._handlers[fd](fd, events) except (OSError, IOError), e: if e.args[0] == errno.EPIPE: # Happens when the client closes the connection pass else: logging.error("Exception in I/O handler for fd %d", fd, exc_info=True) except Exception: logging.error("Exception in I/O handler for fd %d", fd, exc_info=True)
# -*- encoding: utf-8 -*- import signal def SampleHandler(sig_count, current_frame): print sig_count, current_frame return signal.signal(signal.SIGVTALRM, SampleHandler) signal.setitimer(signal.ITIMER_VIRTUAL, 0.01, 0.01)
def start(self): """Starts the I/O loop. The loop will run until one of the I/O handlers calls stop(), which will make the loop stop after the current event iteration completes. """ if not logging.getLogger().handlers: # The IOLoop catches and logs exceptions, so it's # important that log output be visible. However, python's # default behavior for non-root loggers (prior to python # 3.2) is to print an unhelpful "no handlers could be # found" message rather than the actual log entry, so we # must explicitly configure logging if we've made it this # far without anything. logging.basicConfig() if self._stopped: self._stopped = False return old_current = getattr(IOLoop._current, "instance", None) IOLoop._current.instance = self self._thread_ident = thread.get_ident() self._running = True # signal.set_wakeup_fd closes a race condition in event loops: # a signal may arrive at the beginning of select/poll/etc # before it goes into its interruptible sleep, so the signal # will be consumed without waking the select. The solution is # for the (C, synchronous) signal handler to write to a pipe, # which will then be seen by select. # # In python's signal handling semantics, this only matters on the # main thread (fortunately, set_wakeup_fd only works on the main # thread and will raise a ValueError otherwise). # # If someone has already set a wakeup fd, we don't want to # disturb it. This is an issue for twisted, which does its # SIGCHILD processing in response to its own wakeup fd being # written to. As long as the wakeup fd is registered on the IOLoop, # the loop will still wake up and everything should work. old_wakeup_fd = None if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': # requires python 2.6+, unix. set_wakeup_fd exists but crashes # the python process on windows. try: old_wakeup_fd = signal.set_wakeup_fd( self._waker.write_fileno()) if old_wakeup_fd != -1: # Already set, restore previous value. This is a little racy, # but there's no clean get_wakeup_fd and in real use the # IOLoop is just started once at the beginning. signal.set_wakeup_fd(old_wakeup_fd) old_wakeup_fd = None except ValueError: # non-main thread pass while True: poll_timeout = 3600.0 # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. with self._callback_lock: callbacks = self._callbacks self._callbacks = [] for callback in callbacks: self._run_callback(callback) if self._timeouts: now = self.time() while self._timeouts: if self._timeouts[0].callback is None: # the timeout was cancelled heapq.heappop(self._timeouts) elif self._timeouts[0].deadline <= now: timeout = heapq.heappop(self._timeouts) self._run_callback(timeout.callback) else: seconds = self._timeouts[0].deadline - now poll_timeout = min(seconds, poll_timeout) break if self._callbacks: # If any callbacks or timeouts called add_callback, # we don't want to wait in poll() before we run them. poll_timeout = 0.0 if not self._running: break if self._blocking_signal_threshold is not None: # clear alarm so it doesn't fire while poll is waiting for # events. 
signal.setitimer(signal.ITIMER_REAL, 0, 0) try: event_pairs = self._impl.poll(poll_timeout) except Exception, e: # Depending on python version and IOLoop implementation, # different exception types may be thrown and there are # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') if (getattr(e, 'errno', None) == errno.EINTR or (isinstance(getattr(e, 'args', None), tuple) and len(e.args) == 2 and e.args[0] == errno.EINTR)): continue else: raise if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, self._blocking_signal_threshold, 0) # Pop one fd at a time from the set of pending fds and run # its handler. Since that handler may perform actions on # other file descriptors, there may be reentrant calls to # this IOLoop that update self._events self._events.update(event_pairs) while self._events: fd, events = self._events.popitem() try: self._handlers[fd](fd, events) except (OSError, IOError), e: if e.args[0] == errno.EPIPE: # Happens when the client closes the connection pass else: app_log.error("Exception in I/O handler for fd %s", fd, exc_info=True) except Exception: app_log.error("Exception in I/O handler for fd %s", fd, exc_info=True)
def execute_with_timeout( fn, args=None, kwargs=None, timeout=None, fail_if_no_timer=True, signal_type=_default_signal_type, timer_type=_default_timer_type, timeout_exception_cls=TimeoutError, ): """ Executes the specified function with a timeout. Uses SIGALRM to interrupt it. :type fn: function :param fn: function to execute :type args: tuple :param args: function args :type kwargs: dict :param kwargs: function kwargs :type timeout: float :param timeout: timeout, seconds; 0 or None means no timeout :type fail_if_no_timer: bool :param fail_if_no_timer: fail if the timer is not available; normally it is available only in the main thread :type signal_type: signalnum :param signal_type: type of signal to use (see signal module) :type timer_type: signal.ITIMER_REAL, signal.ITIMER_VIRTUAL or signal.ITIMER_PROF :param timer_type: type of timer to use (see signal module) :type timeout_exception_cls: class :param timeout_exception_cls: exception to throw in case of timeout :return: fn call result. """ if args is None: args = empty_tuple if kwargs is None: kwargs = empty_dict if timeout is None or timeout == 0 or signal_type is None or timer_type is None: return fn(*args, **kwargs) def signal_handler(signum, frame): raise timeout_exception_cls(inspection.get_function_call_str(fn, args, kwargs)) old_signal_handler = none timer_is_set = False try: try: old_signal_handler = signal.signal(signal_type, signal_handler) signal.setitimer(timer_type, timeout) timer_is_set = True except ValueError: if fail_if_no_timer: raise NotSupportedError( "Timer is not available; the code is probably invoked from outside the main " "thread." ) return fn(*args, **kwargs) finally: if timer_is_set: signal.setitimer(timer_type, 0) if old_signal_handler is not none: signal.signal(signal_type, old_signal_handler)
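# Illustrative call of execute_with_timeout() above; slow_lookup and the
# 2-second budget are assumptions made for the example:
import time

def slow_lookup(host, retries=3):
    time.sleep(10)              # stand-in for a blocking network call
    return host, retries

try:
    result = execute_with_timeout(slow_lookup, args=('example.com',),
                                  kwargs={'retries': 1}, timeout=2.0)
except TimeoutError:
    result = None               # interrupted by SIGALRM after roughly 2 seconds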
def apply_celery_task_with_retry( task_func, args=None, kwargs=None, max_retries=5, countdown=10, time_limit=None ): """ When executing a (bind=True) task synchronously (with `mytask.apply()` or just calling it as a function `mytask()`) the `self.retry()` does not work, but the original exception is raised (without any retry) so you "lose" the exception management logic written in the task code. This function overcome such limitation. Example: # Celery task: @shared_task(bind=True) def normalize_name_task(self, first_name, last_name, nick_name=''): try: result = ... network call ... except RequestException as exc: exception = None raise self.retry(max_retries=3, countdown=5, exc=exception) return result # Call the task sync with retry: result = apply_celery_task_with_retry( normalize_name_task, args=('John', 'Doe'), kwargs=dict(nick_name='Dee'), max_retries=2, countdown=5*60, time_limit=2*60*60 ) Note: it assumes that @shared_task is the first (the one on top) decorator for the Celery task. Args: task_func: Celery task function to be run. args: the positional arguments to pass on to the task. kwargs: the keyword arguments to pass on to the task. max_retries: maximum number of retries before raising MaxRetriesExceededError. countdown: hard time limit for each attempt. If the last attempt It can be a callable, eg: backoff = lambda retry_count: 2 ** (retry_count + 1) apply_celery_task_with_retry(..., countdown=backoff) time_limit: hard time limit for each single attempt in seconds. If the last attempt fails because of the time limit, raises TimeLimitExceeded. Returns: what the task_func returns. """ def handler(signum, frame): raise TimeLimitExceeded args = args or tuple() kwargs = kwargs or dict() retry_mixin = RetryMixin() retry_mixin.request.retries = 0 time_limit_exceeded = False # Get the actual function decorated by @shared_task(bind=True). unbound_func = task_func.__wrapped__.__func__ for _ in range(max_retries + 1): try: if time_limit: signal.signal(signal.SIGALRM, handler) signal.setitimer(signal.ITIMER_REAL, time_limit) result = unbound_func(retry_mixin, *args, **kwargs) except (Retry, TimeLimitExceeded) as exc: if time_limit: signal.alarm(0) # Disable the alarm. if isinstance(exc, TimeLimitExceeded): time_limit_exceeded = True else: time_limit_exceeded = False sleep_time = countdown if callable(countdown): sleep_time = countdown(retry_mixin.request.retries) time.sleep(sleep_time) retry_mixin.request.retries += 1 continue if time_limit: signal.alarm(0) # Disable the alarm. return result exception = retry_mixin.exc exception = exception or MaxRetriesExceededError if time_limit_exceeded: exception = TimeLimitExceeded raise exception
def test_stress_delivery_simultaneous(self): """ This test uses simultaneous signal handlers. """ N = self.decide_itimer_count() sigs = [] def handler(signum, frame): sigs.append(signum) self.setsig(signal.SIGUSR1, handler) self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL expected_sigs = 0 deadline = time.monotonic() + 15.0 while expected_sigs < N: # Hopefully the SIGALRM will be received somewhere during # initial processing of SIGUSR1. signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5) os.kill(os.getpid(), signal.SIGUSR1) expected_sigs += 2 # Wait for handlers to run to avoid signal coalescing while len(sigs) < expected_sigs and time.monotonic() < deadline: time.sleep(1e-5) # All ITIMER_REAL signals should have been delivered to the # Python handler self.assertEqual(len(sigs), N, "Some signals were lost")