Example #1
File: jobs.py Project: laerus/xonsh
 def give_terminal_to(pgid):
     if pgid is None:
         return False
     oldmask = signal.pthread_sigmask(signal.SIG_BLOCK,
                                      _block_when_giving)
     try:
         os.tcsetpgrp(FD_STDERR, pgid)
         return True
     except ProcessLookupError:
         # the process finished before the terminal could be given to it,
         # see issue #2288
         return False
     except OSError as e:
         if e.errno == 22:  # [Errno 22] Invalid argument
             # there are cases that all the processes of pgid have
             # finished, then we don't need to do anything here, see
             # issue #2220
             return False
         elif e.errno == 25:  # [Errno 25] Inappropriate ioctl for device
             # There are also cases where we are not connected to a
             # real TTY, even though we may be run in interactive
             # mode. See issue #2267 for an example with emacs
             return False
         else:
             raise
     finally:
         signal.pthread_sigmask(signal.SIG_SETMASK, oldmask)
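
The function above relies on the module-level names FD_STDERR and _block_when_giving, which are not shown in this excerpt. Example #18 below performs the same operation with the signal set written out inline, so a plausible definition (an assumption, not the project's verbatim source) would be:

import signal

# Assumed definitions for the names used above; Example #18 inlines the same set.
FD_STDERR = 2
_block_when_giving = (signal.SIGTTOU, signal.SIGTTIN,
                      signal.SIGTSTP, signal.SIGCHLD)
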
Example #2
    def ensure_running(self):
        '''Make sure that semaphore tracker process is running.

        This can be run from any process.  Usually a child process will use
        the semaphore created by its parent.'''
        with self._lock:
            if self._pid is not None:
                # semaphore tracker was launched before, is it still running?
                try:
                    pid, _ = os.waitpid(self._pid, os.WNOHANG)
                except ChildProcessError:
                    # The process terminated
                    pass
                else:
                    if not pid:
                        # => still alive
                        return

                # => dead, launch it again
                os.close(self._fd)
                self._fd = None
                self._pid = None

                warnings.warn('semaphore_tracker: process died unexpectedly, '
                              'relaunching.  Some semaphores might leak.')

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass
            cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
            r, w = os.pipe()
            try:
                fds_to_pass.append(r)
                # the process will outlive us, so there is no need to wait on its pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                args += ['-c', cmd % r]
                # bpo-33613: Register a signal mask that will block the signals.
                # This signal mask will be inherited by the child that is going
                # to be spawned and will protect the child from a race condition
                # that can make the child die before it registers signal handlers
                # for SIGINT and SIGTERM. The mask is unregistered after spawning
                # the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
            except:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                os.close(r)
Example #3
    def _serve(self):
        if hasattr(signal, "pthread_sigmask"):
            signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
        while 1:
            try:
                conn = self._listener.accept()
                msg = conn.recv()
                if msg is None:
                    break
                key, destination_pid = msg
                send, close = self._cache.pop(key)
                send(conn, destination_pid)
                close()
                conn.close()
            except:
                if not is_exiting():
                    import traceback

                    sub_warning(
                        "thread for sharing handles raised exception :\n"
                        + "-" * 79
                        + "\n"
                        + traceback.format_exc()
                        + "-" * 79
                    )
Example #4
 def _give_terminal_to(pgid):
     st = _shell_tty()
     if st is not None and os.isatty(st):
         oldmask = signal.pthread_sigmask(signal.SIG_BLOCK,
                                          _block_when_giving)
         os.tcsetpgrp(st, pgid)
         signal.pthread_sigmask(signal.SIG_SETMASK, oldmask)
Example #5
    def _run(self):
        if hasattr(signal, 'pthread_sigmask'):
            # this thread should not handle any signal
            mask = range(1, signal.NSIG)
            signal.pthread_sigmask(signal.SIG_BLOCK, mask)

        self.schedule()

        while self.stop_lock.acquire(0):
            self.stop_lock.release()
            delay = self.once()
            if delay is not None:
                assert delay > 0.0
                with self.sleep_lock:
                    interrupted = self.sleep_lock.wait(timeout=delay)
                if interrupted:
                    break
                continue

            task = self._task_ref()
            try:
                task.call()
            except Exception as err:
                # the task is not rescheduled on error
                exc_type, exc_value, exc_tb = sys.exc_info()
                # FIXME: log the traceback
                print(("%s: %s" % (exc_type, exc_value)), file=sys.stderr)
                break
            if self.ncall is not None:
                self.ncall -= 1
                if self.ncall <= 0:
                    break
            self.schedule()
Example #6
    def _do_exit(self, signo):
        if signo == 0 or signo == signal.SIGINT:
            return

        curses.endwin()
        signal.pthread_sigmask(signal.SIG_UNBLOCK, [signo])
        signal.signal(signo, signal.SIG_DFL)
        os.kill(self._pid, signo)
Example #7
File: jobs.py Project: selepo/xonsh
 def _give_terminal_to(pgid):
     # over-simplified version of:
     #    give_terminal_to from bash 4.3 source, jobs.c, line 4030
     # this will give the terminal to the process group pgid
     if _shell_tty is not None and os.isatty(_shell_tty):
         oldmask = signal.pthread_sigmask(signal.SIG_BLOCK, _block_when_giving)
         os.tcsetpgrp(_shell_tty, pgid)
         signal.pthread_sigmask(signal.SIG_SETMASK, oldmask)
Example #8
    def halt(self):
        oldh = signal.signal(signal.SIGINT, lambda i,f: None)
        signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})

        for thr in self.threads:
            thr.request.halt_loop()

        signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT})
        signal.signal(signal.SIGINT, oldh)
Example #9
 def run(self):
     self.take_snapshot()
     if hasattr(signal, 'pthread_sigmask'):
         # Available on UNIX with Python 3.3+
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     time.sleep(init_delay)
     while True:
         self.take_snapshot()
         time.sleep(snapshot_delay)
Example #10
 def init_signal_handling(self):
     # Since Python 3.5, system calls are no longer interrupted by signals.
     # Thus, we can no longer use time.sleep(), as the processing of signals
     # would otherwise be delayed until the sleep has finished.
     # Instead, we now use signal.sigtimedwait(). Due to the lack of
     # documentation (and my unwillingness to spend more time on this than
     # necessary) I'm not quite sure I'm doing this completely right.
     # In the following, we block all the signals that we're interested in so
     # that they queue up; in TimeProvider.sleep() they are then taken off
     # the queue by signal.sigtimedwait().
     signal.pthread_sigmask(signal.SIG_BLOCK, SIGNALS)
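
TimeProvider.sleep() itself is not part of the excerpt. A minimal sketch of a sigtimedwait()-based sleep over the blocked set, where the contents of SIGNALS and the function name are assumptions:

import signal

SIGNALS = {signal.SIGINT, signal.SIGTERM}  # assumed contents of the blocked set

def sleep_with_signals(timeout):
    # Because the signals are blocked, they queue up instead of interrupting
    # the wait; sigtimedwait() dequeues one, or returns None when the timeout
    # elapses with nothing pending.
    info = signal.sigtimedwait(SIGNALS, timeout)
    return None if info is None else info.si_signo
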
Example #11
def main(fd):
    '''Run semaphore tracker.'''
    # protect the process from ^C and "killall python" etc
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    if _HAVE_SIGMASK:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    cache = set()
    try:
        # keep track of registered/unregistered semaphores
        with open(fd, 'rb') as f:
            for line in f:
                try:
                    cmd, name = line.strip().split(b':')
                    if cmd == b'REGISTER':
                        cache.add(name)
                    elif cmd == b'UNREGISTER':
                        cache.remove(name)
                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except Exception:
                    try:
                        sys.excepthook(*sys.exc_info())
                    except:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining semaphores
        if cache:
            try:
                warnings.warn('semaphore_tracker: There appear to be %d '
                              'leaked semaphores to clean up at shutdown' %
                              len(cache))
            except Exception:
                pass
        for name in cache:
            # For some reason the process which created and registered this
            # semaphore has failed to unregister it. Presumably it has died.
            # We therefore unlink it.
            try:
                name = name.decode('ascii')
                try:
                    _multiprocessing.sem_unlink(name)
                except Exception as e:
                    warnings.warn('semaphore_tracker: %r: %s' % (name, e))
            finally:
                pass
Example #12
def run(host, port, Payload):
    """Select loop of scheduler.

    :param host: A string with the host to bind to.
    :param port: An integer with the port to bind to.
    :param Payload: A class that follows the interface of ``types.Payload``.

    """
    scheduler = Scheduler()

    sock = init_socket(host, port)
    selector = selectors.DefaultSelector()
    callback = partial(handle_request, klass=Payload)
    selector.register(sock, selectors.EVENT_READ, callback)

    sigint_fd = linuxfd.signalfd(
        signalset={signal.SIGINT, signal.SIGTERM}, nonBlocking=True
    )
    selector.register(sigint_fd, selectors.EVENT_READ, True)
    sighup_fd = linuxfd.signalfd(signalset={signal.SIGHUP}, nonBlocking=True)
    selector.register(sighup_fd, selectors.EVENT_READ, scheduler.report)
    signal.pthread_sigmask(
        signal.SIG_BLOCK, {signal.SIGINT, signal.SIGHUP, signal.SIGTERM}
    )

    timestamp = None
    should_exit = False
    while True:
        if should_exit:
            break
        if timestamp is None:
            timeout = timestamp
        else:
            timeout = timestamp - time.time()
            assert timeout >= 0
        logger.debug('Selecting on timeout {0}'.format(timeout))
        events = selector.select(timeout)
        if not events:
            item = scheduler.pop()
            item.execute()
            timestamp = getattr(scheduler.top(), 'timestamp', None)
        for key, mask in events:
            callback = key.data
            if not callable(callback):
                should_exit = True
            elif key.fileobj == sock:
                item = callback(key.fileobj)
                scheduler.push(item)
                timestamp = scheduler.top().timestamp
            else:
                key.fileobj.read()
                callback()
    close_socket(sock)
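
init_socket() and close_socket() are not shown in the excerpt. A plausible sketch of the pair (an assumption, not the project's code) for a non-blocking TCP listener used with the selector:

import socket

def init_socket(host, port):
    # Assumed helper: a listening, non-blocking TCP socket for the selector loop.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen()
    sock.setblocking(False)
    return sock

def close_socket(sock):
    # Assumed helper: counterpart used at the end of run().
    sock.close()
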
Example #13
 def run(self):
     if hasattr(signal, 'pthread_sigmask'):
         # Available on UNIX with Python 3.3+
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     while True:
         if self.interval and self.snapshot_q.empty():
             time.sleep(self.interval)
         item = self.snapshot_q.get()
         logger.info('Sending {0} snapshot file'.format(item))
         logger.info('{0} items pending to send.'.format(
             self.snapshot_q.qsize())
         )
         snapshot = tracemalloc.Snapshot.load(item)
         self.client.send(snapshot)
Example #14
    def check_sigwait(self, wait_func):
        signum = signal.SIGUSR1
        pid = os.getpid()

        old_handler = signal.signal(signum, lambda *args: None)
        self.addCleanup(signal.signal, signum, old_handler)

        code = '\n'.join((
            'import os, time',
            'pid = %s' % os.getpid(),
            'signum = %s' % int(signum),
            'sleep_time = %r' % self.sleep_time,
            'time.sleep(sleep_time)',
            'os.kill(pid, signum)',
        ))

        old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        self.addCleanup(signal.pthread_sigmask, signal.SIG_UNBLOCK, [signum])

        t0 = time.monotonic()
        proc = self.subprocess(code)
        with kill_on_error(proc):
            wait_func(signum)
            dt = time.monotonic() - t0

        self.assertEqual(proc.wait(), 0)
Example #15
 def _run_reactor(self):
     """Run the twisted reactor."""
     threading.Thread(target=reactor.run,
                      daemon=True,
                      kwargs=dict(installSignalHandlers=0)).start()
     signal_map = {
         signal.SIGHUP: self._handle_reconfigure,
         signal.SIGINT: self._handle_shutdown,
         signal.SIGTERM: self._handle_shutdown,
         signal.SIGQUIT: self._handle_shutdown,
         signal.SIGUSR1: self._handle_debug,
     }
     while True:
         signal.pthread_sigmask(signal.SIG_BLOCK, signal_map.keys())
         signum = signal.sigwait(set(signal_map.keys()))
         logging.info("Got signal %s" % signum)
         if signum in signal_map:
             signal_map[signum](signum, None)
Example #16
 def run(self):
     if hasattr(signal, 'pthread_sigmask'):
         # Available on UNIX with Python 3.3+
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     while True:
          logger.debug('Sleeping {0} seconds...'.format(self.interval))
         time.sleep(self.interval)
         filename = ("/tmp/tracemalloc-%d-%04d.dump"
                     % (os.getpid(), self.counter))
         logger.info("Write snapshot into %s..." % filename)
         gc.collect()
         snapshot = tracemalloc.take_snapshot()
          snapshot.dump(filename)
         self.snapshot_q.put(filename)
         logger.debug('Queue size: {0}'.format(self.snapshot_q.qsize()))
         snapshot = None
         logger.info("Snapshot written into %s" % filename)
         self.counter += 1
Example #17
 def _serve(self):
     if hasattr(signal, 'pthread_sigmask'):
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     while 1:
         try:
             with self._listener.accept() as conn:
                 msg = conn.recv()
                 if msg is None:
                     break
                 key, destination_pid = msg
                 send, close = self._cache.pop(key)
                 try:
                     send(conn, destination_pid)
                 finally:
                     close()
         except:
             if not util.is_exiting():
                 sys.excepthook(*sys.exc_info())
Example #18
def give_terminal_to(pgid):
    signals = {
        signal.SIGTTOU,
        signal.SIGTTIN,
        signal.SIGTSTP,
        signal.SIGCHLD,
    }

    old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, signals)
    try:
        os.tcsetpgrp(2, pgid)
        return True
    except ProcessLookupError:
        return False
    except OSError:
        return False
    finally:
        signal.pthread_sigmask(signal.SIG_SETMASK, old_mask)
Example #19
 def _serve(self):
     if hasattr(signal, 'pthread_sigmask'):
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     while 1:
         try:
             with self._listener.accept() as conn:
                 msg = conn.recv()
                 if msg is None:
                     break
                 key, destination_pid = msg
                 send, close = self._cache.pop(key)
                 try:
                     send(conn, destination_pid)
                 finally:
                     close()
         except:
             if not util.is_exiting():
                 sys.excepthook(*sys.exc_info())
Example #20
    def serve_forever(self):
        asyncio.set_event_loop(self.loop)
        try:
            self.loop.add_signal_handler(signal.SIGTERM, self.signal_handler)
            signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])

            self.run_loop_forever()
            self.server.close()

            self.loop.run_until_complete(self.server.wait_closed())
            self.logger.debug('Server shutting down')
        finally:
            if self.close_loop:
                if sys.version_info >= (3, 6):
                    self.loop.run_until_complete(self.loop.shutdown_asyncgens())
                self.loop.close()

            if self._cleanup_socket is not None:
                self._cleanup_socket()
Example #21
	def action_toggle(self, from_, chan, msg, parts):
		t = time.time()
		if t - self.last_toggle >= 5:
			self.last_toggle = t
			try:
				signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGUSR1})
				subprocess.call(["/home/simark/avr/serieViaUSB/serieViaUSB", "-e", "-f", "/home/simark/avr/serieViaUSB/fichier"])
				self.irc.privmsg(chan, "Your wish is my command")
				#time.sleep(1)
				if get_plafond_status():
					self.irc.privmsg(chan, "Light is now on")
				else:
					self.irc.privmsg(chan, "Light is now off")
			except:
				raise
			finally:
				signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGUSR1})
		else:
			self.irc.privmsg(chan, "You have to wait 5 seconds between two toggles.")
Example #22
 def _runloop(self):
     signal.pthread_sigmask(signal.SIG_BLOCK, {
         signal.SIGXCPU,
         signal.SIGINT,
         signal.SIGQUIT,
         signal.SIGUSR1,
     })
     try:
         while self.alive:
             self.check_limits()
             self.multi.pipe_ping(self.watchdog_pipe)
             self.sleep()
             if not self.alive:
                 break
             self.process_work()
     except:
         _logger.exception("Worker %s (%s) Exception occured, exiting...",
                           self.__class__.__name__, self.pid)
         sys.exit(1)
Example #23
def run_with_temporary_dir(run_cmd_generator):
    global run_with_temporary_dir_pids
    global run_pytest_pids
    # Below, there is a small time window, after we fork and the child
    # started running but before we save this child's process id in
    # run_with_temporary_dir_pids. In that small time window, a signal may
    # kill the parent process but not cleanup the child. So we use sigmask
    # to postpone signal delivery during that time window:
    mask = signal.pthread_sigmask(signal.SIG_BLOCK, {})
    signal.pthread_sigmask(signal.SIG_BLOCK,
                           {signal.SIGINT, signal.SIGQUIT, signal.SIGTERM})
    sys.stdout.flush()
    sys.stderr.flush()
    pid = os.fork()
    if pid == 0:
        # Child
        run_with_temporary_dir_pids = set()  # no children to clean up on child
        run_pytest_pids = set()
        pid = os.getpid()
        dir = pid_to_dir(pid)
        os.mkdir(dir)
        (cmd, env) = run_cmd_generator(pid, dir)
        # redirect stdout and stderr to log file, as in a shell's >log 2>&1:
        log = os.path.join(dir, 'log')
        fd = os.open(log, os.O_WRONLY | os.O_CREAT, mode=0o666)
        sys.stdout.flush()
        os.close(1)
        os.dup2(fd, 1)
        sys.stderr.flush()
        os.close(2)
        os.dup2(fd, 2)
        # Detach child from parent's "session", so that a SIGINT will be
        # delivered just to the parent, not to the child. Instead, the parent
        # will eventually deliver a SIGKILL as part of cleanup_all().
        os.setsid()
        os.execve(cmd[0], cmd, dict(os.environ, **env))
        # execve will not return. If it cannot run the program, it will raise
        # an exception.
    # Parent
    run_with_temporary_dir_pids.add(pid)
    signal.pthread_sigmask(signal.SIG_SETMASK, mask)
    return pid
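
pid_to_dir() is referenced but not defined in the excerpt; presumably it maps a child's pid to its per-run scratch directory, for example (an assumed, hypothetical helper):

import os
import tempfile

def pid_to_dir(pid):
    # Assumed helper: a per-child directory named after the pid.
    return os.path.join(tempfile.gettempdir(), 'run-{}'.format(pid))
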
Example #24
 def _serve(self):
     if hasattr(signal, 'pthread_sigmask'):
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     while 1:
         try:
             conn = self._listener.accept()
             msg = conn.recv()
             if msg is None:
                 break
             key, destination_pid = msg
             send, close = self._cache.pop(key)
             send(conn, destination_pid)
             close()
             conn.close()
         except:
             if not is_exiting():
                 import traceback
                 sub_warning(
                     'thread for sharing handles raised exception :\n' +
                     '-' * 79 + '\n' + traceback.format_exc() + '-' * 79)
Example #25
def akashi_cli() -> None:
    # [XXX] argument_parse() must be called before configuring signals, or weird bugs occur
    parsed_option = argument_parse()

    if parsed_option.action == 'init':
        return do_init(parsed_option)

    if 'LD_LIBRARY_PATH' in os.environ.keys():
        os.environ['LD_LIBRARY_PATH'] += os.pathsep + LIBRARY_PATH
    else:
        os.environ['LD_LIBRARY_PATH'] = LIBRARY_PATH

    if 'LD_PRELOAD' in os.environ.keys():
        os.environ['LD_PRELOAD'] += os.pathsep + libpython_path()
    else:
        os.environ['LD_PRELOAD'] = libpython_path()

    if 'QT_LOGGING_RULES' not in os.environ.keys():
        os.environ['QT_LOGGING_RULES'] = '*=false;*.critical=true'

    os.environ['QT_XCB_GL_INTEGRATION'] = 'xcb_egl'

    if 'AK_ASSET_DIR' not in os.environ.keys():
        os.environ['AK_ASSET_DIR'] = ASSETS_DIR

    os.environ['AK_CORE_ARGS'] = " ".join(
        [path.abspath(parsed_option.conf_path)] + parsed_option.run_args)

    sigset: list[signal.Signals] = []
    sigset += [signal.SIGINT, signal.SIGHUP, signal.SIGQUIT, signal.SIGTERM]
    sigset += [signal.SIGPIPE, signal.SIGCHLD]

    signal.pthread_sigmask(signal.SIG_BLOCK, sigset)

    th_server = ServerThread(parsed_option)
    th_server.start()

    signal.sigwait(sigset)

    th_server.terminate()
    print('')
Example #26
 def action_toggle(self, from_, chan, msg, parts):
     t = time.time()
     if t - self.last_toggle >= 5:
         self.last_toggle = t
         try:
             signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGUSR1})
             subprocess.call(
                 ["/home/simark/avr/serieViaUSB/serieViaUSB", "-e", "-f", "/home/simark/avr/serieViaUSB/fichier"])
             self.irc.privmsg(chan, "Your wish is my command")
             # time.sleep(1)
             if get_plafond_status():
                 self.irc.privmsg(chan, "Light is now on")
             else:
                 self.irc.privmsg(chan, "Light is now off")
         except:
             raise
         finally:
             signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGUSR1})
     else:
         self.irc.privmsg(
             chan, "You have to wait 5 seconds between two toggles.")
Example #27
    def run(self):
        with no_daemon_context(self.working_dir, self.lock_file, self.signals):
            signal_map = {
                signal.SIGHUP: self._handle_reconfigure,
                signal.SIGINT: self._handle_shutdown,
                signal.SIGTERM: self._handle_shutdown,
                signal.SIGQUIT: self._handle_shutdown,
                signal.SIGUSR1: self._handle_debug,
            }
            signal.pthread_sigmask(signal.SIG_BLOCK, signal_map.keys())

            self._run_mcp()
            self._run_www_api()
            self._run_manhole()
            self._run_reactor()

            while True:
                signum = signal.sigwait(list(signal_map.keys()))
                if signum in signal_map:
                    logging.info(f"Got signal {str(signum)}")
                    signal_map[signum](signum, None)
Example #28
    def run(self):
        with no_daemon_context(self.working_dir, self.lock_file, self.signals):
            signal_map = {
                signal.SIGHUP: self._handle_reconfigure,
                signal.SIGINT: self._handle_shutdown,
                signal.SIGTERM: self._handle_shutdown,
                signal.SIGQUIT: self._handle_shutdown,
                signal.SIGUSR1: self._handle_debug,
            }
            signal.pthread_sigmask(signal.SIG_BLOCK, signal_map.keys())

            self._run_mcp()
            self._run_www_api()
            self._run_manhole()
            self._run_reactor()

            while True:
                signum = signal.sigwait(list(signal_map.keys()))
                if signum in signal_map:
                    logging.info(f"Got signal {str(signum)}")
                    signal_map[signum](signum, None)
Example #29
    def blocked_signals() -> Iterator[None]:
        """Block all signals for e.g. starting a worker thread."""
        # valid_signals() was added in Python 3.8 (and not using it results
        # in a warning on pthread_sigmask() call)
        mask: Iterable[int]
        try:
            mask = signal.valid_signals()
        except AttributeError:
            mask = set(range(1, signal.NSIG))

        old_mask = signal.pthread_sigmask(  # type: ignore[attr-defined]
            signal.SIG_SETMASK,  # type: ignore[attr-defined]
            mask,
        )
        try:
            yield
        finally:
            signal.pthread_sigmask(  # type: ignore[attr-defined]
                signal.SIG_SETMASK,  # type: ignore[attr-defined]
                old_mask,
            )
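
The generator above is evidently meant to be wrapped as a context manager (e.g. with contextlib.contextmanager). A self-contained sketch of the same pattern, with the wrapper and the worker function assumed, shows why it matters for thread start-up: a new thread inherits the creating thread's signal mask, so the worker never receives these signals.

import contextlib
import signal
import threading

@contextlib.contextmanager
def blocked_signals():
    # Block every valid signal, hand control back, then restore the old mask.
    try:
        mask = signal.valid_signals()
    except AttributeError:  # Python < 3.8
        mask = set(range(1, signal.NSIG))
    old_mask = signal.pthread_sigmask(signal.SIG_SETMASK, mask)
    try:
        yield
    finally:
        signal.pthread_sigmask(signal.SIG_SETMASK, old_mask)

def report_mask():
    # SIG_BLOCK with an empty set is effectively a read: it returns the current mask.
    print(len(signal.pthread_sigmask(signal.SIG_BLOCK, set())), "signals blocked")

with blocked_signals():
    worker = threading.Thread(target=report_mask)
    worker.start()
    worker.join()
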
Example #30
def _extra_main(extra_func, threaded, stop_signals, intr_event, proc_idx,
                args):
    interrupted: Union[threading.Event, mp.synchronize.Event]
    if threaded:
        interrupted = threading.Event()
    else:
        interrupted = mp.Event()
    if not threaded:

        # Since signals only work for the main thread in Python,
        # extra processes in use_threading=True mode should check
        # the intr_event by themselves (probably in their loops).

        def raise_stop(signum, frame):
            if interrupted.is_set():
                pass
            else:
                interrupted.set()
                if signum == signal.SIGINT:
                    raise KeyboardInterrupt
                elif signum == signal.SIGTERM:
                    raise SystemExit
                else:
                    raise InterruptedBySignal(signum)

        # restore signal handler.
        for signum in stop_signals:
            signal.signal(signum, raise_stop)
        signal.pthread_sigmask(signal.SIG_UNBLOCK, stop_signals)
        intr_event = None

    try:
        if not interrupted.is_set():
            extra_func(intr_event, proc_idx, args)
    except (SystemExit, KeyboardInterrupt, InterruptedBySignal):
        log.warning(f'extra_proc[{proc_idx}] did not handle stop signals.')
    finally:
        if not threaded:
            # same as in _worker_main()
            signal.pthread_sigmask(signal.SIG_BLOCK, stop_signals)
Example #31
def initialize(n_parallel):
    """Initialize the worker pool."""
    # SIGINT is blocked for all processes created in parallel_sampler to avoid
    # the creation of sleeping and zombie processes.
    #
    # If the user interrupts run_experiment, there's a chance some processes
    # won't die due to a dead lock condition where one of the children in the
    # parallel sampler exits without releasing a lock once after it catches
    # SIGINT.
    #
    # Later the parent tries to acquire the same lock to proceed with his
    # cleanup, but it remains sleeping waiting for the lock to be released.
    # In the meantime, all the process in parallel sampler remain in the zombie
    # state since the parent cannot proceed with their clean up.
    try:
        signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
        singleton_pool.initialize(n_parallel)
        singleton_pool.run_each(_worker_init,
                                [(id, )
                                 for id in range(singleton_pool.n_parallel)])
    finally:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGINT])
Example #32
def show(impact_parameter, speed=0.1):
    signal.signal(signal.SIGUSR1, signal.SIG_IGN)           # We don't care about pending signals
    signal.signal(signal.SIGUSR2, signal.SIG_IGN)           # We just watch and enjoy
    signal.pthread_sigmask(signal.SIG_SETMASK, [signal.SIGUSR1, signal.SIGUSR2])

    bg_path = "images/stock-photo.jpg"
    background_image = sf.Texture.from_file(bg_path)
    window = sf.RenderWindow(sf.VideoMode(1920, 1080), "A Swagabitch game")

    background_sprite = sf.Sprite(background_image)
    window.draw(background_sprite)
    window.display()
    window.framerate_limit = 60                            # Not to spend 100% CPU time

    rocket = Simulator.Simulator("images/rocket_tiny.png", impact_parameter, speed)
    our_planet = Simulator.PhysicalBody("images/Earth128.png")

    while window.is_open:
        dt = 1e-2                                           # Not as accurate as in test(), but who cares

        window.clear()
        if sf.Keyboard.is_key_pressed(sf.Keyboard.ESCAPE):
            break

        for event in window.events:
            if not event:
                break

        # print(rocket)
        # print(our_planet)
        window.draw(background_sprite)
        rocket.draw(window)
        our_planet.draw(window)
        for i in range(GAME_SPEED):
            rocket.physics()
            rocket.move(dt)
        window.display()
    signal.signal(signal.SIGUSR1, signal.SIG_DFL)
    signal.signal(signal.SIGUSR2, signal.SIG_DFL)
Example #33
        def inner():
            # Block signals unconditionally to avoid them impacting our return
            # code.
            signal.pthread_sigmask(signal.SIG_SETMASK, test_signals)
            for sig in test_signals:
                os.kill(pid, sig)

            received_signal = False
            received_signals = signal.sigpending()
            while received_signals != test_signals:
                result = signal.sigtimedwait(forwarded_signals, 0.1)
                if result is not None:
                    received_signal = True
                    received_signals.add(result.si_signo)
                elif received_signal:
                    # Only trace after the first empty response.
                    received_signal = False
                    logger.debug("Waiting for %s",
                                 test_signals - received_signals)

            output_path.write_text(to_string(received_signals),
                                   encoding="utf-8")
Example #34
def _extra_main(extra_func, threaded, intr_event, proc_idx, args):
    if not threaded:

        _interrupted = False

        def raise_kbdintr(signum, frame):
            nonlocal _interrupted
            if not _interrupted:
                _interrupted = True
                raise KeyboardInterrupt

        # restore signal handler.
        signal.signal(signal.SIGINT, raise_kbdintr)
        signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT})
        intr_event = None
    try:
        extra_func(intr_event, proc_idx, args)
    except SystemExit:
        pass
    finally:
        if not threaded:
            # same as in _worker_main()
            signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})
Example #35
def calc_inflection(impact_parameter, speed):
    signal.signal(signal.SIGUSR1, handler)           # We only handle USR1; USR2 (the success signal) is ignored below
    signal.signal(signal.SIGUSR2, signal.SIG_IGN)    # We just watch and enjoy
    signal.pthread_sigmask(signal.SIG_SETMASK, [signal.SIGUSR2])

    rocket = Simulator.Simulator("images/rocket_tiny.png", impact_parameter, speed)

    dt = 1e-3
    global zhopa
    zhopa = 0
    while zhopa == 0:
        rocket.physics()
        rocket.move(dt)
        if rocket.is_far_away_enough():
            angle = rocket.calc_inflection_angle()
            break
    else:
        angle = -1

    signal.signal(signal.SIGUSR1, signal.SIG_DFL)
    signal.signal(signal.SIGUSR2, signal.SIG_DFL)
    # print("angle ", angle)
    return angle
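
The handler installed for SIGUSR1 above is not shown; given the while zhopa == 0 loop, it presumably just flips that module-level flag, along these lines (an assumption):

def handler(signum, frame):
    # Assumed: record that SIGUSR1 arrived so the simulation loop stops and
    # calc_inflection() falls through to the angle = -1 branch.
    global zhopa
    zhopa = 1
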
Example #36
	def __init__(self, signal_names=['SIGINFO'], key_values=[20], skip_input_if_possible=True):
		"""signal_names is a list of signal names, which will be listened for if they exist.
		key_values is a list of (terminal input) key values, each will
		be considered equivalent of a signal if pressed.
		If skip_input_if_possible is True no input will be read if all signals exist.
		No differentiation is made between the signals."""

		self.key_values = set(key_values)
		self.restore = {}
		self.signal_set = False
		self.clean = False
		self.use_input = False
		all_sigs = True
		atexit.register(self.cleanup)
		for name in signal_names:
			sig = getattr(signal, name, None)
			if sig is not None:
				old = signal.signal(sig, self.signal_arrived)
				self.restore[sig] = old
				signal.siginterrupt(sig, False)
				if hasattr(signal, 'pthread_sigmask'):
					signal.pthread_sigmask(signal.SIG_UNBLOCK, {sig})
			else:
				all_sigs = False
		if all_sigs and skip_input_if_possible:
			return
		try:
			self.tc_original = termios.tcgetattr(0)
		except Exception:
			# If this fails we can't use terminal input.
			# Probably because we don't have a terminal, but the
			# reason doesn't really matter, ignore it regardless.
			return
		tc_changed = list(self.tc_original)
		tc_changed[3] &= ~(termios.ICANON | termios.IEXTEN)
		self.use_input = True
		termios.tcsetattr(0, termios.TCSADRAIN, tc_changed)
Example #37
        def inner():
            # Block signals we expect to receive
            signal.pthread_sigmask(signal.SIG_BLOCK,
                                   test_signals | {signal.SIGUSR1})
            # Save our pid so it is accessible to the test process, avoiding
            # any race conditions where the file may be empty.
            pid = os.getpid()
            fd, path = tempfile.mkstemp()
            os.write(fd, str(pid).encode("utf-8"))
            os.fsync(fd)
            os.close(fd)
            os.rename(path, runner_pid_file)

            for sig in test_signals:
                logger.debug("Waiting for %s", sig)
                signal.sigwait({sig})
                # Stop self to indicate success to test process.
                os.kill(pid, signal.SIGSTOP)

            logger.debug("Waiting for signal to exit")
            # This is required otherwise we may exit while the test is checking
            # for our status.
            signal.sigwait({resume_signal})
            logger.debug("All signals received")
Example #38
 def test_pthread_sigmask_arguments(self):
     self.assertRaises(TypeError, signal.pthread_sigmask)
     self.assertRaises(TypeError, signal.pthread_sigmask, 1)
     self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
     self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
     with self.assertRaises(ValueError):
         signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG])
     with self.assertRaises(ValueError):
         signal.pthread_sigmask(signal.SIG_BLOCK, [0])
     with self.assertRaises(ValueError):
         signal.pthread_sigmask(signal.SIG_BLOCK, [1 << 1000])
Example #39
 def test_pthread_sigmask_arguments(self):
     self.assertRaises(TypeError, signal.pthread_sigmask)
     self.assertRaises(TypeError, signal.pthread_sigmask, 1)
     self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
     self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
     with self.assertRaises(ValueError):
         signal.pthread_sigmask(signal.SIG_BLOCK, [signal.NSIG])
     with self.assertRaises(ValueError):
         signal.pthread_sigmask(signal.SIG_BLOCK, [0])
     with self.assertRaises(ValueError):
         signal.pthread_sigmask(signal.SIG_BLOCK, [1<<1000])
Example #40
    def check_sigwait(self, wait_func):
        signum = signal.SIGUSR1
        pid = os.getpid()

        old_handler = signal.signal(signum, lambda *args: None)
        self.addCleanup(signal.signal, signum, old_handler)

        code = '\n'.join((
            'import os, time',
            'pid = %s' % os.getpid(),
            'signum = %s' % int(signum),
            'sleep_time = %r' % self.sleep_time,
            'time.sleep(sleep_time)',
            'os.kill(pid, signum)',
        ))

        old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
        self.addCleanup(signal.pthread_sigmask, signal.SIG_UNBLOCK, [signum])

        proc = self.subprocess(code)
        with kill_on_error(proc):
            wait_func(signum)

        self.assertEqual(proc.wait(), 0)
Example #41
 def test_pthread_sigmask_valid_signals(self):
     s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
     self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s)
     # Get current blocked set
     s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals())
     self.assertLessEqual(s, signal.valid_signals())
Example #42
def reset_signal_handling():
    if _HAS_SIGWAIT:
        signal.pthread_sigmask(signal.SIG_SETMASK, {})
Example #43
def block_all_signals():
    """Block asynchronous delivery of all signals to this process."""
    if _HAS_SIGWAIT:
        signal.pthread_sigmask(signal.SIG_BLOCK, _ALL_SIGNALS)
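
Examples #42 and #43 guard on _HAS_SIGWAIT and use _ALL_SIGNALS, neither of which appears in the excerpt. A plausible set of module-level definitions (assumed; the real project may differ):

import signal

_HAS_SIGWAIT = hasattr(signal, "sigwait") and hasattr(signal, "pthread_sigmask")
_ALL_SIGNALS = (signal.valid_signals() if hasattr(signal, "valid_signals")
                else set(range(1, signal.NSIG)))
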
Example #44
 def block_sigint():
     import signal
     signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})
Example #45
efd = linuxfd.eventfd(initval=0,semaphore=True,nonBlocking=True)
for i in range(0,3):
	print("   writing to sempahore")
	efd.write()
try:
	while True:
		value = efd.read()
		print("   read '{}' from semaphore".format(value))
except BlockingIOError:
	print("   semaphore exhausted")

#
# test signalfd
#
sfd = linuxfd.signalfd(signalset={signal.SIGALRM})
signal.pthread_sigmask(signal.SIG_SETMASK,{signal.SIGALRM})
print("\ntesting signalfd (fd={}) with SIGALRM".format(sfd.fileno()))
print("guarded signals = {}".format(sfd.signals()))
print("starting alarm timer (3 seconds)")
signal.alarm(3)
value = sfd.read()
print("received SIGALRM, signalfd.read() returned:")
pprint.pprint(value)
signal.alarm(0)

#
# test timerfd
#
tfd = linuxfd.timerfd(rtc=True)
print("\ntesting timerfd (fd={})".format(sfd.fileno()))
print("   {:.2f}: setting timer (value=3,interval=1)".format(time.time()))
Example #46
# Can't do anything in this handler - python libs are not thread safe, so not safe to call e.g. print.
def sighup_handler(sig, frame):
    pass


print('Running {}'.format(str(sys.argv)))

if sys.argv[1] == '-v':
    print('Asked for version')
    sys.exit(0)

if sys.argv[1] == '-t':
    print('Asked for config validation')
    sys.exit(0)

# The parent golang process blocks SIGQUIT in subprocesses, for some reason.
# So we unblock it manually - same as what nginx does.
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGQUIT, signal.SIGHUP})
signal.signal(signal.SIGQUIT, sigquit_handler)
signal.signal(signal.SIGHUP, sighup_handler)
signal.pause

startup_marker_file_name = str.join(
    '/', sys.argv[2].split('/')[:-1]) + '/nginx-started'
with open(startup_marker_file_name, 'w') as f:
    f.write('started!')

time.sleep(5)
print('Quit after 5 seconds of nada')
sys.exit(-1)
Example #47
def main():  # noqa
    args = parse_arguments()
    for path in args.path:
        sys.path.insert(0, path)

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        logger = setup_parent_logging(args, stream=args.log_file or sys.stderr)
        logger.critical(e)
        return RET_PIDFILE

    worker_pipes = []
    worker_processes = []
    for worker_id in range(args.processes):
        read_fd, write_fd = os.pipe()
        pid = os.fork()
        if pid != 0:
            os.close(write_fd)
            worker_pipes.append(os.fdopen(read_fd))
            worker_processes.append(pid)
            continue

        os.close(read_fd)
        return worker_process(args, worker_id, write_fd)

    parent_read_fd, parent_write_fd = os.pipe()
    parent_read_pipe = os.fdopen(parent_read_fd)
    parent_write_pipe = os.fdopen(parent_write_fd, "w")
    logger = setup_parent_logging(args, stream=parent_write_pipe)
    logger.info("Remoulade %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    def watch_logs(worker_pipes):
        nonlocal running

        log_file = args.log_file or sys.stderr
        selector = selectors.DefaultSelector()
        for pipe in [parent_read_pipe] + worker_pipes:
            selector.register(pipe, selectors.EVENT_READ)

        buffers = defaultdict(str)  # type: Dict[int, str]
        while running:
            events = selector.select(timeout=1)
            for key, _ in events:
                data = os.read(key.fd, BUFSIZE)
                if not data:
                    selector.unregister(key.fileobj)
                    log_file.write(buffers[key.fd])
                    log_file.flush()
                    continue

                buffers[key.fd] += data.decode("utf-8", errors="ignore")
                while buffers[key.fd]:
                    index = buffers[key.fd].find("\n")
                    if index == -1:
                        break

                    line = buffers[key.fd][:index + 1]
                    buffers[key.fd] = buffers[key.fd][index + 1:]
                    log_file.write(line)
                    log_file.flush()

        logger.debug("Closing selector...")
        selector.close()

    log_watcher = Thread(target=watch_logs, args=(worker_pipes, ), daemon=True)
    log_watcher.start()

    def sighandler(signum, frame):
        nonlocal reload_process, worker_processes
        reload_process = signum == signal.SIGHUP
        signum = {
            signal.SIGINT: signal.SIGTERM,
            signal.SIGTERM: signal.SIGTERM,
            signal.SIGHUP: signal.SIGHUP,
        }[signum]

        logger.info("Sending %r to worker processes...", signum.name)
        for pid in worker_processes:
            try:
                os.kill(pid, signum)
            except OSError:  # pragma: no cover
                logger.warning("Failed to send %r to pid %d.", signum.name,
                               pid)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    signal.signal(signal.SIGHUP, sighandler)
    for pid in worker_processes:
        pid, rc = os.waitpid(pid, 0)
        retcode = max(retcode, rc >> 8)

    running = False
    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    log_watcher.join()
    for pipe in [parent_read_pipe, parent_write_pipe, *worker_pipes]:
        pipe.close()

    if reload_process:
        if sys.argv[0].endswith("/remoulade/__main__.py"):
            return os.execvp(sys.executable,
                             ["python", "-m", "remoulade", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
Example #48
#!/usr/bin/python3

import signal
import sys
import time

def signal_handler(signal, frame):
    print('Received sigquit, doing graceful shutdown')
    time.sleep(0.5)
    sys.exit(0)

print('Running {}'.format(str(sys.argv)))

if sys.argv[1] == '-v':
    print('Asked for version')
    sys.exit(0)

# The parent golang process blocks SIGQUIT in subprocesses, for some reason.
# So we unblock it manually - same as what nginx does.
signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGQUIT})
signal.signal(signal.SIGQUIT, signal_handler)
signal.pause
time.sleep(5)
print('Should have handled SIGQUIT')
sys.exit(-1)
Example #49
 def unblock_signals():
     signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM, signal.SIGINT])
Example #50
File: child.py Project: pybus/pybus
import event
import event.log
import signal, os

signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGCHLD])
e_loop=event.Loop(log=event.log.Stderr())

def c(cld, status):
	print('Done', status)

@e_loop.main_callback
def stuff():
	pid=event.Child._raw_spawn(
		b'/usr/bin/sleep', (b'sleep', b'.5'), (), fd=(0,0,1,1,2,2)
	)
	cld=e_loop.new(event.Child, pid, callback_dead=c)

e_loop.run()
Example #51
 def run(self):
     if hasattr(signal, 'pthread_sigmask'):
         signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
     super(WebSocketClient, self).run()
Example #52
def block_all_signals():
    """Block asynchronous delivery of all signals to this process."""
    if _HAS_SIGWAIT:
        signal.pthread_sigmask(signal.SIG_BLOCK, _ALL_SIGNALS)
Example #53
def reset_signal_handling():
    if _HAS_SIGWAIT:
        signal.pthread_sigmask(signal.SIG_SETMASK, {})
Example #54
 def test_pthread_sigmask_valid_signals(self):
     s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
     self.addCleanup(signal.pthread_sigmask, signal.SIG_SETMASK, s)
     # Get current blocked set
     s = signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals())
     self.assertLessEqual(s, signal.valid_signals())
Example #55
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    worker_processes = []
    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen),
            daemon=True,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks,
                                              canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe()
    logger = setup_parent_logging(args,
                                  stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes]),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", signum.name,
                                   proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...",
                    getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.
    while any(p.exitcode is None for p in worker_processes):
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode)
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break

            else:
                retcode = max(retcode, proc.exitcode)

    for pipe in [
            parent_read_pipe, parent_write_pipe, *worker_pipes, *fork_pipes
    ]:
        try:
            pipe.close()
        # If the worker process was killed, the handle may already be
        # closed.
        except (EOFError, OSError):
            pass

    # The log watcher can't be a daemon in case we log to a file.  So
    # we have to wait for it to complete on exit.  Closing all the
    # pipes above is what should trigger said exit.
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable,
                             ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
Example #56
def container_run(platform: str,
                  nvidia_runtime: bool,
                  docker_registry: str,
                  shared_memory_size: str,
                  local_ccache_dir: str,
                  command: List[str],
                  cleanup: Cleanup,
                  dry_run: bool = False) -> int:
    """Run command in a container"""
    container_wait_s = 600
    #
    # Environment setup
    #
    environment = {
        'CCACHE_MAXSIZE': '500G',
        'CCACHE_TEMPDIR': '/tmp/ccache',  # temp dir should be local and not shared
        'CCACHE_DIR': '/work/ccache',  # this path is inside the container as /work/ccache is
                                       # mounted
        'CCACHE_LOGFILE': '/tmp/ccache.log',  # a container-scoped log, useful for ccache
                                              # verification.
    }
    # These variables are passed to the container so that the process tree killer
    # can find runaway processes inside the container
    # https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller
    # https://github.com/jenkinsci/jenkins/blob/578d6bacb33a5e99f149de504c80275796f0b231/core/src/main/java/hudson/model/Run.java#L2393
    #
    jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG']
    environment.update({k: os.environ[k] for k in jenkins_env_vars if k in os.environ})
    environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if k in os.environ})

    tag = get_docker_tag(platform=platform, registry=docker_registry)
    mx_root = get_mxnet_root()
    local_build_folder = buildir()
    # We need to create it first, otherwise it will be created by the docker daemon with root only permissions
    os.makedirs(local_build_folder, exist_ok=True)
    os.makedirs(local_ccache_dir, exist_ok=True)
    logging.info("Using ccache directory: %s", local_ccache_dir)
    docker_client = docker.from_env()
    # Equivalent command
    docker_cmd_list = [
        get_docker_binary(nvidia_runtime),
        'run',
        "--cap-add",
        "SYS_PTRACE", # Required by ASAN
        '--rm',
        '--shm-size={}'.format(shared_memory_size),
        # mount mxnet root
        '-v', "{}:/work/mxnet".format(mx_root),
        # mount mxnet/build for storing build
        '-v', "{}:/work/build".format(local_build_folder),
        '-v', "{}:/work/ccache".format(local_ccache_dir),
        '-u', '{}:{}'.format(os.getuid(), os.getgid()),
        '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']),
        # temp dir should be local and not shared
        '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']),
        # this path is inside the container as /work/ccache is mounted
        '-e', "CCACHE_DIR={}".format(environment['CCACHE_DIR']),
        # a container-scoped log, useful for ccache verification.
        '-e', "CCACHE_LOGFILE={}".format(environment['CCACHE_LOGFILE']),
        '-ti',
        tag]
    docker_cmd_list.extend(command)
    docker_cmd = ' \\\n\t'.join(docker_cmd_list)
    logging.info("Running %s in container %s", command, tag)
    logging.info("Executing the equivalent of:\n%s\n", docker_cmd)
    # return code of the command inside docker
    ret = 0
    if not dry_run:
        #############################
        #
        signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM})
        # noinspection PyShadowingNames
        runtime = None
        if nvidia_runtime:
            # noinspection PyShadowingNames
            # runc is default (docker info | grep -i runtime)
            runtime = 'nvidia'
        container = docker_client.containers.run(
            tag,
            runtime=runtime,
            detach=True,
            command=command,
            shm_size=shared_memory_size,
            user='{}:{}'.format(os.getuid(), os.getgid()),
            cap_add='SYS_PTRACE',
            volumes={
                mx_root:
                    {'bind': '/work/mxnet', 'mode': 'rw'},
                local_build_folder:
                    {'bind': '/work/build', 'mode': 'rw'},
                local_ccache_dir:
                    {'bind': '/work/ccache', 'mode': 'rw'},
            },
            environment=environment)
        try:
            logging.info("Started container: %s", trim_container_id(container.id))
            # Race condition:
            # If the previous call is interrupted, the container may never be cleaned up.
            # We avoid this by masking the signals temporarily.
            cleanup.add_container(container)
            signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM})
            #
            #############################

            stream = container.logs(stream=True, stdout=True, stderr=True)
            sys.stdout.flush()
            for chunk in stream:
                sys.stdout.buffer.write(chunk)
                sys.stdout.buffer.flush()
            sys.stdout.flush()
            stream.close()
            try:
                logging.info("Waiting for status of container %s for %d s.",
                            trim_container_id(container.id),
                            container_wait_s)
                wait_result = container.wait(timeout=container_wait_s)
                logging.info("Container exit status: %s", wait_result)
                ret = wait_result.get('StatusCode', 200)
            except Exception as e:
                logging.exception(e)
                ret = 150

            # Stop
            try:
                logging.info("Stopping container: %s", trim_container_id(container.id))
                container.stop()
            except Exception as e:
                logging.exception(e)
                ret = 151

            # Remove
            try:
                logging.info("Removing container: %s", trim_container_id(container.id))
                container.remove()
            except Exception as e:
                logging.exception(e)
                ret = 152
            cleanup.remove_container(container)
            containers = docker_client.containers.list()
            if containers:
                logging.info("Other running containers: %s", [trim_container_id(x.id) for x in containers])
        except docker.errors.NotFound as e:
            logging.info("Container was stopped before cleanup started: %s", e)
    return ret
Exemplo n.º 57
0
def container_run(docker_client: SafeDockerClient,
                  platform: str,
                  nvidia_runtime: bool,
                  docker_registry: str,
                  shared_memory_size: str,
                  local_ccache_dir: str,
                  command: List[str],
                  environment: Dict[str, str],
                  dry_run: bool = False) -> int:
    """Run command in a container"""
    container_wait_s = 600
    #
    # Environment setup
    #
    environment.update({
        'CCACHE_MAXSIZE': '500G',
        'CCACHE_TEMPDIR':
        '/tmp/ccache',  # temp dir should be local and not shared
        'CCACHE_DIR':
        '/work/ccache',  # this path is inside the container as /work/ccache is
        # mounted
        'CCACHE_LOGFILE':
        '/tmp/ccache.log',  # a container-scoped log, useful for ccache
        # verification.
    })
    environment.update(
        {k: os.environ[k]
         for k in ['CCACHE_MAXSIZE'] if k in os.environ})

    tag = get_docker_tag(platform=platform, registry=docker_registry)
    mx_root = get_mxnet_root()
    local_build_folder = buildir()
    # We need to create it first; otherwise the docker daemon will create it with root-only permissions
    os.makedirs(local_build_folder, exist_ok=True)
    os.makedirs(local_ccache_dir, exist_ok=True)
    logging.info("Using ccache directory: %s", local_ccache_dir)

    # Equivalent command
    docker_cmd_list = [
        get_docker_binary(nvidia_runtime),
        'run',
        "--cap-add",
        "SYS_PTRACE",  # Required by ASAN
        '--rm',
        '--shm-size={}'.format(shared_memory_size),
        # mount mxnet root
        '-v',
        "{}:/work/mxnet".format(mx_root),
        # mount mxnet/build for storing build
        '-v',
        "{}:/work/build".format(local_build_folder),
        '-v',
        "{}:/work/ccache".format(local_ccache_dir),
        '-u',
        '{}:{}'.format(os.getuid(), os.getgid()),
        '-e',
        'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']),
        # temp dir should be local and not shared
        '-e',
        'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']),
        # this path is inside the container as /work/ccache is mounted
        '-e',
        "CCACHE_DIR={}".format(environment['CCACHE_DIR']),
        # a container-scoped log, useful for ccache verification.
        '-e',
        "CCACHE_LOGFILE={}".format(environment['CCACHE_LOGFILE']),
        '-ti',
        tag
    ]
    docker_cmd_list.extend(command)
    docker_cmd = ' \\\n\t'.join(docker_cmd_list)
    logging.info("Running %s in container %s", command, tag)
    logging.info("Executing the equivalent of:\n%s\n", docker_cmd)

    if not dry_run:
        #############################
        # Block SIGINT/SIGTERM while the container is being started so an interrupt
        # cannot leave an untracked container running.
        signal.pthread_sigmask(signal.SIG_BLOCK,
                               {signal.SIGINT, signal.SIGTERM})
        # noinspection PyShadowingNames
        runtime = None
        if nvidia_runtime:
            # noinspection PyShadowingNames
            # runc is default (docker info | grep -i runtime)
            runtime = 'nvidia'

        return docker_client.run(tag,
                                 runtime=runtime,
                                 command=command,
                                 shm_size=shared_memory_size,
                                 user='{}:{}'.format(os.getuid(), os.getgid()),
                                 cap_add='SYS_PTRACE',
                                 volumes={
                                     mx_root: {
                                         'bind': '/work/mxnet',
                                         'mode': 'rw'
                                     },
                                     local_build_folder: {
                                         'bind': '/work/build',
                                         'mode': 'rw'
                                     },
                                     local_ccache_dir: {
                                         'bind': '/work/ccache',
                                         'mode': 'rw'
                                     },
                                 },
                                 environment=environment)
    return 0
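

# Hypothetical usage sketch (not part of the original source): every argument value
# below is illustrative only, and `docker_client` is assumed to be a SafeDockerClient
# constructed elsewhere in this module's surrounding code.
def _example_container_run(docker_client: SafeDockerClient) -> int:
    return container_run(
        docker_client=docker_client,
        platform='ubuntu_cpu',              # hypothetical platform name
        nvidia_runtime=False,
        docker_registry='mxnetci',          # hypothetical registry
        shared_memory_size='500m',
        local_ccache_dir='/tmp/ci_ccache',  # hypothetical host ccache path
        command=['/work/runtime_functions.sh', 'build_ubuntu_cpu'],  # hypothetical command
        environment={},
        dry_run=True,                       # only log the equivalent docker command
    )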
Exemplo n.º 58
0
def signal_handler(signum, _):
    signal.pthread_sigmask(signal.SIG_BLOCK, {signum})
    logging.warning("Signal %d received, cleaning up...", signum)
    cleanup()
    logging.warning("done. Exiting with error.")
    sys.exit(1)
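
# A minimal registration sketch (an assumption, not shown in the original excerpt):
# the handler above only takes effect once it is installed for the signals the
# process wants to trap, typically SIGINT and SIGTERM.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)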
Exemplo n.º 59
0
@contextlib.contextmanager  # assumed: the original is presumably used as a context manager
def ignore_signals(signals):
    # Block the given signals for the duration of the with-block; always restore them.
    signal.pthread_sigmask(signal.SIG_BLOCK, signals)
    try:
        yield
    finally:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, signals)
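
# Hypothetical usage sketch (not part of the original excerpt): defer SIGINT/SIGTERM
# delivery around a critical section; any signal that arrived while blocked is
# delivered once the with-block exits and unblocks it.
def _critical_section_example():
    with ignore_signals({signal.SIGINT, signal.SIGTERM}):
        pass  # uninterruptible work would go here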
Exemplo n.º 60
0
#!/usr/bin/env python
''' demo showing child process management '''
import signal
import atexit
import subprocess

_ALL_SIGNALS = [signal.SIGCHLD, signal.SIGINT, signal.SIGTERM]

signal.pthread_sigmask(signal.SIG_BLOCK, _ALL_SIGNALS)


def on_signal(_signo, _):
    ''' Use an empty handler so SIGCHLD is not discarded or ignored '''


signal.signal(signal.SIGCHLD, on_signal)


CHILDS = []  # assumed: list of subprocess.Popen children appended by the demo's spawning code


def on_exit():
    ''' clean before exit: terminate child processes '''
    if CHILDS is not None:
        for child in CHILDS:
            try:
                child.terminate()
            except Exception as err:
                print('Failed to terminate child: {}'.format(err))


atexit.register(on_exit)
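

# Hypothetical continuation sketch (not part of the original excerpt): spawn a child,
# record it so on_exit() can terminate it, and use sigwait() on the already-blocked
# signals to react to child exit or to a termination request.
def _demo_main():
    CHILDS.append(subprocess.Popen(['sleep', '60']))
    while True:
        # Blocks until one of the masked signals becomes pending, then consumes it.
        signum = signal.sigwait(_ALL_SIGNALS)
        if signum == signal.SIGCHLD:
            if all(child.poll() is not None for child in CHILDS):
                break  # every child has exited
        else:
            break  # SIGINT / SIGTERM: exit and let atexit terminate remaining children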