Example #1
def ctrlCHandler(sig, frame):
    print("CTRL-C Handler:")
    print("Trackback:")
    faulthandler.dump_traceback()
    print("")
    print("Aborting now")
    assert (False)
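A handler like this only runs if it is actually installed for SIGINT. A minimal, self-contained sketch of that wiring (the handler name and messages below are illustrative, not taken from the project above):

import faulthandler
import signal
import sys

def ctrl_c_handler(sig, frame):
    # Dump the Python stack of every thread to stderr, then exit.
    print("CTRL-C received, dumping tracebacks:", file=sys.stderr)
    faulthandler.dump_traceback(all_threads=True)
    sys.exit(1)

signal.signal(signal.SIGINT, ctrl_c_handler)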
Example #2
    def handleKillTimer(self):
        print("Full Traceback With Threads:")
        faulthandler.dump_traceback()
        print("")

        e_string = "HardwareFunctionality timed out!"
        raise halExceptions.HardwareException(e_string)
Example #3
def start_threads(threads, unlock=None):
    import faulthandler
    threads = list(threads)
    started = []
    try:
        try:
            for t in threads:
                t.start()
                started.append(t)
        except:
            if support.verbose:
                print("Can't start %d threads, only %d threads started" %
                      (len(threads), len(started)))
            raise
        yield
    finally:
        try:
            if unlock:
                unlock()
            endtime = time.monotonic()
            for timeout in range(1, 16):
                endtime += 60
                for t in started:
                    t.join(max(endtime - time.monotonic(), 0.01))
                started = [t for t in started if t.is_alive()]
                if not started:
                    break
                if support.verbose:
                    print('Unable to join %d threads during a period of '
                          '%d minutes' % (len(started), timeout))
        finally:
            started = [t for t in started if t.is_alive()]
            if started:
                faulthandler.dump_traceback(sys.stdout)
                raise AssertionError('Unable to join %d threads' % len(started))
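start_threads is written as a generator; in CPython's test suite it is wrapped with contextlib.contextmanager so callers use it as a context manager. A simplified, runnable sketch of that usage pattern (started below is an illustrative stand-in, not the real helper):

import contextlib
import threading

@contextlib.contextmanager
def started(threads):
    # Start every thread, hand control to the caller, then join on exit,
    # mirroring the start/yield/join structure of start_threads above.
    for t in threads:
        t.start()
    try:
        yield threads
    finally:
        for t in threads:
            t.join()

workers = [threading.Thread(target=print, args=(i,)) for i in range(3)]
with started(workers):
    pass  # the test body runs while the workers are alive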
Example #4
    def handleKillTimer(self):
        print("Full Traceback With Threads:")
        faulthandler.dump_traceback()
        print("")

        e_string = "HardwareFunctionality timed out!"
        raise halExceptions.HardwareException(e_string)
Example #5
def receiveSIGTERM(signalNumber, frame):
    now = datetime.now()
    dtstr = now.strftime('%Y-%m-%d-%H:%M:%S.%f')
    print(f'Received signal {signalNumber} at {dtstr}',
          file=sys.stderr,
          flush=True)
    faulthandler.dump_traceback()
Example #6
def ctrlCHandler(sig, frame):
    print("CTRL-C Handler:")
    print("Trackback:")
    faulthandler.dump_traceback()
    print("")
    print("Aborting now")
    assert(False)
Example #7
    def _event_listener(parent_pipe, signal_pipe, rank: int):
        logger.info(f"Starting event listener thread for rank {rank}")
        while True:
            ready_pipes = multiprocessing.connection.wait(
                [parent_pipe, signal_pipe])

            if parent_pipe in ready_pipes:

                if parent_pipe.closed:
                    logger.info(
                        f"Pipe closed for process {rank}, stopping event listener thread"
                    )
                    return

                event = parent_pipe.recv()
                logger.info(f"Received event {event} on process {rank}")

                if event == MultiProcessTestCase.Event.GET_TRACEBACK:
                    # Return traceback to the parent process.
                    with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
                        faulthandler.dump_traceback(tmp_file)
                        # Flush buffers and seek to read from the beginning
                        tmp_file.flush()
                        tmp_file.seek(0)
                        parent_pipe.send(tmp_file.read())

                        logger.info(f"Process {rank} sent traceback")

            if signal_pipe in ready_pipes:
                return
Example #8
def _threadstacks(*args, **kwargs):  # pragma: no cover
    '''
    A signal handler used to print thread stacks.
    '''
    print(80 * '*')
    print('Faulthandler stack frames per thread:')
    faulthandler.dump_traceback()
    print(80 * '*')
Example #9
def _threadstacks(*args, **kwargs):  # pragma: no cover
    '''
    A signal handler used to print thread stacks.
    '''
    print(80 * '*')
    print('Faulthandler stack frames per thread:')
    faulthandler.dump_traceback()
    print(80 * '*')
Example #10
def delayed_dump(signo, frame):
    # Avoid intermingling the output of all the workers
    time.sleep(worker_id / 4)
    print()
    msg = 'Worker %d (PID %d) dumping all threads' % (worker_id, os.getpid())
    print('-' * len(msg))
    print(msg)
    print('-' * len(msg))
    faulthandler.dump_traceback()
Example #11
def showStacks(pv, op):
    import faulthandler
    from tempfile import TemporaryFile
    with TemporaryFile('r+') as F:
        faulthandler.dump_traceback(file=F)
        F.seek(0)
        V = pv.current().raw
        V.unmark()
        V['value'] = F.read()
        op.done(V)
Example #12
def _handlesigint(signal, stack):
    import signal
    import sys
    import faulthandler
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    if _log.isEnabledFor(_INFO):
        faulthandler.dump_traceback()

    sys.exit('%s: exit due to keyboard interrupt' % (NAME.lower()))
Example #13
def printstack(sig, frame):
    try:
        import faulthandler
    except ImportError:
        return
    try:
        faulthandler.dump_traceback()
        with open(os.path.join(Env.paths.pathvar, "daemon.stack"), "w") as f:
            faulthandler.dump_traceback(file=f)
    except Exception:
        pass
Example #14
    def __enter__(self) -> bool:
        self._acquired = self.acquire(timeout=ThreadingCondition.TIMEOUT)

        if self._acquired:
            return True

        logger.error("Failed to acquire CV (timeout %f)",
                     ThreadingCondition.TIMEOUT)
        import faulthandler

        faulthandler.dump_traceback(all_threads=True)
        raise RuntimeError("Failed to acquire CV")
Example #15
def test_stderr_None(self):
    # Issue #21497: provide a helpful error if sys.stderr is None,
    # instead of just an attribute error: "None has no attribute fileno".
    with self.check_stderr_none():
        faulthandler.enable()
    with self.check_stderr_none():
        faulthandler.dump_traceback()
    with self.check_stderr_none():
        faulthandler.dump_traceback_later(1e-3)
    if hasattr(faulthandler, "register"):
        with self.check_stderr_none():
            faulthandler.register(signal.SIGUSR1)
Example #16
def _asynciostacks(*args, **kwargs):  # pragma: no cover
    '''
    A signal handler used to print asyncio task stacks and thread stacks.
    '''
    print(80 * '*')
    print('Asyncio tasks stacks:')
    tasks = asyncio.all_tasks(_glob_loop)
    for task in tasks:
        task.print_stack()
    print(80 * '*')
    print('Faulthandler stack frames per thread:')
    faulthandler.dump_traceback()
    print(80 * '*')
Example #17
def test_stderr_None(self):
    # Issue #21497: provide a helpful error if sys.stderr is None,
    # instead of just an attribute error: "None has no attribute fileno".
    with self.check_stderr_none():
        faulthandler.enable()
    with self.check_stderr_none():
        faulthandler.dump_traceback()
    if hasattr(faulthandler, 'dump_traceback_later'):
        with self.check_stderr_none():
            faulthandler.dump_traceback_later(1e-3)
    if hasattr(faulthandler, "register"):
        with self.check_stderr_none():
            faulthandler.register(signal.SIGUSR1)
Example #18
def _asynciostacks(*args, **kwargs):  # pragma: no cover
    '''
    A signal handler used to print asyncio task stacks and thread stacks.
    '''
    print(80 * '*')
    print('Asyncio tasks stacks:')
    tasks = asyncio.all_tasks(_glob_loop)
    for task in tasks:
        task.print_stack()
    print(80 * '*')
    print('Faulthandler stack frames per thread:')
    faulthandler.dump_traceback()
    print(80 * '*')
Example #19
def log_running_threads() -> None:
    # If we can't exit, list the threads that are holding us open. *This* thread runs as a daemon,
    # so it won't block the exit itself.
    last = ''
    while True:
        time.sleep(10)
        threads = ','.join(thread.name for thread in threading.enumerate()
                           if not thread.daemon)
        if threads != last:
            logger.error('Still running nondaemon threads: %s', threads)
            with open('impbot-traceback.log', 'a') as f:
                faulthandler.dump_traceback(file=f)
            logger.error('Tracebacks dumped to impbot-traceback.log')
            last = threads
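The comment above says the watcher itself runs as a daemon thread; a short sketch of starting log_running_threads that way, assuming the function from the example is importable (the thread name below is illustrative):

import threading

# Start the watcher as a daemon so it never keeps the process alive itself.
watcher = threading.Thread(target=log_running_threads,
                           name="nondaemon-thread-watcher",
                           daemon=True)
watcher.start()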
Example #20
def _check_executor_started(executor):
    # Submit a small job to make sure that the pool is in a working state
    res = executor.submit(id, None)
    try:
        res.result(timeout=TIMEOUT)
    except TimeoutError:
        print('\n' * 3, res.done(), executor._call_queue.empty(),
              executor._result_queue.empty())
        print(executor._processes)
        print(threading.enumerate())
        from faulthandler import dump_traceback
        dump_traceback()
        executor.submit(dump_traceback).result(TIMEOUT)
        raise RuntimeError("Executor took too long to run basic task.")
Example #21
    def render_GET(self, request):
        logger.info("Handling thread dump request")

        try:
            with open(
                    '/var/log/anchore/pid_{}_thread_dump-{}'.format(
                        os.getpid(),
                        datetime.datetime.now().isoformat()), 'w') as dest:
                faulthandler.dump_traceback(dest, all_threads=True)
        except:
            logger.exception('Error dumping thread frames')
            return b'Failed'

        return b'Success'
Example #22
    def __dump_debug(self, signum, frame):
        """
        Dumps current threads + pending RPC requests
        """

        # Prepare debug output
        output = self._log_folder / f"RpcServerDump-{time.strftime('%Y%m%d%H%M%S')}.txt"
        with output.open("w") as f:
            # Dump threads
            faulthandler.dump_traceback(f, all_threads=True)

            # Dump pending calls
            f.write("\n\nPending RPC calls:\n")
            for call in list(self.calls):
                f.write(f"{call}\n")
Example #23
def _watchdog_function(self):
    """The watchdog thread."""
    logging.info("Starting watchdog thread with timeout %r", self._timeout)
    while not self._stopped:
        time.sleep(self._timeout / 10.0)
        current_time = time.time()
        if current_time - self._last_activity_time >= self._timeout:
            logging.warning(
                "No activity for ClusterCoordinator for %r seconds. "
                "Dumping stack traces.", self._timeout)
            if self._on_triggered:
                self._on_triggered()
            faulthandler.dump_traceback(file=self._traceback_file)
            self._traceback_file.write("==== End of stack traces ====\n")
            self._last_activity_time = current_time
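faulthandler can also arm the watchdog itself: dump_traceback_later() schedules a dump from a watchdog thread inside the module. A sketch of that alternative to the hand-rolled loop above (the 5-second timeout and sleep are illustrative):

import faulthandler
import sys
import time

# Dump all thread tracebacks if the program is still busy after 5 seconds;
# repeat=True re-arms the timer after each dump.
faulthandler.dump_traceback_later(5.0, repeat=True, file=sys.stderr)

try:
    time.sleep(12)  # stand-in for work that might hang
finally:
    faulthandler.cancel_dump_traceback_later()  # disarm the watchdog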
Example #24
def suicide(**kwargs):
    time.sleep(config.suicide_timeout)
    try:
        logging.critical('SUICIDE')
        if config.show_traceback:
            faulthandler.dump_traceback()
    finally:
        try:
            parent = psutil.Process(os.getpid())
            for child in parent.children(recursive=True):
                try:
                    child.kill()
                except:
                    log_traceback()
        finally:
            os.kill(os.getpid(), signal.SIGKILL)
Example #25
    def handleWorkerTimer(self):
        """
        If this timer fires, it means the worker took longer than
        expected to complete a task, so it is probably hung.

        Not sure whether we should handle this or just crash, but for now
        we're going with crash. This may be all we can do anyway, as
        there is no way to kill a QRunnable that is stuck.
        """
        # Print a complete traceback including what all the threads were doing.
        print("Full Traceback With Threads:")
        faulthandler.dump_traceback()
        print("")

        e_string = "HALWorker for '" + self.module_name + "' module timed out handling '" + self.worker.message.m_type + "'!"
        raise halExceptions.HalException(e_string)
Example #26
    def handleWorkerTimer(self):
        """
        If this timer fires, it means the worker took longer than
        expected to complete a task, so it is probably hung.

        Not sure whether we should handle this or just crash, but for now
        we're going with crash. This may be all we can do anyway, as
        there is no way to kill a QRunnable that is stuck.
        """
        # Print a complete traceback including what all the threads were doing.
        print("Full Traceback With Threads:")
        faulthandler.dump_traceback()
        print("")

        e_string = "HALWorker for '" + self.module_name + "' module timed out handling '" + self.worker.message.m_type + "'!"
        raise halExceptions.HalException(e_string)
Example #27
def _fail_on_deadlock(self, executor):
    # If we did not recover before TIMEOUT seconds, consider that the
    # executor is in a deadlock state and forcefully clean all its
    # components.
    import faulthandler
    from tempfile import TemporaryFile
    with TemporaryFile(mode="w+") as f:
        faulthandler.dump_traceback(file=f)
        f.seek(0)
        tb = f.read()
    for p in executor._processes.values():
        p.terminate()
    # It should be safe to call executor.shutdown here as all possible
    # deadlocks should have been broken.
    executor.shutdown(wait=True)
    print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
    self.fail(f"Executor deadlock:\n\n{tb}")
Example #28
def _fail_on_deadlock(self, executor):
    # If we did not recover before TIMEOUT seconds, consider that the
    # executor is in a deadlock state and forcefully clean all its
    # components.
    import faulthandler
    from tempfile import TemporaryFile
    with TemporaryFile(mode="w+") as f:
        faulthandler.dump_traceback(file=f)
        f.seek(0)
        tb = f.read()
    for p in executor._processes.values():
        p.terminate()
    # It should be safe to call executor.shutdown here as all possible
    # deadlocks should have been broken.
    executor.shutdown(wait=True)
    print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
    self.fail(f"Executor deadlock:\n\n{tb}")
Example #29
def _run(self):
    with self._cv:
        print('Timeout monitor active...')
        while self._running:
            self._cv.wait_for(lambda: self._waiting or not self._running)
            if not self._running:
                break
            not_expired = self._cv.wait_for(
                lambda: not self._waiting or not self._running,
                self._timeout)
            if not not_expired:
                print('TIMEOUT!')
                self._dump_memory()
                with open('./freeze-trace.txt', "w+") as f:
                    faulthandler.dump_traceback(f)
                self._killed = True
                self.unwrapped.proc1.kill()
                time.sleep(15)
Example #30
    def check_threads(cls, join_timeout):
        threads = threading.enumerate()
        result = True
        if len(threads) > 1:
            print('Several alive threads spotted. Dumping stacks')
            faulthandler.dump_traceback()
            print(
                'Trying to join threads for the second time. Join timeout - %s'
                % str(join_timeout))
            for thread in threads:
                if thread != threading.current_thread():
                    print('Join thread "%s"' % thread.name)
                    thread.join(join_timeout)
                    if thread.is_alive() is True:
                        print('Thread "%s" still alive' % thread.name)
                        result = False

        return result
Example #31
def main(argv):
    del argv
    logging.info('Starting RL training.')

    gin_configs = FLAGS.config if FLAGS.config is not None else []
    gin.enter_interactive_mode()
    gin.parse_config_files_and_bindings(FLAGS.config_file, gin_configs)
    gin.exit_interactive_mode()

    logging.info('Gin config:')
    logging.info(gin_configs)

    train_rl(output_dir=FLAGS.output_dir)

    # TODO(afrozm): This is for debugging.
    logging.info('Dumping stack traces of all threads.')
    faulthandler.dump_traceback(all_threads=True)

    logging.info('Training is done, should exit.')
Example #32
 def __call__(self, locus, format):
     """We assume locus has been split and is an array with no return character at end"""
     global genotype_values
     try:
         if self.genokey not in format:
             faulthandler.dump_traceback(all_threads=True)
         data_index = format.index(self.genokey)
     except:
         Exit(
             f"Unable to find data key, {self.genokey}, in  format list: {format}"
         )
     genotypes = GenotypeData()
     for genotype in locus[9:]:
         genotype = genotype.split(":")
         if len(genotype) > data_index:
             genotypes.append(genotype[data_index])
         else:
             genotypes.append(self.missing)
     return genotypes
Example #33
    def _event_listener(pipe, rank: int):
        logger.info(f'Starting event listener thread for {rank}')
        while True:
            if pipe.poll(None):

                if pipe.closed:
                    logger.info(f'Pipe closed for process {rank}, stopping event listener thread')
                    return

                event = pipe.recv()
                logger.info(f'Received event {event} on process {rank}')

                if event == MultiProcessTestCase.Event.GET_TRACEBACK:
                    # Return traceback to the parent process.
                    with tempfile.NamedTemporaryFile(mode='r+') as tmp_file:
                        faulthandler.dump_traceback(tmp_file)
                        # Flush buffers and seek to read from the beginning
                        tmp_file.flush()
                        tmp_file.seek(0)
                        pipe.send(tmp_file.read())

                        logger.info(f'Process {rank} sent traceback')
Example #34
def main(argv):
    del argv
    logging.info('Starting RL training.')

    gin_configs = FLAGS.config or []
    gin.parse_config_files_and_bindings(FLAGS.config_file, gin_configs)

    logging.info('Gin config:')
    logging.info(gin_configs)

    train_rl(
        output_dir=FLAGS.output_dir,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        trajectory_dump_dir=(FLAGS.trajectory_dump_dir or None),
    )

    # TODO(afrozm): This is for debugging.
    logging.info('Dumping stack traces of all threads.')
    faulthandler.dump_traceback(all_threads=True)

    logging.info('Training is done, should exit.')
Example #35
    def _logInfoWidget(self):
        group = QGroupBox()
        group.setTitle(catalog.i18nc("@title:groupbox", "Logs"))
        layout = QVBoxLayout()

        text_area = QTextEdit()
        tmp_file_fd, tmp_file_path = tempfile.mkstemp(prefix = "cura-crash", text = True)
        os.close(tmp_file_fd)
        with open(tmp_file_path, "w") as f:
            faulthandler.dump_traceback(f, all_threads=True)
        with open(tmp_file_path, "r") as f:
            logdata = f.read()

        text_area.setText(logdata)
        text_area.setReadOnly(True)

        layout.addWidget(text_area)
        group.setLayout(layout)

        self.data["log"] = logdata

        return group
Example #36
    def _logInfoWidget(self):
        group = QGroupBox()
        group.setTitle(catalog.i18nc("@title:groupbox", "Logs"))
        layout = QVBoxLayout()

        text_area = QTextEdit()
        tmp_file_fd, tmp_file_path = tempfile.mkstemp(prefix = "cura-crash", text = True)
        os.close(tmp_file_fd)
        with open(tmp_file_path, "w", encoding = "utf-8") as f:
            faulthandler.dump_traceback(f, all_threads=True)
        with open(tmp_file_path, "r", encoding = "utf-8") as f:
            logdata = f.read()

        text_area.setText(logdata)
        text_area.setReadOnly(True)

        layout.addWidget(text_area)
        group.setLayout(layout)

        self.data["log"] = logdata

        return group
Example #37
def get_traces_parallel(statespace, contract):
    faulthandler.dump_traceback(file=sys.stderr, all_threads=True)
    states = []

    for k in statespace.nodes:
        node = statespace.nodes[k]
        for state in node.states:
            if state.get_current_instruction()['opcode'] in ["STOP", "RETURN"]:
                state.contract = contract
                states.append(state)

    #print("Parallel Trace building")
    #signal.signal(signal.SIGSEGV, sig_handler)
    #max_threads = 2
    #results = [None]*len(states)
    #threads = [Thread(target=get_traces_mod, args=(states, results, i, max_threads)) for i in range(max_threads)]
    #for thread in threads:
    #    thread.start()
    #for thread in threads:
    #    thread.join()
    pool = ThreadPool(4)
    results = pool.map(get_trace_for_state, states)
    pool.close()
    pool.join()
    #results = []
    #with ThreadPoolExecutor(max_workers=4) as e:
        #for state in states:
        #    results.append(e.submit(get_trace_for_state, state))
        #results = [result.result() for result in results]
        ##results = e.map(get_trace_for_state, states)

    print("Finished")
    print(results)

    return [trace for trace_type, trace in results if trace is not None and trace_type == "c"], \
           [trace for trace_type, trace in results if trace is not None and trace_type == "t"]
Example #38
def test_dump_traceback(self):
    import faulthandler, sys
    faulthandler.dump_traceback()
    faulthandler.dump_traceback(file=sys.stderr, all_threads=True)
Example #39
    test_lookup(admin)
    test_admin(admin)

if __name__ == '__main__':
    try:
        import os,threading,time,faulthandler,traceback
        import logging
        #logging.getLogger().setLevel(logging.DEBUG)
        #logging.getLogger('modules.Message').setLevel(logging.DEBUG)
        #logging.getLogger('modules.MUCJabberBot').setLevel(logging.DEBUG)
        #logging.getLogger('sleekxmpp').setLevel(logging.DEBUG)
        #logging.getLogger('sleekxmpp.xmlstream.xmlstream').setLevel(logging.FATAL)
        logging.getLogger('modules.SweetieAdmin').setLevel(logging.DEBUG)
        #logging.getLogger('modules.SweetieLookup').setLevel(logging.DEBUG)
        #logging.getLogger('modules.SweetieSeen').setLevel(logging.DEBUG)

        run_tests()
    except:
        traceback.print_exc()
    finally:
        traceback.print_stack()
        fake_user_disconnects(admin)
        fake_user_disconnects(test_user)
        bot_disconnects(sweetie)
        if threading.active_count() != 1:
            print('waiting for all threads to end')
            time.sleep(5)
            print('threads remaining: {}'.format(threading.active_count()))
            faulthandler.dump_traceback()
        os._exit(1)
Example #40
def call_from_console(self):
    # Write the stacks of all threads to a log file, closing it afterwards.
    with open('log/tracestack.log', 'w+') as f:
        faulthandler.dump_traceback(file=f, all_threads=True)
    return "[PCP Bot] dumped state of Proxy"
Example #41
def call_from_console(self):
    # Write the stacks of all threads to a log file, closing it afterwards.
    with open('log/tracestack.log', 'w+') as f:
        faulthandler.dump_traceback(file=f, all_threads=True)
    return "[Traceback] dumped state of Proxy"
Example #42
def print_traceback(*args, **kwargs):
    del args, kwargs
    faulthandler.dump_traceback()
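Beyond calling dump_traceback() from a hand-written handler like the one above, faulthandler can be bound to crashes and signals directly. A short sketch of those standard calls (SIGUSR1 is Unix-only, hence the hasattr guard, as in the test examples earlier):

import faulthandler
import signal
import sys

# Dump tracebacks automatically on fatal errors (SIGSEGV, SIGFPE, ...).
faulthandler.enable(file=sys.stderr, all_threads=True)

# On Unix, bind a user signal to a dump without writing a handler at all.
if hasattr(faulthandler, "register"):
    faulthandler.register(signal.SIGUSR1, all_threads=True)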