def render_GET(self, request):
    """
    .. http:get:: /debug/threads

    A GET request to this endpoint returns information about running threads.

        **Example request**:

        .. sourcecode:: none

            curl -X GET http://localhost:8085/debug/threads

        **Example response**:

        .. sourcecode:: javascript

            {
                "threads": [{
                    "thread_id": 123456,
                    "thread_name": "my_thread",
                    "frames": ["my_frame", ...]
                }, ...]
            }
    """
    watchdog = WatchDog()
    return json.dumps({"threads": watchdog.get_threads_info()})
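# Hedged sketch (not part of the original snippet): one way the render_GET method
# above could be hosted so that the documented curl request works. The class name
# DebugThreadsEndpoint and the port 8085 are assumptions taken from the docstring,
# not from the project's actual REST API wiring; WatchDog and json are assumed to
# be imported as in the snippet above.
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site


class DebugThreadsEndpoint(Resource):
    isLeaf = True

    def render_GET(self, request):
        request.setHeader(b"Content-Type", b"application/json")
        return json.dumps({"threads": WatchDog().get_threads_info()})


root = Resource()
debug = Resource()
root.putChild("debug", debug)
debug.putChild("threads", DebugThreadsEndpoint())

reactor.listenTCP(8085, Site(root))
# reactor.run() would then serve GET http://localhost:8085/debug/threads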
class AbstractServer(BaseTestCase):

    _annotate_counter = 0

    def __init__(self, *args, **kwargs):
        super(AbstractServer, self).__init__(*args, **kwargs)
        self.watchdog = WatchDog()
        self.selected_socks5_ports = set()

        # Enable Deferred debugging
        from twisted.internet.defer import setDebugging
        setDebugging(True)

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def setUp(self, annotate=True):
        self._logger = logging.getLogger(self.__class__.__name__)

        self.session_base_dir = mkdtemp(suffix="_tribler_test_session")
        self.state_dir = os.path.join(self.session_base_dir, u"dot.Tribler")
        self.dest_dir = os.path.join(self.session_base_dir, u"TriblerDownloads")

        yield self.checkReactor(phase="setUp")

        self.setUpCleanup()
        os.makedirs(self.session_base_dir)
        self.annotate_dict = {}

        self.file_server = None
        self.dscfg_seed = None

        if annotate:
            self.annotate(self._testMethodName, start=True)
        self.watchdog.start()

    def setUpCleanup(self):
        # Change to an existing dir before cleaning up.
        os.chdir(TESTS_DIR)
        shutil.rmtree(unicode(self.session_base_dir), ignore_errors=True)

    def setUpFileServer(self, port, path):
        # Create a local file server, can be used to serve local files. This is preferred over an external network
        # request in order to get files.
        resource = File(path)
        factory = Site(resource)
        self.file_server = reactor.listenTCP(port, factory)

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def checkReactor(self, phase, *_):
        delayed_calls = reactor.getDelayedCalls()
        if delayed_calls:
            self._logger.error("The reactor was dirty during %s:", phase)
            for dc in delayed_calls:
                self._logger.error("> %s" % dc)
                dc.cancel()

        has_network_selectables = False
        for item in reactor.getReaders() + reactor.getWriters():
            if isinstance(item, HTTPChannel) or isinstance(item, Client):
                has_network_selectables = True
                break

        if has_network_selectables:
            # TODO(Martijn): we wait a while before we continue the check since network selectables
            # might take some time to cleanup. I'm not sure what's causing this.
            yield deferLater(reactor, 0.2, lambda: None)

        # This is the same check as in the _cleanReactor method of Twisted's Trial
        selectable_strings = []
        for sel in reactor.removeAll():
            if interfaces.IProcessTransport.providedBy(sel):
                self._logger.error("Sending kill signal to %s", repr(sel))
                sel.signalProcess('KILL')
            selectable_strings.append(repr(sel))

        self.assertFalse(delayed_calls, "The reactor was dirty during %s" % phase)

        if Session.has_instance():
            try:
                yield Session.get_instance().shutdown()
            except:
                pass
            Session.del_instance()

            raise RuntimeError("Found a leftover session instance during %s" % phase)

        self.assertFalse(selectable_strings,
                         "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))

        # Check whether we have closed all the sockets
        open_readers = reactor.getReaders()
        for reader in open_readers:
            self.assertNotIsInstance(reader, BasePort,
                                     "Listening ports left on the reactor during %s: %s" % (phase, reader))

        # Check whether the threadpool is clean
        tp_items = len(reactor.getThreadPool().working)
        if tp_items > 0:
            # Print all stacks to debug this issue
            self.watchdog.print_all_stacks()
        self.assertEqual(tp_items, 0, "Still items left in the threadpool")

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def tearDown(self, annotate=True):
        self.tearDownCleanup()
        if annotate:
            self.annotate(self._testMethodName, start=False)

        process_unhandled_exceptions()
        process_unhandled_twisted_exceptions()

        self.watchdog.join(2)
        if self.watchdog.is_alive():
            self._logger.critical("The WatchDog didn't stop!")
            self.watchdog.print_all_stacks()
            raise RuntimeError("Couldn't stop the WatchDog")

        if self.file_server:
            yield maybeDeferred(self.file_server.stopListening).addCallback(self.checkReactor)
        else:
            yield self.checkReactor("tearDown")

    def tearDownCleanup(self):
        self.setUpCleanup()

    def getStateDir(self, nr=0):
        state_dir = self.state_dir + (str(nr) if nr else '')
        if not os.path.exists(state_dir):
            os.mkdir(state_dir)
        return state_dir

    def getDestDir(self, nr=0):
        dest_dir = self.dest_dir + (str(nr) if nr else '')
        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)
        return dest_dir

    def annotate(self, annotation, start=True, destdir=OUTPUT_DIR):
        if not os.path.exists(destdir):
            os.makedirs(os.path.abspath(destdir))

        if start:
            self.annotate_dict[annotation] = time.time()
        else:
            filename = os.path.join(destdir, u"annotations.txt")
            if os.path.exists(filename):
                f = open(filename, 'a')
            else:
                f = open(filename, 'w')
                print >> f, "annotation start end"

            AbstractServer._annotate_counter += 1
            _annotation = re.sub('[^a-zA-Z0-9_]', '_', annotation)
            _annotation = u"%d_" % AbstractServer._annotate_counter + _annotation

            print >> f, _annotation, self.annotate_dict[annotation], time.time()
            f.close()

    def get_bucket_range_port(self):
        """
        Return the port range of the test bucket assigned.
        """
        min_base_port = 1000 if not os.environ.get("TEST_BUCKET", None) \
            else int(os.environ['TEST_BUCKET']) * 2000 + 2000
        return min_base_port, min_base_port + 2000

    def get_socks5_ports(self):
        """
        Return five random, free socks5 ports.
        This is here to make sure that tests in different buckets get assigned different SOCKS5 listen ports.
        Also, make sure that we have no duplicates in selected socks5 ports.
        """
        socks5_ports = []
        for _ in xrange(0, 5):
            min_base_port, max_base_port = self.get_bucket_range_port()
            selected_port = get_random_port(min_port=min_base_port, max_port=max_base_port)
            while selected_port in self.selected_socks5_ports:
                selected_port = get_random_port(min_port=min_base_port, max_port=max_base_port)
            self.selected_socks5_ports.add(selected_port)
            socks5_ports.append(selected_port)
        return socks5_ports
class AbstractServer(BaseTestCase):

    _annotate_counter = 0

    def __init__(self, *args, **kwargs):
        super(AbstractServer, self).__init__(*args, **kwargs)
        self.watchdog = WatchDog()

    def setUp(self, annotate=True):
        self._logger = logging.getLogger(self.__class__.__name__)

        self.session_base_dir = mkdtemp(suffix="_tribler_test_session")
        self.state_dir = os.path.join(self.session_base_dir, u"dot.Tribler")
        self.dest_dir = os.path.join(self.session_base_dir, u"TriblerDownloads")

        defaults.sessdefaults['general']['state_dir'] = self.state_dir
        defaults.dldefaults["downloadconfig"]["saveas"] = self.dest_dir

        self.setUpCleanup()
        os.makedirs(self.session_base_dir)
        self.annotate_dict = {}

        if annotate:
            self.annotate(self._testMethodName, start=True)
        self.watchdog.start()

    def setUpCleanup(self):
        # Change to an existing dir before cleaning up.
        os.chdir(TESTS_DIR)
        shutil.rmtree(unicode(self.session_base_dir), ignore_errors=True)

    def tearDown(self, annotate=True):
        self.tearDownCleanup()
        if annotate:
            self.annotate(self._testMethodName, start=False)

        delayed_calls = reactor.getDelayedCalls()
        if delayed_calls:
            self._logger.error("The reactor was dirty:")
            for dc in delayed_calls:
                self._logger.error("> %s" % dc)
        self.assertFalse(delayed_calls, "The reactor was dirty when tearing down the test")
        self.assertFalse(Session.has_instance(), 'A session instance is still present when tearing down the test')

        process_unhandled_exceptions()

        self.watchdog.join()

    def tearDownCleanup(self):
        self.setUpCleanup()

    def getStateDir(self, nr=0):
        state_dir = self.state_dir + (str(nr) if nr else '')
        if not os.path.exists(state_dir):
            os.mkdir(state_dir)
        return state_dir

    def getDestDir(self, nr=0):
        dest_dir = self.dest_dir + (str(nr) if nr else '')
        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)
        return dest_dir

    def annotate(self, annotation, start=True, destdir=OUTPUT_DIR):
        if not os.path.exists(destdir):
            os.makedirs(os.path.abspath(destdir))

        if start:
            self.annotate_dict[annotation] = time.time()
        else:
            filename = os.path.join(destdir, u"annotations.txt")
            if os.path.exists(filename):
                f = open(filename, 'a')
            else:
                f = open(filename, 'w')
                print >> f, "annotation start end"

            AbstractServer._annotate_counter += 1
            _annotation = re.sub('[^a-zA-Z0-9_]', '_', annotation)
            _annotation = u"%d_" % AbstractServer._annotate_counter + _annotation

            print >> f, _annotation, self.annotate_dict[annotation], time.time()
            f.close()
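# Hedged illustration (not in the original source): the annotate() helper above
# appends space-separated records to OUTPUT_DIR/annotations.txt, a header on the
# first write and then one "name start end" row per annotated test. The names
# and timestamps below are made-up examples of that layout:
#
#   annotation start end
#   1_test_start_session 1500000000.12 1500000003.45
#   2_test_download 1500000003.50 1500000010.01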
class TriblerCoreTestWatchDog(TriblerCoreTest):

    def setUp(self):
        self._test_event = Event()
        self._test_event.set()
        self._printe_event = Event()
        self.watchdog = WatchDog()

    def tearDown(self):
        if self.watchdog.is_alive():
            self.watchdog.join()
        self._test_event = None
        self._printe_event = None
        self.watchdog = None

    def _dummy_printe(self, _):
        self._printe_event.set()

    def test_watchdog_event(self):
        self.watchdog.printe = self._dummy_printe
        self.watchdog.register_event(self._test_event, "42-event", 0.2)

        # The event hasn't been set prematurely
        self.assertFalse(self._printe_event.is_set())

        self.watchdog.start()

        # Something has been printed
        self.assertTrue(self._printe_event.wait(1))

        # The failed watchdog has been removed from the watch list
        with self.watchdog._synchronized_lock:
            self.assertNotIn("42-event", self.watchdog._registered_events.keys())

    def test_watchdog_event_debug(self):
        self.watchdog.printe = self._dummy_printe
        self.watchdog.debug = True
        self.watchdog.register_event(self._test_event, "42-event", 0.2)
        self.watchdog.start()

        # Something has been printed (the "watchdog is OK" message)
        self.assertTrue(self._printe_event.wait(1))

    def test_watchdog_print_all_stacks(self):
        self.watchdog.printe = self._dummy_printe

        # print_all_stacks() works on its own too
        self.assertFalse(self._printe_event.is_set())
        self.watchdog.print_all_stacks()
        self.assertTrue(self._printe_event.is_set())

    def test_watchdog_deadlock(self):
        self.watchdog.printe = self._dummy_printe
        self.watchdog.max_same_stack_time = 0
        self.watchdog.check_for_deadlocks = True

        # The event hasn't triggered before starting the watchdog
        self.assertFalse(self._printe_event.is_set())

        self.watchdog.start()

        # The event gets set when a thread has had the same stack for more than 0 seconds.
        self.assertTrue(self._printe_event.wait(1))

    def test_watchdog_thread_name(self):
        """
        Test the thread names reported by the watchdog.
        """
        self.assertEquals("Unknown", self.watchdog.get_thread_name(-1))
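# Hedged usage sketch (not in the original source), based only on the calls the
# tests above exercise. A WatchDog watches registered threading.Event objects and
# reports through its printe hook when one misses its deadline; the exact
# pass/fail semantics live in the WatchDog implementation, which is not shown
# here, so treat the names and values below as illustrative.
from threading import Event

heartbeat = Event()

watchdog = WatchDog()
# Argument order as used in the tests: (event, name, deadline) with the deadline
# apparently in seconds, judging by the 0.2 used above.
watchdog.register_event(heartbeat, "worker-heartbeat", 10)
watchdog.start()

# On demand, dump the stack of every running thread (useful when debugging hangs).
watchdog.print_all_stacks()

# The test base classes shut it down by joining with a timeout and then checking
# is_alive() to detect a watchdog that refuses to stop.
watchdog.join(2)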
class AbstractServer(BaseTestCase):

    _annotate_counter = 0

    def __init__(self, *args, **kwargs):
        super(AbstractServer, self).__init__(*args, **kwargs)
        twisted.internet.base.DelayedCall.debug = True
        self.watchdog = WatchDog()
        self.selected_socks5_ports = set()

        # Enable Deferred debugging
        from twisted.internet.defer import setDebugging
        setDebugging(True)

    @inlineCallbacks
    def setUp(self):
        self._logger = logging.getLogger(self.__class__.__name__)

        self.session_base_dir = self.temporary_directory(suffix="_tribler_test_session_")
        self.state_dir = os.path.join(self.session_base_dir, u"dot.Tribler")
        self.dest_dir = os.path.join(self.session_base_dir, u"TriblerDownloads")

        # Wait until the reactor has started
        reactor_deferred = Deferred()
        reactor.callWhenRunning(reactor_deferred.callback, None)

        self.annotate_dict = {}

        self.file_server = None
        self.dscfg_seed = None

        self.annotate(self._testMethodName, start=True)
        self.watchdog.start()

        yield reactor_deferred

    def setUpFileServer(self, port, path):
        # Create a local file server, can be used to serve local files. This is preferred over an external network
        # request in order to get files.
        resource = File(path)
        factory = Site(resource)
        self.file_server = reactor.listenTCP(port, factory)

    @inlineCallbacks
    def checkReactor(self, phase, *_):
        delayed_calls = reactor.getDelayedCalls()
        if delayed_calls:
            self._logger.error("The reactor was dirty during %s:", phase)
            for dc in delayed_calls:
                self._logger.error("> %s", dc)
                dc.cancel()

        from pony.orm.core import local
        if local.db_context_counter > 0:
            self._logger.error("Leftover pony db sessions found!")
            from pony.orm import db_session
            for _ in range(local.db_context_counter):
                db_session.__exit__()

        has_network_selectables = False
        for item in reactor.getReaders() + reactor.getWriters():
            if isinstance(item, HTTPChannel) or isinstance(item, Client):
                has_network_selectables = True
                break

        if has_network_selectables:
            # TODO(Martijn): we wait a while before we continue the check since network selectables
            # might take some time to cleanup. I'm not sure what's causing this.
            yield deferLater(reactor, 0.2, lambda: None)

        # This is the same check as in the _cleanReactor method of Twisted's Trial
        selectable_strings = []
        for sel in reactor.removeAll():
            if interfaces.IProcessTransport.providedBy(sel):
                self._logger.error("Sending kill signal to %s", repr(sel))
                sel.signalProcess('KILL')
            selectable_strings.append(repr(sel))

        self.assertFalse(selectable_strings,
                         "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))

        # Check whether we have closed all the sockets
        open_readers = reactor.getReaders()
        for reader in open_readers:
            self.assertNotIsInstance(reader, BasePort)

        # Check whether the threadpool is clean
        tp_items = len(reactor.getThreadPool().working)
        if tp_items > 0:
            # Print all stacks to debug this issue
            self.watchdog.print_all_stacks()
        self.assertEqual(tp_items, 0, "Still items left in the threadpool")

    @inlineCallbacks
    def tearDown(self):
        self.annotate(self._testMethodName, start=False)

        process_unhandled_exceptions()
        process_unhandled_twisted_exceptions()

        self.watchdog.join(2)
        if self.watchdog.is_alive():
            self._logger.critical("The WatchDog didn't stop!")
            self.watchdog.print_all_stacks()
            raise RuntimeError("Couldn't stop the WatchDog")

        if self.file_server:
            yield maybeDeferred(self.file_server.stopListening).addCallback(self.checkReactor)
        else:
            yield self.checkReactor("tearDown")

        super(AbstractServer, self).tearDown()

    def getStateDir(self, nr=0):
        state_dir = self.state_dir + (str(nr) if nr else '')
        if not os.path.exists(state_dir):
            os.mkdir(state_dir)
        return state_dir

    def getDestDir(self, nr=0):
        dest_dir = self.dest_dir + (str(nr) if nr else '')
        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)
        return dest_dir

    def annotate(self, annotation, start=True, destdir=OUTPUT_DIR):
        if not os.path.exists(destdir):
            os.makedirs(os.path.abspath(destdir))

        if start:
            self.annotate_dict[annotation] = time.time()
        else:
            filename = os.path.join(destdir, u"annotations.txt")
            if os.path.exists(filename):
                f = open(filename, 'a')
            else:
                f = open(filename, 'w')
                f.write("annotation start end\n")

            AbstractServer._annotate_counter += 1
            _annotation = re.sub('[^a-zA-Z0-9_]', '_', annotation)
            _annotation = u"%d_" % AbstractServer._annotate_counter + _annotation

            f.write("%s %s %s\n" % (_annotation, self.annotate_dict[annotation], time.time()))
            f.close()

    def get_bucket_range_port(self):
        """
        Return the port range of the test bucket assigned.
        """
        min_base_port = 1000 if not os.environ.get("TEST_BUCKET", None) \
            else int(os.environ['TEST_BUCKET']) * 2000 + 2000
        return min_base_port, min_base_port + 2000

    def get_socks5_ports(self):
        """
        Return five random, free socks5 ports.
        This is here to make sure that tests in different buckets get assigned different SOCKS5 listen ports.
        Also, make sure that we have no duplicates in selected socks5 ports.
        """
        socks5_ports = []
        for _ in xrange(0, 5):
            min_base_port, max_base_port = self.get_bucket_range_port()
            selected_port = get_random_port(min_port=min_base_port, max_port=max_base_port)
            while selected_port in self.selected_socks5_ports:
                selected_port = get_random_port(min_port=min_base_port, max_port=max_base_port)
            self.selected_socks5_ports.add(selected_port)
            socks5_ports.append(selected_port)
        return socks5_ports
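# Worked example (illustrative, not in the original source) of the bucket
# arithmetic in get_bucket_range_port() above:
#
#   TEST_BUCKET unset -> min_base_port = 1000     -> range (1000, 3000)
#   TEST_BUCKET=0     -> 0 * 2000 + 2000 = 2000   -> range (2000, 4000)
#   TEST_BUCKET=3     -> 3 * 2000 + 2000 = 8000   -> range (8000, 10000)
#
# get_socks5_ports() then draws five distinct free ports from that range, so test
# buckets running in parallel never compete for the same SOCKS5 listen ports.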