def __init__(self, session):
    RESTEndpoint.__init__(self, session)
    TaskManager.__init__(self)
    self.events_responses = []
    self.app.on_shutdown.append(self.on_shutdown)

    # We need to know that Tribler completed its startup sequence
    self.tribler_started = False
    self.session.notifier.add_observer(NTFY.TRIBLER_STARTED, self._tribler_started)

    for event_type, event_lambda in reactions_dict.items():
        self.session.notifier.add_observer(
            event_type,
            lambda *args, el=event_lambda, et=event_type: self.write_data(
                {"type": et.value, "event": el(*args)}))

    def on_circuit_removed(circuit, *args):
        if isinstance(circuit, Circuit):
            event = {
                "circuit_id": circuit.circuit_id,
                "bytes_up": circuit.bytes_up,
                "bytes_down": circuit.bytes_down,
                "uptime": time.time() - circuit.creation_time
            }
            self.write_data({"type": NTFY.TUNNEL_REMOVE.value, "event": event})

    # Tribler tunnel circuit has been removed
    self.session.notifier.add_observer(NTFY.TUNNEL_REMOVE, on_circuit_removed)
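# A minimal, self-contained sketch (not Tribler code) of the default-argument
# trick used in the observer loop above: without `el=...`/`et=...`, every lambda
# would close over the *last* loop values, because Python closures bind late.
def _demo_late_binding_defaults():
    handlers = {}
    reactions = {"started": lambda: "ok", "stopped": lambda: "done"}
    for event_type, event_lambda in reactions.items():
        # Default arguments are evaluated at definition time, freezing the
        # current loop values inside each callback.
        handlers[event_type] = lambda *args, el=event_lambda, et=event_type: (et, el())
    assert handlers["started"]() == ("started", "ok")
    assert handlers["stopped"]() == ("stopped", "done")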
def setUp(self):
    super(TestTaskManager, self).setUp()

    self.dispersy_objects = []

    self.tm = TaskManager()
    self.tm._reactor = Clock()

    self.counter = 0
def __init__(self, notifier: Notifier, public_key: str = None):
    RESTEndpoint.__init__(self)
    TaskManager.__init__(self)
    self.events_responses: List[RESTStreamResponse] = []
    self.app.on_shutdown.append(self.on_shutdown)
    self.notifier = None
    self.undelivered_error: Optional[dict] = None
    self.connect_notifier(notifier)
    self.public_key = public_key
def __init__(self, working_dir, config_path):
    super().__init__(Service.create_config(working_dir, config_path), None, working_dir, config_path)
    TaskManager.__init__(self)
    self.swarm = None
    self.start = time.time()
    self.results = []
    self.register_task('monitor_swarm', self.monitor_swarm, interval=5)
    self.register_task('_graceful_shutdown', self._graceful_shutdown, delay=EXPERIMENT_RUN_TIME)
def __init__(self, lt_session):
    """
    Initialize the DHT health manager.

    :param lt_session: The session used to perform health lookups.
    """
    TaskManager.__init__(self)
    self.lookup_futures = {}  # Map from binary infohash to future
    self.bf_seeders = {}  # Map from infohash to (final) seeders bloomfilter
    self.bf_peers = {}  # Map from infohash to (final) peers bloomfilter
    self.lt_session = lt_session
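# Hedged sketch (hypothetical class and method names, not the actual
# DHTHealthManager API) of the `lookup_futures` pattern above: one
# asyncio.Future per infohash, handed out to callers and resolved once the
# DHT responses have been aggregated, so concurrent requests share a lookup.
import asyncio


class _LookupSketch:
    def __init__(self):
        self.lookup_futures = {}  # Map from binary infohash to future

    def get_health(self, infohash: bytes) -> asyncio.Future:
        # Called from within the running event loop; reuse an in-flight lookup
        # for the same infohash instead of starting a new one.
        if infohash not in self.lookup_futures:
            self.lookup_futures[infohash] = asyncio.get_running_loop().create_future()
        return self.lookup_futures[infohash]

    def _on_lookup_finished(self, infohash: bytes, result: dict):
        future = self.lookup_futures.pop(infohash, None)
        if future is not None and not future.done():
            future.set_result(result)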
def __init__(self, working_dir, config_path):
    super().__init__(Service.create_config(working_dir, config_path),
                     working_dir=working_dir,
                     components=[Ipv8Component(), KeyComponent(), RESTComponent(), TunnelsComponent()])
    TaskManager.__init__(self)
    self.results = []
    self.output_file = 'speed_test_exit.txt'
def __init__(self, session, history_size=CORE_RESOURCE_HISTORY_SIZE):
    TaskManager.__init__(self)
    ResourceMonitor.__init__(self, history_size=history_size)
    self.session = session
    self.disk_usage_data = deque(maxlen=history_size)
    self.state_dir = session.config.get_state_dir()
    self.resource_log_file = session.config.get_log_dir() / DEFAULT_RESOURCE_FILENAME
    self.resource_log_enabled = session.config.get_resource_monitor_enabled()

    # Setup yappi profiler
    self.profiler = YappiProfiler(self.session)
async def run(self):
    await super().run()

    config = self.session.config

    self._task_manager = TaskManager()

    port = config.ipv8.port
    address = config.ipv8.address
    self.logger.info('Starting ipv8')
    self.logger.info(f'Port: {port}. Address: {address}')

    ipv8_config_builder = (ConfigBuilder()
                           .set_port(port)
                           .set_address(address)
                           .clear_overlays()
                           .clear_keys()  # We load the keys ourselves
                           .set_working_directory(str(config.state_dir))
                           .set_walker_interval(config.ipv8.walk_interval))

    if config.gui_test_mode:
        endpoint = DispatcherEndpoint([])
    else:
        # IPv8 includes IPv6 support by default.
        # We only load IPv4 to not kill all Tribler overlays (currently, it would instantly crash all users).
        # If you want to test IPv6 in Tribler you can set ``endpoint = None`` here.
        endpoint = DispatcherEndpoint(["UDPIPv4"], UDPIPv4={'port': port, 'ip': address})

    ipv8 = IPv8(ipv8_config_builder.finalize(),
                enable_statistics=config.ipv8.statistics and not config.gui_test_mode,
                endpoint_override=endpoint)
    await ipv8.start()
    self.ipv8 = ipv8

    key_component = await self.require_component(KeyComponent)
    self.peer = Peer(key_component.primary_key)

    if config.ipv8.statistics and not config.gui_test_mode:
        # Enable gathering IPv8 statistics
        for overlay in ipv8.overlays:
            ipv8.endpoint.enable_community_statistics(overlay.get_prefix(), True)

    if config.ipv8.walk_scaling_enabled and not config.gui_test_mode:
        from tribler_core.components.ipv8.ipv8_health_monitor import IPv8Monitor
        IPv8Monitor(ipv8,
                    config.ipv8.walk_interval,
                    config.ipv8.walk_scaling_upper_limit).start(self._task_manager)

    if config.dht.enabled:
        self._init_dht_discovery_community()

    if not config.gui_test_mode:
        if config.discovery_community.enabled:
            self._init_peer_discovery_community()
    else:
        if config.dht.enabled:
            self.dht_discovery_community.routing_tables[UDPv4Address] = RoutingTable('\x00' * 20)
def __init__(self, state_dir, log_dir, config: ResourceMonitorSettings, notifier: Notifier,
             history_size=CORE_RESOURCE_HISTORY_SIZE):
    TaskManager.__init__(self)
    ResourceMonitor.__init__(self, history_size=history_size)
    self.config = config
    self.notifier = notifier
    self.disk_usage_data = deque(maxlen=history_size)
    self.state_dir = state_dir
    self.resource_log_file = log_dir / DEFAULT_RESOURCE_FILENAME
    self.resource_log_enabled = config.enabled

    # Setup yappi profiler
    self.profiler = YappiProfiler(log_dir)
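# Hedged sketch (hypothetical class and method names) of how a monitor built on
# py-ipv8's TaskManager typically schedules its periodic sampling: register_task()
# with an `interval` runs the callback repeatedly until the manager is shut down.
# As with the real components above, the object is expected to be created while
# the asyncio event loop is running.
from ipv8.taskmanager import TaskManager


class _MonitorSketch(TaskManager):
    def __init__(self, check_interval: float = 5.0):
        super().__init__()
        self.samples = []
        # Periodic check, analogous to the resource monitors above.
        self.register_task("sample_resources", self._sample, interval=check_interval)

    def _sample(self):
        self.samples.append("sample")  # placeholder for a real measurement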
async def fixture_task_manager():
    task_manager = TaskManager()
    yield task_manager
    await task_manager.shutdown_task_manager()
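# Hedged usage sketch: assuming the generator above is registered as an async
# pytest fixture (e.g. via pytest-asyncio), a test can schedule work on the
# yielded TaskManager and rely on the fixture's shutdown_task_manager() call
# for cleanup. The test name and scheduled callable are illustrative only.
async def test_register_delayed_task(fixture_task_manager):
    fixture_task_manager.register_task("noop", lambda: None, delay=10)
    assert fixture_task_manager.is_pending_task_active("noop")
    # No explicit cancellation needed: the fixture cancels pending tasks on teardown.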
def __init__(self, working_dir, config_path):
    super().__init__(Service.create_config(working_dir, config_path), None, working_dir, config_path)
    TaskManager.__init__(self)
    self.results = []
    self.output_file = 'speed_test_exit.txt'
class TestTaskManager(TestBase):

    def setUp(self):
        super(TestTaskManager, self).setUp()

        self.dispersy_objects = []

        self.tm = TaskManager()
        self.tm._reactor = Clock()

        self.counter = 0

    def tearDown(self):
        self.tm.cancel_all_pending_tasks()

    @twisted_test
    @untwisted_test
    def test_call_later(self):
        self.tm.register_task("test", reactor.callLater(10, lambda: None))
        self.assertTrue(self.tm.is_pending_task_active("test"))

    @twisted_test
    @untwisted_test
    def test_call_later_and_cancel(self):
        self.tm.register_task("test", reactor.callLater(10, lambda: None))
        self.tm.cancel_pending_task("test")
        self.assertFalse(self.tm.is_pending_task_active("test"))

    @twisted_test
    @untwisted_test
    def test_call_later_and_replace(self):
        task1 = self.tm.register_task("test", reactor.callLater(10, lambda: None))
        self.tm.replace_task("test", reactor.callLater(10, lambda: None))
        self.assertTrue(self.tm.is_pending_task_active("test"))
        self.assertFalse(task1.active())

    @twisted_test
    @untwisted_test
    def test_looping_call(self):
        self.tm.register_task("test", LoopingCall(lambda: None)).start(10, now=True)
        self.assertTrue(self.tm.is_pending_task_active("test"))

    @twisted_test
    @untwisted_test
    def test_looping_call_and_cancel(self):
        self.tm.register_task("test", LoopingCall(lambda: None)).start(10, now=True)
        self.tm.cancel_pending_task("test")
        self.assertFalse(self.tm.is_pending_task_active("test"))

    @twisted_test
    @untwisted_test
    def test_delayed_looping_call_requires_interval(self):
        self.assertRaises(ValueError, self.tm.register_task, "test", LoopingCall(lambda: None), delay=1)

    @twisted_test
    @untwisted_test
    def test_delayed_deferred_requires_value(self):
        self.assertRaises(ValueError, self.tm.register_task, "test", deferLater(reactor, 0.0, lambda: None), delay=1)

    @twisted_test
    @untwisted_test
    def test_delayed_looping_call_requires_LoopingCall_or_Deferred(self):
        self.assertRaises(ValueError, self.tm.register_task, "test not Deferred nor LoopingCall",
                          self.tm._reactor.callLater(0, lambda: None), delay=1)

    @twisted_test
    @untwisted_test
    def test_delayed_looping_call_register_and_cancel_pre_delay(self):
        self.assertFalse(self.tm.is_pending_task_active("test"))
        self.tm.register_task("test", LoopingCall(lambda: None), delay=1, interval=1)
        self.assertTrue(self.tm.is_pending_task_active("test"))
        self.tm.cancel_pending_task("test")
        self.assertFalse(self.tm.is_pending_task_active("test"))

    @twisted_test
    @untwisted_test
    def test_delayed_looping_call_register_wait_and_cancel(self):
        self.assertFalse(self.tm.is_pending_task_active("test"))
        lc = LoopingCall(self.count)
        lc.clock = self.tm._reactor
        self.tm.register_task("test", lc, delay=1, interval=1)
        self.assertTrue(self.tm.is_pending_task_active("test"))
        # After one second, the counter has increased by one and the task is still active.
        self.tm._reactor.advance(1)
        self.assertEquals(1, self.counter)
        self.assertTrue(self.tm.is_pending_task_active("test"))
        # After one more second, the counter should be 2
        self.tm._reactor.advance(1)
        self.assertEquals(2, self.counter)
        # After canceling the task the counter should stop increasing
        self.tm.cancel_pending_task("test")
        self.assertFalse(self.tm.is_pending_task_active("test"))
        self.tm._reactor.advance(10)
        self.assertEquals(2, self.counter)

    @twisted_test
    @untwisted_test
    def test_delayed_deferred(self):
        self.assertFalse(self.tm.is_pending_task_active("test"))
        d = Deferred()
        d.addCallback(self.set_counter)
        self.tm.register_task("test", d, delay=1, value=42)
        self.assertTrue(self.tm.is_pending_task_active("test"))
        # After one second, the deferred has fired
        self.tm._reactor.advance(1)
        self.assertEquals(42, self.counter)
        self.assertFalse(self.tm.is_pending_task_active("test"))

    def count(self):
        self.counter += 1

    def set_counter(self, value):
        self.counter = value
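# Hedged sketch (not part of the original test suite) of how the delayed
# looping-call scenario above translates to a recent, asyncio-based py-ipv8
# TaskManager: `delay` postpones the first call and `interval` repeats it,
# so after a short real sleep the counter should have incremented at least once.
import asyncio

from ipv8.taskmanager import TaskManager


async def _demo_delayed_interval_task():
    counter = {"value": 0}

    def count():
        counter["value"] += 1

    tm = TaskManager()
    tm.register_task("test", count, delay=0.05, interval=0.05)
    assert tm.is_pending_task_active("test")
    await asyncio.sleep(0.2)
    assert counter["value"] >= 1
    await tm.shutdown_task_manager()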