def test_multiple_append_uniq_group(self):
    """Concurrent append_uniq_group() calls from several pool tasks must
    still respect the MAX_INFO_INSTANCES cap on a single info set."""
    def append_many_times():
        # Push twice the cap; the single group must absorb the overflow
        for _ in xrange(InfoSet.MAX_INFO_INSTANCES * 2):
            kb.append_uniq_group('a', 'b', MockVuln(),
                                 group_klass=MockInfoSetTrue)

        stored = kb.get('a', 'b')
        self.assertEqual(len(stored), 1)
        self.assertEqual(len(stored[0].infos), InfoSet.MAX_INFO_INSTANCES)
        return True

    pool = Pool(2)
    async_results = [pool.apply_async(append_many_times) for _ in xrange(3)]

    for async_result in async_results:
        self.assertTrue(async_result.get())

    pool.terminate()
    pool.join()
def test_increase_number_of_workers(self):
    """set_worker_count() is applied lazily: the count stays at its old
    value until enough tasks have flowed through the pool."""
    worker_pool = Pool(processes=2,
                       worker_names='WorkerThread',
                       maxtasksperchild=3)
    self.assertEqual(worker_pool.get_worker_count(), 2)

    def add_numbers():
        return 1 + 2

    # Burn through a batch of tasks with the initial two workers
    for _ in xrange(12):
        self.assertEqual(worker_pool.apply_async(func=add_numbers).get(), 3)

    self.assertEqual(worker_pool.get_worker_count(), 2)

    worker_pool.set_worker_count(4)

    # It takes some time...
    self.assertEqual(worker_pool.get_worker_count(), 2)

    # A second batch of tasks gives the pool a chance to grow
    for _ in xrange(12):
        self.assertEqual(worker_pool.apply_async(func=add_numbers).get(), 3)

    self.assertEqual(worker_pool.get_worker_count(), 4)

    worker_pool.terminate()
    worker_pool.join()
def test_multiple_append_uniq_group(self):
    """Three workers hammering append_uniq_group() in parallel must end
    up with exactly one info set, capped at MAX_INFO_INSTANCES."""
    def multi_append():
        iterations = InfoSet.MAX_INFO_INSTANCES * 2

        for _ in xrange(iterations):
            vuln = MockVuln()
            kb.append_uniq_group('a', 'b', vuln,
                                 group_klass=MockInfoSetTrue)

        info_set_list = kb.get('a', 'b')
        self.assertEqual(len(info_set_list), 1)

        info_set = info_set_list[0]
        self.assertEqual(len(info_set.infos), InfoSet.MAX_INFO_INSTANCES)
        return True

    pool = Pool(2)

    first = pool.apply_async(multi_append)
    second = pool.apply_async(multi_append)
    third = pool.apply_async(multi_append)

    self.assertTrue(first.get())
    self.assertTrue(second.get())
    self.assertTrue(third.get())

    pool.terminate()
    pool.join()
def test_inspect_data_to_log(self):
    """inspect_data_to_log() should emit one message for the busy worker
    plus an idle-percentage summary, both captured via write_to_log."""
    worker_pool = Pool(processes=1, worker_names='WorkerThread')
    tso = ThreadStateObserver()
    messages = []

    def save_messages(message):
        messages.append(message)

    # Intercept the observer's log output so we can assert on it
    tso.write_to_log = save_messages

    def sleep(sleep_time, **kwargs):
        time.sleep(sleep_time)

    args = (2,)
    kwds = {'x': 2}
    worker_pool.apply_async(func=sleep, args=args, kwds=kwds)

    # Let the worker get the task
    time.sleep(0.3)

    worker_states = worker_pool.inspect_threads()
    tso.inspect_data_to_log(worker_pool, worker_states)

    self.assertEqual(len(messages), 2, messages)

    # Fix: the trailing dot and the parentheses around the args/kwargs
    # dump are meant to match literally, so they are now escaped (the
    # unescaped parens silently formed a capture group); a raw string
    # keeps the backslashes intact for the regex engine.
    message_re = (r'Worker with ID .*? has been running job .*? for .*?'
                  r' seconds\. The job is: .*?\(.*?, kwargs=.*?\)')
    self.assertRegexpMatches(messages[0], message_re)
    self.assertEqual(messages[1], '0% of WorkerThread workers are idle.')
def test_get_pool_queue_sizes(self):
    """Right after a burst of apply_async() calls the input queue holds
    pending tasks while the output queue is still empty."""
    worker_pool = Pool(processes=4,
                       worker_names='WorkerThread',
                       maxtasksperchild=3)

    for _ in xrange(12):
        worker_pool.apply_async(func=delay)

    queue_sizes = worker_pool.get_pool_queue_sizes()

    self.assertGreater(queue_sizes['inqueue_size'], 0)
    self.assertEqual(queue_sizes['outqueue_size'], 0)

    worker_pool.terminate_join()
def test_worker_stats_not_idle(self):
    """While a task runs, the worker exposes the function name, args and
    kwargs it is busy with, and reports itself as not idle."""
    worker_pool = Pool(processes=1, worker_names='WorkerThread')

    # The function must be called `sleep`: its name is asserted below
    def sleep(sleep_time, **kwargs):
        time.sleep(sleep_time)

    task_args = (2,)
    task_kwds = {'x': 2}
    worker_pool.apply_async(func=sleep, args=task_args, kwds=task_kwds)

    # Let the worker get the task
    time.sleep(0.3)

    # Got it?
    worker = worker_pool._pool[0].worker
    self.assertFalse(worker.is_idle())
    self.assertEqual(worker.func_name, 'sleep')
    self.assertEqual(worker.args, task_args)
    self.assertEqual(worker.kwargs, task_kwds)
    self.assertGreater(worker.job, 1)
def test_terminate_join_after_tasks(self):
    """terminate_join() must shut the pool down cleanly even after the
    maxtasksperchild limit has forced worker recycling."""
    worker_pool = Pool(processes=4,
                       worker_names='WorkerThread',
                       maxtasksperchild=3)

    for _ in xrange(12):
        async_result = worker_pool.apply_async(func=noop)
        self.assertEqual(async_result.get(), 3)

    worker_pool.terminate_join()
def test_inspect_threads(self):
    """inspect_threads() reports the running task's function name, args,
    kwargs and idle flag for the single busy worker."""
    worker_pool = Pool(processes=1, worker_names='WorkerThread')

    # The function must be called `sleep`: its name is asserted below
    def sleep(sleep_time, **kwargs):
        time.sleep(sleep_time)

    expected_args = (2,)
    expected_kwargs = {'x': 2}
    worker_pool.apply_async(func=sleep,
                            args=expected_args,
                            kwds=expected_kwargs)

    # Let the worker get the task
    time.sleep(0.3)

    worker_states = worker_pool.inspect_threads()
    self.assertEqual(len(worker_states), 1)

    state = worker_states[0]
    self.assertEqual(state['func_name'], 'sleep')
    self.assertEqual(state['args'], expected_args)
    self.assertEqual(state['kwargs'], expected_kwargs)
    self.assertEqual(state['idle'], False)
def test_inspect_threads(self):
    """A busy pool worker must show up in inspect_threads() with the
    exact function name, args, kwargs and a False idle flag."""
    worker_pool = Pool(processes=1, worker_names='WorkerThread')

    # Keep the name `sleep` — inspect_threads() reports it verbatim
    def sleep(sleep_time, **kwargs):
        time.sleep(sleep_time)

    args = (2,)
    kwds = {'x': 2}
    worker_pool.apply_async(func=sleep, args=args, kwds=kwds)

    # Let the worker get the task
    time.sleep(0.3)

    worker_states = worker_pool.inspect_threads()
    self.assertEqual(len(worker_states), 1)

    worker_state = worker_states[0]

    expected_state = {'func_name': 'sleep',
                      'args': args,
                      'kwargs': kwds,
                      'idle': False}

    for key, expected_value in expected_state.items():
        self.assertEqual(worker_state[key], expected_value)
def get_fingerprint(url, threads):
    """Run every syntactic HTTP test against *url* through a worker pool
    and return the fingerprint structure.

    :param url: The target URL to fingerprint.
    :param threads: Number of pool workers used to run the tests.
    :return: The `fingerprint` dict with HEADER_ORDER winnowed.

    NOTE(review): the test callables appear to fill the module-level
    `fingerprint` dict as a side effect — confirm against module scope.
    """
    pool = Pool(worker_names='HMap',
                maxtasksperchild=2,
                processes=threads,
                max_queued_tasks=5)

    all_tests = {basic_get,
                 basic_options,
                 unknown_method,
                 unauthorized_activity,
                 nonexistant_object,
                 malformed_method_line,
                 long_url_ranges,
                 long_default_ranges,
                 many_header_ranges,
                 large_header_ranges,
                 unavailable_accept,
                 fake_content_length}

    for http_test in all_tests:
        pool.apply_async(func=http_test, args=(url,))

    pool.close()
    pool.join()
    pool.terminate()

    # Collapse the raw header-order samples into a stable ordering
    header_order = fingerprint['SYNTACTIC']['HEADER_ORDER']
    fingerprint['SYNTACTIC']['HEADER_ORDER'] = winnow_ordered_list(header_order)

    return fingerprint
def test_output_pool_size(self):
    """Once the input queue drains and the result handler has delivered
    the results, the output queue must be empty again."""
    worker_pool = Pool(processes=4,
                       worker_names='WorkerThread',
                       maxtasksperchild=3)

    # Keep the AsyncResult instances alive so the result handler has
    # somewhere to deliver the outcomes
    results = []

    for _ in xrange(12):
        result = worker_pool.apply_async(func=delay)
        results.append(result)

    # Busy-wait until the workers have consumed every queued task
    pool_sizes = worker_pool.get_pool_queue_sizes()

    while pool_sizes['inqueue_size']:
        pool_sizes = worker_pool.get_pool_queue_sizes()

    # Give the result handler task inside the pool set the results on the
    # result instances stored in the results lists
    time.sleep(1)

    # Fix: re-read the queue sizes AFTER the sleep; the previous snapshot
    # was taken before the output queue had a chance to drain, so the
    # assertion was checking stale data and the sleep was pointless.
    pool_sizes = worker_pool.get_pool_queue_sizes()

    # There should be no pending tasks in the output queue
    self.assertEqual(pool_sizes['outqueue_size'], 0)

    worker_pool.terminate_join()
def test_max_queued_tasks(self):
    """With max_queued_tasks=2, apply_async() must block the caller once
    the queue is full and unblock when a worker frees a slot (~2s)."""
    worker_pool = Pool(processes=1, max_queued_tasks=2)

    # These tasks should be queued very fast
    for _ in xrange(4):
        worker_pool.apply_async(func=time.sleep, args=(2,))

    # Now the pool is full and we need to wait in the main
    # thread to get the task queued
    start = time.time()
    worker_pool.apply_async(func=time.sleep, args=(2,))
    spent = time.time() - start

    worker_pool.close()
    worker_pool.join()

    self.assertLess(spent, 2.1)
    self.assertGreater(spent, 1.9)
def test_max_queued_tasks(self):
    """The fifth apply_async() against a full bounded queue must stall
    for roughly one task duration before it can be enqueued."""
    worker_pool = Pool(processes=1, max_queued_tasks=2)

    sleep_args = (2,)

    # These tasks should be queued very fast
    worker_pool.apply_async(func=time.sleep, args=sleep_args)
    worker_pool.apply_async(func=time.sleep, args=sleep_args)
    worker_pool.apply_async(func=time.sleep, args=sleep_args)
    worker_pool.apply_async(func=time.sleep, args=sleep_args)

    # Now the pool is full and we need to wait in the main
    # thread to get the task queued
    before = time.time()
    worker_pool.apply_async(func=time.sleep, args=sleep_args)
    elapsed = time.time() - before

    worker_pool.close()
    worker_pool.join()

    self.assertGreater(elapsed, 1.9)
    self.assertLess(elapsed, 2.1)