def pool_exec(self, jobs, timeout):
    if not jobs:
        return []

    thread_count = 0
    if settings.USE_WORKER_POOL:
        thread_count = min(len(self.finders), settings.POOL_MAX_WORKERS)

    return pool_exec(get_pool('finders', thread_count), jobs, timeout)
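# A minimal sketch of driving the same pool helpers directly, mirroring what
# Store.pool_exec does above. It assumes the `pool` module exercised by the
# tests below (get_pool, Job, pool_exec, PoolTimeoutError, stop_pool), the
# Job(func, name, *args) calling convention those tests use, an enabled worker
# pool, and the graphite.worker_pool.pool import path; `slow_double` and
# `run_example` are hypothetical names used only for illustration.
import time

from graphite.worker_pool import pool


def slow_double(value):
    time.sleep(0.1)
    return value * 2


def run_example():
    p = pool.get_pool(name='example', thread_count=4)
    jobs = [pool.Job(slow_double, 'double %d' % i, i) for i in range(10)]

    results = []
    try:
        # pool_exec yields each Job as it completes, up to the timeout.
        for job in pool.pool_exec(p, jobs, 5):
            if job.exception:
                # Failures are captured on the Job instead of being raised.
                continue
            results.append(job.result)
    except pool.PoolTimeoutError:
        # Not every job finished within 5 seconds.
        pass
    finally:
        pool.stop_pool('example')

    return results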
def test_timeout_sync(self):
    p = pool.get_pool(thread_count=0)
    jobs = [pool.Job(lambda v: time.sleep(1) and v, 'job', i) for i in range(1, 5)]
    with self.assertRaises(pool.PoolTimeoutError):
        results = pool.pool_exec(p, jobs, 1)
        list(results)
def test_timeout(self):
    p = pool.get_pool()
    jobs = [pool.Job(lambda v: time.sleep(2) and v, 'job', i) for i in range(1, 5)]
    with self.assertRaises(pool.PoolTimeoutError):
        results = pool.pool_exec(p, jobs, 1)
        list(results)
def test_named_no_worker_pool(self):
    # With thread_count=0 no pool is created; pool_exec still runs the
    # jobs synchronously and yields their results.
    default = pool.get_pool(thread_count=0)
    p = pool.get_pool(name='test', thread_count=0)
    self.assertIsNone(p)
    self.assertIsNone(default)
    results = pool.pool_exec(p, [pool.Job(lambda v: v, 'job', 'a')], 1)
    self.assertEqual(list(results)[0].result, 'a')
def test_exception(self):
    p = pool.get_pool()
    err = Exception('this is a test')

    def testfunc():
        raise err

    results = pool.pool_exec(p, [pool.Job(testfunc, 'job')], 1)
    self.assertEqual(list(results)[0].exception, err)
def fetch(self, patterns, startTime, endTime, now, requestContext):
    # deduplicate patterns
    patterns = list(set(patterns))

    if not patterns:
        return []

    log.debug('graphite.storage.Store.fetch :: Starting fetch on all backends')

    jobs = [
        Job(finder.fetch, patterns, startTime, endTime,
            now=now, requestContext=requestContext)
        for finder in self.get_finders(requestContext.get('localOnly'))
    ]

    results = []
    done = 0
    errors = 0

    # Start fetches
    start = time.time()
    try:
        for job in pool_exec(get_pool(), jobs, settings.REMOTE_FETCH_TIMEOUT):
            done += 1

            if job.exception:
                errors += 1
                log.debug("Fetch for %s failed after %fs: %s" % (
                    str(patterns), time.time() - start, str(job.exception)))
                continue

            log.debug("Got a fetch result for %s after %fs" % (
                str(patterns), time.time() - start))
            results.extend(job.result)
    except PoolTimeoutError:
        log.debug("Timed out in fetch after %fs" % (time.time() - start))

    if errors == done:
        raise Exception('All fetches failed for %s' % (str(patterns)))

    log.debug("Got all fetch results for %s in %fs" % (
        str(patterns), time.time() - start))
    return results
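# fetch() above and _find() (further below in this section) dispatch finder
# methods through the worker pool. This is a skeleton of just the pieces of
# the finder interface those two methods rely on: fetch() extends a list with
# job.result, and _find() iterates job.result expecting objects with a .path
# attribute. The class name is illustrative and real finder plugins involve
# more than is shown here.
class NullFinder(object):
    def fetch(self, patterns, startTime, endTime, now=None, requestContext=None):
        # Must return an iterable that Store.fetch can extend its results
        # with; an empty list means "no data for these patterns".
        return []

    def find_nodes(self, query):
        # Must return an iterable of node objects exposing .path (or None,
        # which Store._find tolerates via `job.result or []`).
        return []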
def test_named(self):
    default = pool.get_pool()
    p = pool.get_pool(name='test')
    self.assertIn('test', pool._pools)
    self.assertNotEqual(default, p)
    results = pool.pool_exec(p, [pool.Job(lambda v: v, 'job', 'a')], 1)
    self.assertEqual(list(results)[0].result, 'a')
    pool.stop_pool('test')
    self.assertNotIn('test', pool._pools)
def _find(self, query):
    jobs = [
        Job(finder.find_nodes, query)
        for finder in self.get_finders(query.local)
    ]

    # Group matching nodes by their path
    nodes_by_path = defaultdict(list)

    done = 0
    errors = 0

    # Start finds
    start = time.time()
    try:
        for job in pool_exec(get_pool(), jobs, settings.REMOTE_FIND_TIMEOUT):
            done += 1

            if job.exception:
                errors += 1
                log.debug("Find for %s failed after %fs: %s" % (
                    str(query), time.time() - start, str(job.exception)))
                continue

            log.debug("Got a find result for %s after %fs" % (
                str(query), time.time() - start))
            for node in job.result or []:
                nodes_by_path[node.path].append(node)
    except PoolTimeoutError:
        log.debug("Timed out in find after %fs" % (time.time() - start))

    if errors == done:
        raise Exception('All finds failed for %s' % (str(query)))

    log.debug("Got all find results for %s in %fs" % (
        str(query), time.time() - start))
    return self._list_nodes(query, nodes_by_path)
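# The pool-driven code in this section is governed by a few settings. A sketch
# of how they might be set in local_settings.py: the setting names come from
# the code above, while the values shown are illustrative assumptions, not
# recommended defaults.
USE_WORKER_POOL = True      # let Store.pool_exec create a 'finders' thread pool
POOL_MAX_WORKERS = 10       # cap on pool threads (also capped by the finder count)
REMOTE_FETCH_TIMEOUT = 6.0  # pool_exec timeout (seconds) for fetch() jobs
REMOTE_FIND_TIMEOUT = 3.0   # pool_exec timeout (seconds) for _find() jobs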
def test_basic(self):
    p = pool.get_pool()
    results = pool.pool_exec(p, [], 1)
    self.assertEqual(list(results), [])