class test_failed_AsyncResult(test_GroupResult):
    """Group behaviour when one member task failed with a stored exception."""

    def setup(self):
        # Ten successful mock members plus one FAILURE member = 11 total.
        self.size = 11
        ok_results = make_mock_group(10)
        failed_task = mock_task('ts11', states.FAILURE, KeyError('Baz'))
        save_result(failed_task)
        failed_result = AsyncResult(failed_task['id'])
        self.ts = GroupResult(uuid(), ok_results + [failed_result])

    def test_completed_count(self):
        # Only the successful members count as completed.
        self.assertEqual(self.ts.completed_count(), len(self.ts) - 1)

    def test___iter__(self):
        it = iter(self.ts)

        def consume():
            return list(it)

        # Iterating past the failed member re-raises its exception.
        with self.assertRaises(KeyError):
            consume()

    def test_join(self):
        # join() propagates the stored KeyError.
        with self.assertRaises(KeyError):
            self.ts.join()

    def test_successful(self):
        self.assertFalse(self.ts.successful())

    def test_failed(self):
        self.assertTrue(self.ts.failed())
def test_join_timeout(self):
    """join() must raise TimeoutError while any member is still pending."""
    done_a = MockAsyncResultSuccess(uuid())
    done_b = MockAsyncResultSuccess(uuid())
    never_ready = AsyncResult(uuid())
    ts = GroupResult(uuid(), [done_a, done_b, never_ready])
    with self.assertRaises(TimeoutError):
        ts.join(timeout=0.0000001)
def join_map_jobs(task_ids):
    """Test reduce function that manually joins all mapped jobs.

    :param task_ids: list of per-job id sequences; element 0 of each is
        the task UUID produced by the map step
    :return: list of each joined job's ``payload_id``
    """
    # print((...)) was a 2to3 artifact; the inner parens were redundant.
    print("task_ids: {}".format(json.dumps(task_ids, indent=2)))
    # `tid` instead of `id` — avoid shadowing the builtin.
    res = GroupResult(id=uuid.uuid4().bytes,
                      results=[AsyncResult(tid[0]) for tid in task_ids])
    # Poll until every member result is ready.
    while not res.ready():
        time.sleep(5)
    results = []
    for r in res.join(timeout=10.):
        # A list/tuple result marks a deduped job; build a resolvable
        # placeholder record from its task id.
        if isinstance(r, (list, tuple)):
            task_id = r[0]
            results.append({'uuid': task_id,
                            'job_id': task_id,
                            'payload_id': task_id,
                            'status': 'job-deduped'})
        else:
            results.append(r)
    args = [result['payload_id'] for result in results]
    return args
class test_pending_Group(AppCase):
    """Group whose members have not completed yet."""

    def setup(self):
        members = [AsyncResult(uuid()), AsyncResult(uuid())]
        self.ts = GroupResult(uuid(), members)

    def test_completed_count(self):
        self.assertEqual(self.ts.completed_count(), 0)

    def test_ready(self):
        self.assertFalse(self.ts.ready())

    def test_waiting(self):
        self.assertTrue(self.ts.waiting())

    def x_join(self):
        # Disabled (x_ prefix): joining pending members must time out.
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=0.001)

    @skip_if_quick
    def x_join_longer(self):
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=1)
# NOTE(review): this redefines test_pending_Group, which also appears earlier
# in this file; at import time the later definition shadows the earlier one.
class test_pending_Group(AppCase):
    """Group whose members have not completed yet."""

    def setup(self):
        self.ts = GroupResult(
            uuid(),
            [AsyncResult(uuid()), AsyncResult(uuid())],
        )

    def test_completed_count(self):
        self.assertEqual(self.ts.completed_count(), 0)

    def test_ready(self):
        self.assertFalse(self.ts.ready())

    def test_waiting(self):
        self.assertTrue(self.ts.waiting())

    def x_join(self):
        # Disabled (x_ prefix): joining pending members must time out.
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=0.001)

    @skip_if_quick
    def x_join_longer(self):
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=1)
class test_GroupResult(AppCase):
    """Behaviour of GroupResult over a group of successful mock results."""

    def setup(self):
        self.size = 10
        self.ts = GroupResult(uuid(), make_mock_group(self.size))

    def test_len(self):
        self.assertEqual(len(self.ts), self.size)

    def test_eq_other(self):
        self.assertFalse(self.ts == 1)

    def test_reduce(self):
        # GroupResult must survive a pickle round-trip.
        self.assertTrue(loads(dumps(self.ts)))

    def test_iterate_raises(self):
        ar = MockAsyncResultFailure(uuid())
        ts = GroupResult(uuid(), [ar])
        it = iter(ts)
        # next(it) works on Python 2.6+ and 3; it.next() is py2-only.
        with self.assertRaises(KeyError):
            next(it)

    def test_forget(self):
        subs = [MockAsyncResultSuccess(uuid()),
                MockAsyncResultSuccess(uuid())]
        ts = GroupResult(uuid(), subs)
        ts.forget()
        for sub in subs:
            self.assertTrue(sub.forgotten)

    def test_getitem(self):
        subs = [MockAsyncResultSuccess(uuid()),
                MockAsyncResultSuccess(uuid())]
        ts = GroupResult(uuid(), subs)
        self.assertIs(ts[0], subs[0])

    def test_save_restore(self):
        subs = [MockAsyncResultSuccess(uuid()),
                MockAsyncResultSuccess(uuid())]
        ts = GroupResult(uuid(), subs)
        ts.save()
        # save/restore reject foreign backends.
        with self.assertRaises(AttributeError):
            ts.save(backend=object())
        self.assertEqual(GroupResult.restore(ts.id).subtasks, ts.subtasks)
        ts.delete()
        self.assertIsNone(GroupResult.restore(ts.id))
        with self.assertRaises(AttributeError):
            GroupResult.restore(ts.id, backend=object())

    def test_join_native(self):
        backend = SimpleBackend()
        subtasks = [AsyncResult(uuid(), backend=backend)
                    for i in range(10)]
        ts = GroupResult(uuid(), subtasks)
        backend.ids = [subtask.id for subtask in subtasks]
        res = ts.join_native()
        # list(range(10)) equals py2's range(10) and is also correct on
        # py3, where comparing a list to a range object is always False.
        self.assertEqual(res, list(range(10)))

    def test_iter_native(self):
        backend = SimpleBackend()
        subtasks = [AsyncResult(uuid(), backend=backend)
                    for i in range(10)]
        ts = GroupResult(uuid(), subtasks)
        backend.ids = [subtask.id for subtask in subtasks]
        self.assertEqual(len(list(ts.iter_native())), 10)

    def test_iterate_yields(self):
        ar = MockAsyncResultSuccess(uuid())
        ar2 = MockAsyncResultSuccess(uuid())
        ts = GroupResult(uuid(), [ar, ar2])
        it = iter(ts)
        self.assertEqual(next(it), 42)
        self.assertEqual(next(it), 42)

    def test_iterate_eager(self):
        ar1 = EagerResult(uuid(), 42, states.SUCCESS)
        ar2 = EagerResult(uuid(), 42, states.SUCCESS)
        ts = GroupResult(uuid(), [ar1, ar2])
        it = iter(ts)
        self.assertEqual(next(it), 42)
        self.assertEqual(next(it), 42)

    def test_join_timeout(self):
        ar = MockAsyncResultSuccess(uuid())
        ar2 = MockAsyncResultSuccess(uuid())
        ar3 = AsyncResult(uuid())
        ts = GroupResult(uuid(), [ar, ar2, ar3])
        with self.assertRaises(TimeoutError):
            ts.join(timeout=0.0000001)

    def test___iter__(self):
        it = iter(self.ts)
        results = sorted(list(it))
        # range/list(range) replaces py2-only xrange.
        self.assertListEqual(results, list(range(self.size)))

    def test_join(self):
        joined = self.ts.join()
        self.assertListEqual(joined, list(range(self.size)))

    def test_successful(self):
        self.assertTrue(self.ts.successful())

    def test_failed(self):
        self.assertFalse(self.ts.failed())

    def test_waiting(self):
        self.assertFalse(self.ts.waiting())

    def test_ready(self):
        self.assertTrue(self.ts.ready())

    def test_completed_count(self):
        self.assertEqual(self.ts.completed_count(), len(self.ts))
def _run(self):
    """Submit the parallel work jobs, wait for all to complete, and
    return the results.

    Builds one job per element of the first map argument (sibling map
    arguments of matching length are distributed element-wise, others
    broadcast), propagates job/container settings from the HySDS
    context, submits the jobs as a celery group, then either returns
    the submitted task ids (async mode) or harvests and returns the
    job results.

    :raises ParMapWorkUnitError: if the first argument is not a
        list/tuple to map over
    """
    # get map and work functions
    work_func = getFunction(self._call)

    # get list of jobs
    jobs = []
    if not isinstance(self._args[0], (list, tuple)):
        raise ParMapWorkUnitError(
            "Invalid type for ParWorkUnit argument 1: %s\n%s"
            % (type(self._args[0]), self._args[0]))
    for i, arg in enumerate(self._args[0]):
        work_args = [arg]
        for map_arg in self._args[1:]:
            # A list/tuple the same length as the mapped iterable is
            # distributed element-wise; anything else is broadcast.
            if isinstance(map_arg, (list, tuple)) and \
                    len(map_arg) == len(self._args[0]):
                work_args.append(map_arg[i])
            else:
                work_args.append(map_arg)

        # append work unit id and job number for job tracking
        job = work_func(*work_args, **{'wuid': self._wuid, 'job_num': i})

        # update context in job payload
        job.setdefault('context', {}).update(self._ctx)

        # propagate job/container configs from HySDS context;
        # explicit per-job settings win over context defaults
        if 'priority' not in job:
            job['priority'] = int(self._ctx.get('job_priority', 0))
        if 'username' not in job:
            job['username'] = self._ctx.get('username', None)
        if 'container_image_name' not in job:
            job['container_image_name'] = self._ctx.get(
                'container_image_name', None)
        if 'container_image_url' not in job:
            job['container_image_url'] = self._ctx.get(
                'container_image_url', None)
        if 'container_mappings' not in job:
            job['container_mappings'] = self._ctx.get(
                'container_mappings', {})

        # set tag from HySDS context
        if 'tag' not in job and 'tag' in self._ctx:
            job['tag'] = self._ctx['tag']

        jobs.append(job)

    # submit jobs and wait for execution
    group_res = group(
        submit_job.s(job).set(queue=self._job_queue) for job in jobs)()
    while not group_res.ready():
        time.sleep(5)
    task_ids = group_res.join(timeout=10.)

    # if async, return task IDs immediately
    if self._async:
        # list(task_ids) replaces the identity comprehension that
        # shadowed the builtin `id`.
        return list(task_ids)

    # otherwise harvest results in a group then return
    res = GroupResult(id=uuid.uuid4().bytes,
                      results=[AsyncResult(tid[0]) for tid in task_ids])
    while not res.ready():
        time.sleep(5)
    results = []
    for r in res.join(timeout=10.):
        # A list/tuple result marks a deduped job; build a resolvable
        # placeholder record from its task id.
        if isinstance(r, (list, tuple)):
            task_id = r[0]
            results.append({
                'uuid': task_id,
                'job_id': task_id,
                'payload_id': task_id,
                'status': 'job-deduped',
            })
        else:
            results.append(r)
    return results