def _test_engine_worker_basic(): m = multiprocessing.Manager() shutdown_event = m.Event() out_q = m.Queue() sleep_time = 1 tasks = _task_generator(1) task_job_id, script, stdout, stderr, nproc = tasks[0] w = EngineWorker(out_q, shutdown_event, task_job_id, script, stdout, stderr, nproc, sleep_time=sleep_time) w.start() time.sleep(5) print "Process {i} is alive? {a}".format(i=w.pid, a=w.is_alive()) w.shutdown_event.set() results = get_results_from_queue(out_q) print results return results
def _test_pool(max_workers, tasks, cluster_renderer=None): """ Each task must have the form (task-id, :param max_workers: :param tasks: :param cluster_renderer: :return: """ log.debug("Testing processing pool with max worker:{m} max tasks:{t} cluster render {c}".format(m=max_workers, t=len(tasks), c=cluster_renderer)) max_tasks = len(tasks) m = multiprocessing.Manager() # pool shutdown shutdown_event = m.Event() # worker shutdown worker_shutdown_event = m.Event() out_q = m.Queue() in_q = m.Queue() sleep_time = 1 for task in tasks: log.info("Adding task {t} to queue".format(t=task)) in_q.put(task) job_id = 'j1234' p = ProcessPoolManager(job_id, worker_shutdown_event, shutdown_event, in_q, out_q, max_workers, sleep_time=sleep_time, cluster_renderer=cluster_renderer) p.start() results = [] while len(results) < max_tasks: rs = get_results_from_queue(out_q) for r in rs: print r results.append(r) time.sleep(1) log.info("Settings Pool shutdown event.") p.shutdown_event.set() wait_time = 10 log.info("waiting/sleeping for {s} sec.".format(s=wait_time)) time.sleep(wait_time) print "in q size ", in_q.qsize() print "outq size ", out_q.qsize() # this blocks untill in in_q is empty # p.join(timeout=5) p.terminate() return results
# NOTE(review): an exact duplicate definition of _test_pool was removed here.
# It was semantically identical to the _test_pool defined above (only the
# whitespace inside the log.debug call differed) and, being defined later in
# the module, silently shadowed the first definition.