def result(self):
    """Retrieve the deferred result for this job.

    Downloads the job's stdout/stderr/output files into a temporary
    directory, unpickles the result from ``result.pebl``, deletes the
    remote job, and returns the result object.

    The temporary directory is always removed, even if fetching or
    unpickling fails (previously it leaked on error).
    """
    tmpdir = tempfile.mkdtemp("pebl")
    try:
        # Pull the job's output files into the scratch directory.
        self.job.results(stdout=os.path.join(tmpdir, "stdout"),
                         stderr=os.path.join(tmpdir, "stderr"),
                         outdir=tmpdir)
        self.job.delete()
        rst = result.fromfile(os.path.join(tmpdir, "result.pebl"))
    finally:
        # Clean up the scratch directory no matter what happened above.
        shutil.rmtree(tmpdir, ignore_errors=True)
    return rst
def run(self, tasks):
    """Run tasks by creating multiple processes.

    If poolsize was specified when creating this controller, additional
    tasks will be queued until a running process finishes.

    Each task is pickled into its own temporary working directory and a
    child pebl process is spawned to execute it; results are read back
    from ``result.pebl`` files once all children have exited.
    """
    tasks = copy(tasks)  # because we do tasks.pop() below..
    numtasks = len(tasks)
    poolsize = self.poolsize or numtasks

    running = {}  # pid -> task for children we spawned and haven't reaped
    done = []
    opjoin = os.path.join

    while len(done) < numtasks:
        # submit tasks (if below poolsize and tasks remain)
        for i in xrange(min(poolsize - len(running), len(tasks))):
            task = tasks.pop()
            task.cwd = tempfile.mkdtemp()
            # Close (and thus flush) the pickle file before spawning the
            # child that will read it; the old code left the handle open,
            # so buffered data could be missing when the child started.
            taskfile = opjoin(task.cwd, 'task.pebl')
            fp = open(taskfile, 'w')
            try:
                cPickle.dump(task, fp)
            finally:
                fp.close()
            pid = os.spawnlp(os.P_NOWAIT, PEBL, PEBL, "runtask", taskfile)
            running[pid] = task

        # wait for any child process to finish
        pid, status = os.wait()
        # os.wait() can reap a child we didn't spawn here; only count
        # pids we know about, otherwise `done` fills with Nones and the
        # result-collection below crashes on None.cwd.
        if pid in running:
            done.append(running.pop(pid))

    results = [result.fromfile(opjoin(t.cwd, 'result.pebl')) for t in done]

    # to make the results look like deferred results
    for r in results:
        r.taskid = 0

    # clean up
    for t in done:
        shutil.rmtree(t.cwd, ignore_errors=True)

    return results