def _expand(self, rec=True):
    """
    Resolve this placeholder by following its _proxy_for chain.

    If rec is true, expand until we hit a real ivar
    Otherwise just do it once

    Caller must hold graph_mutex; it is temporarily released while the
    producing task runs. Returns the value the proxy resolves to.
    """
    if not self._proxy_for.isSet():
        # Target not produced yet: run the single producing task
        # synchronously to force it.
        assert len(self._in_tasks) == 1
        in_task = self._in_tasks[0]
        #own_ix = in_task._outputs.index(self)
        in_task._state = T_QUEUED
        # Drop the graph lock while executing task code; reacquire
        # even if the task raises so the caller's locking holds.
        graph_mutex.release()
        try:
            in_task._exec(None, None)
        finally:
            graph_mutex.acquire()
        # Running the task must have filled in the proxy target.
        assert(self._proxy_for.isSet())
    if rec:
        # Walk the chain of placeholders down to a non-placeholder.
        last = self
        ch = self._proxy_for.get()
        while isinstance(ch, IvarPlaceholder):
            last = ch
            ch = ch._expand(rec=False)
        # shorten chain
        # (path compression: point self directly at the final target)
        self._proxy_for = last._proxy_for
        logging.debug("expanded %s, now points to: %s" % (repr(self),
                repr(self._proxy_for.get())))
    else:
        logging.debug("Proxy for %s" % repr(self._proxy_for))
    return self._proxy_for.get()
def state(self):
    """
    Return the current state of the task.

    Takes graph_mutex so the read is consistent with concurrent state
    transitions made by worker threads. The lock is released via
    try/finally so an exception while reading cannot leak the mutex
    (the original released it only on the success path).
    """
    global graph_mutex
    graph_mutex.acquire()
    try:
        return self._state
    finally:
        graph_mutex.release()
def spark_recursive(ivar):
    """ Assume we are holding the global mutex when this is called """
    thread = threading.currentThread()
    logging.debug("spark_recursive %s" % thread.getName())
    # Only worker threads own a deque to push frames onto.
    if not isWorkerThread(threading.currentThread()):
        raise Exception("Non-worker thread running spark_recursive")
    #assert(len(ivar._in_tasks) == 1)
    #task = ivar._in_tasks[0]
    state = ivar._state
    if state in (IVAR_CLOSED, IVAR_DONE_DESTROYED):
        # Nothing is evaluating this ivar yet: build a frame so this
        # thread can evaluate it itself.
        frame = makeframe(ivar, [])
        must_run = True
    elif state in (IVAR_CLOSED_WAITING, IVAR_DONE_FILLED, IVAR_OPEN_RW, IVAR_OPEN_W):
        # Already filled, or some other evaluator is responsible.
        must_run = False
    elif state == IVAR_ERROR:
        # exception needs to percolate up to next task running
        raise Exception("%s encountered an error in recursive invocation" % repr(ivar))
    else:
        raise Exception("%s had invalid state" % repr(ivar))
    if not must_run:
        # No work to do, return
        return
    else:
        # add marker to deque to ensure we don't
        # start executing old tasks
        thread.deque.append(ReturnMarker)
        #thread.deque.extend(to_run)
        thread.deque.append(frame)
        # Release the graph lock while running tasks; reacquire even
        # on error so the caller's locking assumption still holds.
        graph_mutex.release()
        try:
            thread.run(recursive=True)
        finally:
            graph_mutex.acquire()
def _get(self):
    """
    For internal use only: get directly from local future, don't
    bother forcing or locking or anything. Should have graph lock
    first. Only call when you are sure the Ivar has data ready for
    you.
    """
    if LocalExecutor.isWorkerThread():
        if not self._state in (IVAR_OPEN_R, IVAR_OPEN_RW, IVAR_DONE_FILLED):
            # This thread goes off and runs stuff recursively
            # before blocking
            LocalExecutor.spark_recursive(self)
    else:
        # Non-worker thread: just ensure evaluation has been started.
        self._spark()
    # Block on the future without holding the graph lock; reacquire
    # even if the get raises.
    graph_mutex.release()
    try:
        res = self._future.get()
    finally:
        graph_mutex.acquire()
    if res is ErrorVal and self._state == IVAR_ERROR:
        # Re-raise the producing task's stored exception in this thread.
        raise self._exception
    return res
def spark_recursive(ivar):
    """ Assume we are holding the global mutex when this is called """
    thread = threading.currentThread()
    logging.debug("spark_recursive %s" % thread.getName())
    # Only worker threads own a deque to push frames onto.
    if not isWorkerThread(threading.currentThread()):
        raise Exception("Non-worker thread running spark_recursive")
    # assert(len(ivar._in_tasks) == 1)
    # task = ivar._in_tasks[0]
    state = ivar._state
    if state in (IVAR_CLOSED, IVAR_DONE_DESTROYED):
        # Nothing is evaluating this ivar yet: build a frame so this
        # thread can evaluate it itself.
        frame = makeframe(ivar, [])
        must_run = True
    elif state in (IVAR_CLOSED_WAITING, IVAR_DONE_FILLED, IVAR_OPEN_RW, IVAR_OPEN_W):
        # Already filled, or some other evaluator is responsible.
        must_run = False
    elif state == IVAR_ERROR:
        # exception needs to percolate up to next task running
        raise Exception("%s encountered an error in recursive invocation" % repr(ivar))
    else:
        raise Exception("%s had invalid state" % repr(ivar))
    if not must_run:
        # No work to do, return
        return
    else:
        # add marker to deque to ensure we don't
        # start executing old tasks
        thread.deque.append(ReturnMarker)
        # thread.deque.extend(to_run)
        thread.deque.append(frame)
        # Release the graph lock while running tasks; reacquire even
        # on error so the caller's locking assumption still holds.
        graph_mutex.release()
        try:
            thread.run(recursive=True)
        finally:
            graph_mutex.acquire()
def eval_frame(self, frame):
    """
    frame is: (ivar, [ivar dependencies which may not have been
    picked by an evaluator], [list of tasks to run following
    immediately])
    If ready to run, this thread runs the task
    If not, it searches the graph until it finds a runnable task,
    adding branches to the deque.
    """
    logging.debug("%s: looking at frame %s " % (self.getName(), repr(frame)))
    ch = frame[0]
    graph_mutex.acquire()
    # Expand the ivar as needed
    while hasattr(ch, '_proxy_for'):
        #TODO: handle non-lazy arguments
        ch = ch._expand()
        logging.debug("expanded ivar to %s " % repr(ch))
        # Rebuild the frame around the expanded ivar.
        frame = (ch, frame[1], frame[2])
    chstate = ch._state
    if chstate in (IVAR_DONE_FILLED, IVAR_OPEN_W, IVAR_OPEN_R, IVAR_OPEN_RW):
        # Ivar is filled or will be filled by something else
        graph_mutex.release()
        return
    elif chstate == IVAR_ERROR:
        # Exception should already be propagated, just return
        graph_mutex.release()
        return
    elif chstate in (IVAR_CLOSED, IVAR_CLOSED_WAITING):
        # Continue on to evaluate
        pass
    else:
        # NOTE(review): graph_mutex is still held on this raise path,
        # unlike the other error exits -- confirm whether a caller
        # releases it.
        raise Exception("Invalid ivar state for %s" % repr(frame))
    #TODO: check ivar state
    assert (len(ch._in_tasks) == 1)
    task = ch._in_tasks[0]
    state = task._state
    if state == T_DATA_READY:
        # Claim the task under the lock so no other thread starts it.
        task._state = T_QUEUED
        graph_mutex.release()
        # All dependencies satisfied
        self.exec_task(frame)
    elif state in (T_DONE_SUCCESS, T_RUNNING, T_QUEUED, T_CONTINUATION):
        # already started elsewhere
        graph_mutex.release()
        return
    elif state in (T_DATA_WAIT, T_INACTIVE):
        task._state = T_DATA_WAIT
        try:
            # Search the graph for some task that can run right now.
            runnable_frame = self.find_runnable_task(frame)
            if runnable_frame is not None:
                assert (len(runnable_frame[0]._in_tasks) == 1)
                # Claim the found task before dropping the lock.
                runnable_frame[0]._in_tasks[0]._state = T_QUEUED
        finally:
            graph_mutex.release()
        if runnable_frame is not None:
            self.exec_task(runnable_frame)
    elif state == T_ERROR:
        # Exception should already be propagated, just return
        assert (ch._state == IVAR_ERROR)
        graph_mutex.release()
        return
    else:
        graph_mutex.release()
        # The doubled parentheses still form a 2-tuple, so the
        # %-format receives both arguments as intended.
        raise Exception("Task %s has invalid state %d" % ((repr(task), state)))
def eval_frame(self, frame):
    """
    frame is: (ivar, [ivar dependencies which may not have been
    picked by an evaluator], [list of tasks to run following
    immediately])
    If ready to run, this thread runs the task
    If not, it searches the graph until it finds a runnable task,
    adding branches to the deque.
    """
    logging.debug("%s: looking at frame %s " % (self.getName(), repr(frame)))
    ch = frame[0]
    graph_mutex.acquire()
    # Expand the ivar as needed
    while hasattr(ch, "_proxy_for"):
        # TODO: handle non-lazy arguments
        ch = ch._expand()
        logging.debug("expanded ivar to %s " % repr(ch))
        # Rebuild the frame around the expanded ivar.
        frame = (ch, frame[1], frame[2])
    chstate = ch._state
    if chstate in (IVAR_DONE_FILLED, IVAR_OPEN_W, IVAR_OPEN_R, IVAR_OPEN_RW):
        # Ivar is filled or will be filled by something else
        graph_mutex.release()
        return
    elif chstate == IVAR_ERROR:
        # Exception should already be propagated, just return
        graph_mutex.release()
        return
    elif chstate in (IVAR_CLOSED, IVAR_CLOSED_WAITING):
        # Continue on to evaluate
        pass
    else:
        # NOTE(review): graph_mutex is still held on this raise path,
        # unlike the other error exits -- confirm whether a caller
        # releases it.
        raise Exception("Invalid ivar state for %s" % repr(frame))
    # TODO: check ivar state
    assert len(ch._in_tasks) == 1
    task = ch._in_tasks[0]
    state = task._state
    if state == T_DATA_READY:
        # Claim the task under the lock so no other thread starts it.
        task._state = T_QUEUED
        graph_mutex.release()
        # All dependencies satisfied
        self.exec_task(frame)
    elif state in (T_DONE_SUCCESS, T_RUNNING, T_QUEUED, T_CONTINUATION):
        # already started elsewhere
        graph_mutex.release()
        return
    elif state in (T_DATA_WAIT, T_INACTIVE):
        task._state = T_DATA_WAIT
        try:
            # Search the graph for some task that can run right now.
            runnable_frame = self.find_runnable_task(frame)
            if runnable_frame is not None:
                assert len(runnable_frame[0]._in_tasks) == 1
                # Claim the found task before dropping the lock.
                runnable_frame[0]._in_tasks[0]._state = T_QUEUED
        finally:
            graph_mutex.release()
        if runnable_frame is not None:
            self.exec_task(runnable_frame)
    elif state == T_ERROR:
        # Exception should already be propagated, just return
        assert ch._state == IVAR_ERROR
        graph_mutex.release()
        return
    else:
        graph_mutex.release()
        # The doubled parentheses still form a 2-tuple, so the
        # %-format receives both arguments as intended.
        raise Exception("Task %s has invalid state %d" % ((repr(task), state)))
def add_output(self, input_task):
    """
    Register input_task with this node, under the graph lock.

    The lock is released via try/finally so an exception raised by
    _register_input cannot leak the mutex (the original released it
    only on the success path, which would deadlock other threads).

    NOTE(review): despite being named add_output, this delegates to
    _register_input -- confirm the naming against callers.
    """
    global graph_mutex
    graph_mutex.acquire()
    try:
        self._register_input(input_task)
    finally:
        graph_mutex.release()