def Sequence(*plist):
    """ Sequence(P1, [P2, .. ,PN])
    Returns a list of return values from P1..PN """
    processes = []
    for p in plist:
        if type(p) == list:
            for q in p:
                processes.append(q)
        else:
            processes.append(p)

    # For every process we simulate a new process_id. When executing
    # in Main thread/process we set the new id in a global variable.
    s = Scheduler()
    _p = s.current

    _p_original_id = _p.id

    return_values = []
    for p in processes:
        _p.id = p.id

        # Call run() directly instead of start() and join()
        p.run()
        return_values.append(p.return_value)

    _p.id = _p_original_id
    return return_values
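# Usage sketch for Sequence (illustration only, not part of the module). It
# assumes the @process decorator recommended in the Process docstring further
# down, which turns a function call into a Process instance whose return value
# is captured by run().

@process
def square(x):
    return x * x

# Runs the three processes one after another in the calling context and
# collects their return values: [1, 4, 9]
results = Sequence(square(1), square(2), square(3))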
class TimeoutGuard(Guard):
    """
    Timeout spawns a timer thread, when posted. If removed
    before timeout, then the timer thread is cancelled.
    """
    def __init__(self, seconds, action=None):
        self.seconds = seconds
        self.posted = (None, None)
        self.s = Scheduler()
        self.p = None
        self.g = (self, action)

    # Timer expired, offer an active Channel Request
    def expire(self):
        op, req = self.posted
        if op == READ:
            ChannelReq(self.p, msg=None).offer(req)
        elif op == WRITE:
            req.offer(ChannelReq(self.p))

    def _post_read(self, reader):
        self.posted = (READ, reader)

        # Start process
        self.p = Process(self.expire)
        self.p.start()
        self.p.setstate(ACTIVE)

        # Put process on the scheduler timer queue
        self.s.timer_wait(self.p, self.seconds)

    def _post_write(self, writer):
        self.posted = (WRITE, writer)

        # Start process
        self.p = Process(self.expire)
        self.p.start()
        self.p.setstate(ACTIVE)

        # Put process on the scheduler timer queue
        self.s.timer_wait(self.p, self.seconds)

    def _remove_read(self, req):
        self.s.timer_cancel(self.p)

    def _remove_write(self, req):
        self.s.timer_cancel(self.p)
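# Usage sketch (illustration only). The Alternation construct and the
# (guard, message) pair returned by select() are assumptions about the
# surrounding library; only TimeoutGuard itself is defined above. The reader
# below waits for input, but gives up after half a second.

@process
def impatient_reader(cin):
    timeout = TimeoutGuard(seconds=0.5)
    guard, msg = Alternation([{cin: None}, {timeout: None}]).select()
    if guard == timeout:
        print "timed out"
    else:
        print "received", msg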
def __init__(self, name=None, buffer=0):
    if name == None:
        # Create unique name
        self.name = str(random.random()) + str(time.time())
    else:
        self.name = name

    self.readqueue = []
    self.writequeue = []

    # Counts that let every process know how many channel ends have retired
    self.readers = 0
    self.writers = 0

    self.ispoisoned = False
    self.isretired = False

    self.s = Scheduler()
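# Construction sketch (illustration only). The reader()/writer() methods used
# to obtain channel ends are inferred from Process.__mul_channel_ends further
# down; how the ends are used for communication is outside this snippet.

chan = Channel("jobs")       # explicitly named channel
anon = Channel()             # name generated from random()/time(), as above
cin, cout = chan.reader(), chan.writer()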
def _parallel(plist, block=True):
    processes = []
    for p in plist:
        if type(p) == list:
            for q in p:
                processes.append(q)
        else:
            processes.append(p)

    for p in processes:
        p.start()

    s = Scheduler()
    s.addBulk(processes)

    if block:
        s.join(processes)
        return [p.return_value for p in processes]
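# Sketch of the public wrappers one would expect around the private _parallel
# helper (the names Parallel and Spawn are assumptions drawn from the CSP
# idiom; they are not shown above).

def Parallel(*plist):
    """ Parallel(P1, [P2, .. ,PN])
    Starts P1..PN in parallel, blocks until all have terminated and
    returns their return values. """
    return _parallel(plist, block=True)

def Spawn(*plist):
    """ Spawn(P1, [P2, .. ,PN])
    Starts P1..PN in parallel and returns immediately. """
    _parallel(plist, block=False)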
class Process():
    """ Process(fn, *args, **kwargs)
    It is recommended to use the @process decorator to create Process instances.
    See process.__doc__
    """
    def __init__(self, fn, *args, **kwargs):
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.return_value = None

        # Create unique id
        self.id = str(random.random()) + str(time.time())

        # Greenlet specific
        self.greenlet = None

        # Synchronization specific
        self.state = None
        self.s = Scheduler()
        self.executed = False

    def setstate(self, new_state):
        self.state = new_state

    # Reschedule, without putting this process on either the next[] or the blocking[] list.
    def wait(self):
        while self.state == ACTIVE:
            self.s.getNext().greenlet.switch()

    # Notify, by activating and setting state.
    def notify(self, new_state, force=False):
        self.state = new_state

        # Only activate if we are activating someone other than ourselves,
        # or if we force an activation, which happens when an Io thread
        # finishes while the calling process is still the current process.
        if self.s.current != self or force:
            self.s.activate(self)

    # Init greenlet code.
    # It must be called from the main thread.
    # Since processes may only be created in the main thread or in other
    # processes, we can be certain that we are running in the main thread.
    def start(self):
        self.greenlet = greenlet(self.run)

    # Main process execution
    def run(self):
        self.executed = False
        try:
            self.return_value = self.fn(*self.args, **self.kwargs)
        except ChannelPoisonException:
            # look for channels and channel ends
            self.__check_poison(self.args)
            self.__check_poison(self.kwargs.values())
        except ChannelRetireException:
            # look for channel ends
            self.__check_retire(self.args)
            self.__check_retire(self.kwargs.values())
        self.executed = True

    def __check_poison(self, args):
        for arg in args:
            try:
                if types.ListType == type(arg) or types.TupleType == type(arg):
                    self.__check_poison(arg)
                elif types.DictType == type(arg):
                    self.__check_poison(arg.keys())
                    self.__check_poison(arg.values())
                elif type(arg.poison) == types.UnboundMethodType:
                    arg.poison()
            except AttributeError:
                pass

    def __check_retire(self, args):
        for arg in args:
            try:
                if types.ListType == type(arg) or types.TupleType == type(arg):
                    self.__check_retire(arg)
                elif types.DictType == type(arg):
                    self.__check_retire(arg.keys())
                    self.__check_retire(arg.values())
                elif type(arg.retire) == types.UnboundMethodType:
                    # Ignore attempts to retire an already retired channel end.
                    try:
                        arg.retire()
                    except ChannelRetireException:
                        pass
            except AttributeError:
                pass

    # syntactic sugar:  Process() * 2 == [Process<1>, Process<2>]
    def __mul__(self, multiplier):
        return [self] + [Process(self.fn,
                                 *self.__mul_channel_ends(self.args),
                                 **self.__mul_channel_ends(self.kwargs)) for i in range(multiplier - 1)]

    # syntactic sugar:  2 * Process() == [Process<1>, Process<2>]
    def __rmul__(self, multiplier):
        return [self] + [Process(self.fn,
                                 *self.__mul_channel_ends(self.args),
                                 **self.__mul_channel_ends(self.kwargs)) for i in range(multiplier - 1)]

    # Copy lists and dictionaries, replacing channel ends with fresh ends on the same channel
    def __mul_channel_ends(self, args):
        if types.ListType == type(args) or types.TupleType == type(args):
            R = []
            for item in args:
                try:
                    if type(item.isReader) == types.UnboundMethodType and item.isReader():
                        R.append(item.channel.reader())
                    elif type(item.isWriter) == types.UnboundMethodType and item.isWriter():
                        R.append(item.channel.writer())
                except AttributeError:
                    if type(item) in (types.ListType, types.DictType, types.TupleType):
                        R.append(self.__mul_channel_ends(item))
                    else:
                        R.append(item)

            if types.TupleType == type(args):
                return tuple(R)
            else:
                return R

        elif types.DictType == type(args):
            R = {}
            for key in args:
                try:
                    if type(key.isReader) == types.UnboundMethodType and key.isReader():
                        R[key.channel.reader()] = args[key]
                    elif type(key.isWriter) == types.UnboundMethodType and key.isWriter():
                        R[key.channel.writer()] = args[key]
                    elif type(args[key].isReader) == types.UnboundMethodType and args[key].isReader():
                        R[key] = args[key].channel.reader()
                    elif type(args[key].isWriter) == types.UnboundMethodType and args[key].isWriter():
                        R[key] = args[key].channel.writer()
                except AttributeError:
                    if type(args[key]) in (types.ListType, types.DictType, types.TupleType):
                        R[key] = self.__mul_channel_ends(args[key])
                    else:
                        R[key] = args[key]
            return R

        return args
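# Minimal sketch of the @process decorator referenced in the docstring above
# (an assumption about its shape, not the module's own definition): calling
# the decorated function returns a Process instance instead of running the body.

def process(func):
    def _call(*args, **kwargs):
        return Process(func, *args, **kwargs)
    _call.__name__ = func.__name__
    _call.__doc__ = func.__doc__
    return _call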
def current_process_id():
    s = Scheduler()
    g = s.current
    return g.id
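# Example (illustration only; uses the @process sketch above). Note that
# Sequence temporarily assigns each child's id to the running greenlet, so
# current_process_id() reports the child's id even though the code executes
# in the calling process.

@process
def worker(n):
    print "worker", n, "runs as process", current_process_id()

Sequence(worker(0), worker(1))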