def start(self, interval, initial_delay=None):
    """Start the fixed-interval looping call in a background green thread.

    :param interval: seconds between successive runs of ``self.f``.
    :param initial_delay: optional seconds to sleep before the first run.
    :returns: an ``event.Event`` sent when the loop terminates.
    """
    self._running = True
    done = event.Event()

    def _inner():
        if initial_delay:
            greenthread.sleep(initial_delay)
        try:
            while self._running:
                start = timeutils.utcnow()
                self.f(*self.args, **self.kw)
                end = timeutils.utcnow()
                if not self._running:
                    break
                # Sleep only for the remainder of the interval; warn when
                # the task itself took longer than the interval.
                delay = interval - timeutils.delta_seconds(start, end)
                if delay <= 0:
                    LOG.warn(_LW('task run outlasted interval by %s sec') % -delay)
                greenthread.sleep(delay if delay > 0 else 0)
        except LoopingCallDone as e:
            # Raised from inside self.f to stop the loop and return a value.
            self.stop()
            done.send(e.retvalue)
        except Exception:
            LOG.exception(_LE('in fixed duration looping call'))
            done.send_exception(*sys.exc_info())
            return
        else:
            done.send(True)

    self.done = done
    greenthread.spawn_n(_inner)
    return self.done
def start(self, interval, initial_delay=None):
    """Begin calling ``self.f`` every *interval* seconds in a green thread.

    Returns an ``event.Event`` that fires once the loop has stopped.
    """
    self._running = True
    finished = event.Event()

    def _loop():
        if initial_delay:
            greenthread.sleep(initial_delay)
        try:
            while self._running:
                started = timeutils.utcnow()
                self.f(*self.args, **self.kw)
                ended = timeutils.utcnow()
                if not self._running:
                    break
                # Remaining idle time in this interval (negative on overrun).
                idle = interval - timeutils.delta_seconds(started, ended)
                if idle <= 0:
                    LOG.warn(_('task run outlasted interval by %s sec') % -idle)
                greenthread.sleep(max(idle, 0))
        except LoopingCallDone as e:
            # The task asked us to stop and hand back a result.
            self.stop()
            finished.send(e.retvalue)
        except Exception:
            LOG.exception(_('in fixed duration looping call'))
            finished.send_exception(*sys.exc_info())
            return
        else:
            finished.send(True)

    self.done = finished
    greenthread.spawn_n(_loop)
    return self.done
def start(self, interval, initial_delay=None):
    """Start the looping call; returns an Event signalled on completion.

    :param interval: target seconds between successive runs of ``self.f``.
    :param initial_delay: optional sleep before the first run.
    """
    self._running = True
    done = event.Event()

    def _inner():
        if initial_delay:
            greenthread.sleep(initial_delay)
        try:
            while self._running:
                # _ts() is presumably a timestamp in seconds -- TODO confirm
                # against its definition elsewhere in the file.
                start = _ts()
                self.f(*self.args, **self.kw)
                end = _ts()
                if not self._running:
                    break
                # Positive delay means the task overran its interval.
                delay = end - start - interval
                if delay > 0:
                    LOG.warn(_LW('task %(func_name)r run outlasted '
                                 'interval by %(delay).2f sec'),
                             {'func_name': self.f, 'delay': delay})
                # Negative delay is remaining idle time; sleep it off.
                greenthread.sleep(-delay if delay < 0 else 0)
        except LoopingCallDone as e:
            # Raised from inside self.f to stop the loop with a result.
            self.stop()
            done.send(e.retvalue)
        except Exception:
            LOG.exception(_LE('in fixed duration looping call'))
            done.send_exception(*sys.exc_info())
            return
        else:
            done.send(True)

    self.done = done
    greenthread.spawn_n(_inner)
    return self.done
def start(self, interval, initial_delay=None):
    """Run ``self.f`` periodically in a green thread; return the done event."""
    self._running = True
    completion = event.Event()

    def _run():
        if initial_delay:
            greenthread.sleep(initial_delay)
        try:
            while self._running:
                t0 = _ts()
                self.f(*self.args, **self.kw)
                t1 = _ts()
                if not self._running:
                    break
                # How far past the interval this run went (negative = slack).
                overrun = t1 - t0 - interval
                if overrun > 0:
                    LOG.warn(_LW('task %(func_name)s run outlasted '
                                 'interval by %(delay).2f sec'),
                             {'func_name': repr(self.f), 'delay': overrun})
                greenthread.sleep(max(-overrun, 0))
        except LoopingCallDone as e:
            self.stop()
            completion.send(e.retvalue)
        except Exception:
            LOG.exception(_LE('in fixed duration looping call'))
            completion.send_exception(*sys.exc_info())
            return
        else:
            completion.send(True)

    self.done = completion
    greenthread.spawn_n(_run)
    return self.done
def l2gw_callback(resource, event, trigger, **kwargs):
    """Registry callback that forwards port events to the L2 gateway plugin.

    Does nothing when the L2GW service plugin is not loaded.
    """
    service = manager.NeutronManager.get_service_plugins().get(constants.L2GW)
    if not service:
        return
    ctx = kwargs.get("context")
    port = kwargs.get("port")
    if event == events.AFTER_UPDATE:
        # Run asynchronously so the callback returns without blocking.
        greenthread.spawn_n(service.add_port_mac, ctx, port)
    elif event == events.AFTER_DELETE:
        service.delete_port_mac(ctx, port)
def l2gw_callback(resource, event, trigger, **kwargs):
    """Neutron registry callback: mirror port changes to the L2 gateway.

    On AFTER_UPDATE the port MAC is added asynchronously via a green
    thread; on AFTER_DELETE it is removed synchronously.  A no-op when
    the L2GW service plugin is not loaded.
    """
    l2gwservice = manager.NeutronManager.get_service_plugins().get(
        constants.L2GW)
    context = kwargs.get('context')
    port_dict = kwargs.get('port')
    if l2gwservice:
        if event == events.AFTER_UPDATE:
            # Asynchronous: the callback returns before add_port_mac runs.
            greenthread.spawn_n(l2gwservice.add_port_mac, context, port_dict)
        elif event == events.AFTER_DELETE:
            l2gwservice.delete_port_mac(context, port_dict)
def starmap(self, function, iterable):
    """This is the same as :func:`itertools.starmap`, except that *func* is
    executed in a separate green thread for each item, with the concurrency
    limited by the pool's size. In operation, starmap consumes a constant
    amount of memory, proportional to the size of the pool, and is thus
    suited for iterating over extremely long input lists.
    """
    if function is None:
        # A None function is replaced by an identity-style callable that
        # returns its argument tuple unchanged.
        function = lambda *a: a
    gi = GreenMap(self.size)
    # Feed the iterable into the GreenMap from another green thread so the
    # caller can start consuming results from gi immediately.
    greenthread.spawn_n(self._do_map, function, iterable, gi)
    return gi
def start(self):
    # Poll forever: find dead hosts and hand each one to its own green
    # thread for handling; errors are logged and polling continues.
    while True:
        try:
            dead_hosts = self._search_dead_host()
            for dead_host in dead_hosts:
                greenthread.spawn_n(self._handle_deadhost, dead_host)
        except Exception as e:
            LOG.error(e)
        # NOTE(review): placed at loop level so every iteration pauses 5s;
        # confirm against the original layout (source was flattened).
        greenthread.sleep(5)
def run_receive_events():
    # Drain pending events from event_fd and spawn one green thread per
    # received request; loops until the module-level to_run flag clears.
    try:
        print('run_receive_events')
        while to_run:
            count = event_fd.receive()
            # print ('event_fd.receive', count)
            for v in range(count):
                req = handler.get_recved()
                if not req:
                    # Nothing actually queued for this event; skip it.
                    print('get_recved', req)
                    continue
                greenthread.spawn_n(process_req, req)
    except Exception as e:
        # Best-effort loop: report and exit on any unexpected error.
        print(e)
def __init__(self, driver):
    """Wire up the driver and start the background notification loop."""
    self.driver = driver
    self.__lock = threading.Lock()
    self.__watched_events = set()
    self.notifications = queue.Queue()
    # Background green thread that services the notification queue.
    self.notify_thread = greenthread.spawn_n(self.notify_loop)
    # Tear the loop down cleanly at interpreter exit.
    atexit.register(self.shutdown)
def setup():
    """One-time initialisation of the tpool machinery.

    Creates the pipe (or, on Windows, a loopback socket pair) used to wake
    the trampoline, the request/response queues, and the pool of real OS
    worker threads.
    """
    global _rfile, _wfile, _threads, _coro, _setup_already, _reqq, _rspq
    if _setup_already:
        return
    else:
        _setup_already = True
    try:
        _rpipe, _wpipe = os.pipe()
        _wfile = greenio.GreenPipe(_wpipe, 'wb', 0)
        _rfile = greenio.GreenPipe(_rpipe, 'rb', 0)
    except ImportError:
        # This is Windows compatibility -- use a socket instead of a pipe because
        # pipes don't really exist on Windows.
        import socket
        from eventlet import util
        sock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        sock.listen(50)
        csock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        csock.connect(('localhost', sock.getsockname()[1]))
        nsock, addr = sock.accept()
        _rfile = greenio.GreenSocket(csock).makefile('rb', 0)
        _wfile = nsock.makefile('wb', 0)
    # Unbounded queues: requests from green threads, responses from workers.
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)
    for i in range(0, _nthreads):
        t = threading.Thread(target=tworker)
        t.setDaemon(True)
        t.start()
        _threads.add(t)
    # Green thread that ferries worker responses back to their callers.
    _coro = greenthread.spawn_n(tpool_trampoline)
def start_new_thread(function, args=(), kwargs=None):
    """Green replacement for ``thread.start_new_thread``.

    Runs *function* via ``__thread_body`` in a new green thread and
    returns its identifier.  Special-cases ``threading.Thread`` bootstrap
    targets on Python >= 3.4 to release the thread-state lock explicitly.
    """
    if (sys.version_info >= (3, 4)
            and getattr(function, '__module__', '') == 'threading'
            and hasattr(function, '__self__')):
        # Since Python 3.4, threading.Thread uses an internal lock
        # automatically released when the python thread state is deleted.
        # With monkey patching, eventlet uses green threads without python
        # thread state, so the lock is not automatically released.
        #
        # Wrap _bootstrap_inner() to release explicitly the thread state lock
        # when the thread completes.
        thread = function.__self__
        bootstrap_inner = thread._bootstrap_inner

        def wrap_bootstrap_inner():
            try:
                bootstrap_inner()
            finally:
                # The lock can be cleared (ex: by a fork())
                if thread._tstate_lock is not None:
                    thread._tstate_lock.release()

        thread._bootstrap_inner = wrap_bootstrap_inner

    kwargs = kwargs or {}
    g = greenthread.spawn_n(__thread_body, function, args, kwargs)
    return get_ident(g)
def __init__(self, plugin):
    """Remember the plugin and launch the notification pump."""
    self.plugin = plugin
    self.__lock = threading.Lock()
    self.__watched_events = set()
    self.notifications = Queue.Queue()
    # Green thread that drains self.notifications in the background.
    self.notify_thread = greenthread.spawn_n(self.notify_loop)
    # Ensure the pump is shut down at interpreter exit.
    atexit.register(self.shutdown)
def setup():
    """Initialise tpool once: wake-up pipe/socket, queues, worker threads."""
    global _rfile, _wfile, _threads, _coro, _setup_already, _reqq, _rspq
    if _setup_already:
        return
    else:
        _setup_already = True
    try:
        _rpipe, _wpipe = os.pipe()
        _wfile = greenio.GreenPipe(_wpipe, "wb", 0)
        _rfile = greenio.GreenPipe(_rpipe, "rb", 0)
    except ImportError:
        # This is Windows compatibility -- use a socket instead of a pipe because
        # pipes don't really exist on Windows.
        import socket
        from eventlet import util
        sock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("localhost", 0))
        sock.listen(50)
        csock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        csock.connect(("localhost", sock.getsockname()[1]))
        nsock, addr = sock.accept()
        _rfile = greenio.GreenSocket(csock).makefile("rb", 0)
        _wfile = nsock.makefile("wb", 0)
    # Unbounded queues for requests to, and responses from, the workers.
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)
    for i in range(0, _nthreads):
        t = threading.Thread(target=tworker)
        t.setDaemon(True)
        t.start()
        _threads.add(t)
    # Green thread that routes worker responses back to green callers.
    _coro = greenthread.spawn_n(tpool_trampoline)
def setup():
    """Initialise tpool once: loopback socket pair, queues, worker threads."""
    global _rsock, _wsock, _threads, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True
    # Loopback socket pair: workers write to _wsock to wake the trampoline
    # reading from the green-wrapped _rsock.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    sock.listen(1)
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    csock.connect(sock.getsockname())
    _wsock, _addr = sock.accept()
    sock.close()
    _rsock = greenio.GreenSocket(csock)
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will\
 execute in main thread. Check the value of the environment \
 variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    for i in six.moves.range(_nthreads):
        t = threading.Thread(target=tworker, name="tpool_thread_%s" % i)
        t.setDaemon(True)
        t.start()
        _threads.append(t)
    # Green thread that ferries worker responses back to their callers.
    _coro = greenthread.spawn_n(tpool_trampoline)
def __init__(self, driver):
    """Store the driver and start the background notification dispatcher."""
    self.driver = driver
    # Events currently subscribed to; presumably guarded by __lock --
    # confirm against the other methods of this class.
    self.__watched_events = set()
    self.__lock = threading.Lock()
    self.notifications = Queue.Queue()
    # Green thread that services self.notifications.
    self.notify_thread = greenthread.spawn_n(self.notify_loop)
    # Clean shutdown at interpreter exit.
    atexit.register(self.shutdown)
def test_kill_n(self):
    # Killing a spawn_n greenthread before it ever runs must prevent the
    # target from executing at all.
    gt = greenthread.spawn_n(passthru, 7)
    greenthread.kill(gt)
    self.assert_dead(gt)
    greenthread.sleep(0.001)
    # passthru never ran, so it recorded nothing.
    self.assertEquals(_g_results, [])
    # Killing an already-dead greenthread is a harmless no-op.
    greenthread.kill(gt)
    self.assert_dead(gt)
def start(self, interval, initial_delay=None):
    """Start the fixed-interval looping call in a background green thread.

    :param interval: seconds between successive runs of ``self.f``.
    :param initial_delay: optional seconds to sleep before the first run.
    :returns: an ``event.Event`` sent when the loop terminates.
    """
    self._running = True
    done = event.Event()

    def _inner():
        if initial_delay:
            greenthread.sleep(initial_delay)
        try:
            while self._running:
                start = datetime.utcnow()
                self.f(*self.args, **self.kw)
                end = datetime.utcnow()
                if not self._running:
                    break
                # total_seconds() replaces the hand-rolled days/seconds/
                # microseconds arithmetic (same value, clearer intent).
                spend = (end - start).total_seconds()
                delay = interval - spend
                if delay <= 0:
                    # Message fix: -delay is already the positive overrun,
                    # so drop the hard-coded '-' that doubled the sign.
                    LOG.warn('task (%s) run outlasted interval by %.2f sec'
                             % (self.f, -delay))
                greenthread.sleep(delay if delay > 0 else 0)
        except LoopingCallDone as e:
            # Raised from inside self.f to stop the loop with a result.
            self.stop()
            done.send(e.retvalue)
        except Exception:
            # BUG FIX: the original called sys.exit() here, which killed the
            # whole service on any task exception and left the error report
            # and done.send_exception() below unreachable -- contradicting
            # its own "eat it to enable next round running" comment.
            # Propagate the failure via the done event instead, matching
            # the other looping-call implementations.
            LOG.exception("Exception caught in fixed duration looping call.")
            done.send_exception(*sys.exc_info())
            return
        else:
            done.send(True)

    self.done = done
    greenthread.spawn_n(_inner)
    return self.done
def create_portforward(self, context, **kargs):
    """Persist a port-forwarding rule, then apply it asynchronously.

    :param kargs: API payload; the rule body lives under
        ``kargs["portforward"]["portforward"]``.
    :returns: the stored rule as returned by ``get_portforward``.
    """
    portforward = kargs["portforward"]["portforward"]
    id = uuidutils.generate_uuid()
    # Raises on an invalid rule before anything is written.
    self.validate_portforward(context, portforward)
    with context.session.begin(subtransactions=True):
        res = {
            "id": id,
            "name": portforward["name"],
            "router_id": portforward["router_id"],
            "router_gateway_ip": portforward["router_gateway_ip"],
            "instance_id": portforward["instance_id"],
            "instance_fix_ip": portforward["instance_fix_ip"],
            "source_port": portforward["source_port"],
            "destination_port": portforward["destination_port"],
            "protocol": portforward["protocol"],
            "tenant_id": portforward["tenant_id"]
        }
        portforward_db = PortForward(**res)
        context.session.add(portforward_db)
    # Yield to the hub so other green threads can run before the read-back.
    greenthread.sleep(0)
    portforward = self.get_portforward(context, id)
    # Apply the rule in the background; the API call returns immediately.
    greenthread.spawn_n(self.apply_portforward, context, portforward)
    return portforward
def start(self, interval, initial_delay=None): self._running = True #this done will be set to LoopingCallBase's done done = event.Event() def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: start = datetime.datetime.utcnow() self.f(*self.args, **self.kw) # callback end = datetime.datetime.utcnow() if not self._running: break delay = interval - (end-start).total_seconds() logger.debug('delay was %f sec', delay) if delay <= 0: logger.info('task run outlasted interval by %s sec', -delay) greenthread.sleep(delay if delay > 0 else 0) except LoopingCallDone as e: self.stop() done.send(e.retvalue) except Exception as e: logger.info('exception taken') logger.exception(e) done.send_exception(*sys.exc_info()) return else: done.send(True) self._done_ = done greenthread.spawn_n(_inner) return self._done_
def spawn_n(self, function, *args, **kwargs): """Create a greenthread to run the *function*, the same as :meth:`spawn`. The difference is that :meth:`spawn_n` returns None; the results of *function* are not retrievable. """ # if reentering an empty pool, don't try to wait on a coroutine freeing # itself -- instead, just execute in the current coroutine current = greenthread.getcurrent() if self.sem.locked() and current in self.coroutines_running: self._spawn_n_impl(function, args, kwargs, None) else: self.sem.acquire() g = greenthread.spawn_n(self._spawn_n_impl, function, args, kwargs, True) if not self.coroutines_running: self.no_coros_running = event.Event() self.coroutines_running.add(g)
def setup():
    """Initialise tpool once: wake-up channel, queues, worker threads."""
    global _rfile, _wfile, _threads, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True
    try:
        _rpipe, _wpipe = os.pipe()
        _wfile = greenio.GreenPipe(_wpipe, "wb", 0)
        _rfile = greenio.GreenPipe(_rpipe, "rb", 0)
    except (ImportError, NotImplementedError):
        # This is Windows compatibility -- use a socket instead of a pipe because
        # pipes don't really exist on Windows.
        import socket
        from eventlet import util
        sock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("localhost", 0))
        sock.listen(50)
        csock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        csock.connect(("localhost", sock.getsockname()[1]))
        nsock, addr = sock.accept()
        _rfile = greenio.GreenSocket(csock).makefile("rb", 0)
        _wfile = nsock.makefile("wb", 0)
    # Unbounded queues for requests to, and responses from, the workers.
    _rspq = Queue(maxsize=-1)
    _reqq = Queue(maxsize=-1)
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn(
            "Zero threads in tpool. All tpool.execute calls will\
 execute in main thread. Check the value of the environment \
 variable EVENTLET_THREADPOOL_SIZE.",
            RuntimeWarning,
        )
    for i in xrange(_nthreads):
        t = threading.Thread(target=tworker, name="tpool_thread_%s" % i, args=(_reqq,))
        t.setDaemon(True)
        t.start()
        _threads.append(t)
    # Green thread that routes worker responses back to green callers.
    _coro = greenthread.spawn_n(tpool_trampoline)
def setup():
    """One-time tpool initialisation: wake-up channel, queues, workers."""
    global _rfile, _wfile, _threads, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True
    try:
        _rpipe, _wpipe = os.pipe()
        _wfile = greenio.GreenPipe(_wpipe, 'wb', 0)
        _rfile = greenio.GreenPipe(_rpipe, 'rb', 0)
    except (ImportError, NotImplementedError):
        # This is Windows compatibility -- use a socket instead of a pipe because
        # pipes don't really exist on Windows.
        import socket
        from eventlet import util
        sock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        sock.listen(50)
        csock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        csock.connect(('localhost', sock.getsockname()[1]))
        nsock, addr = sock.accept()
        _rfile = greenio.GreenSocket(csock).makefile('rb', 0)
        _wfile = nsock.makefile('wb',0)
    # Unbounded queues for requests to, and responses from, the workers.
    _rspq = Queue(maxsize=-1)
    _reqq = Queue(maxsize=-1)
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will\
 execute in main thread. Check the value of the environment \
 variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    for i in xrange(_nthreads):
        t = threading.Thread(target=tworker, name="tpool_thread_%s" % i, args=(_reqq,))
        t.setDaemon(True)
        t.start()
        _threads.append(t)
    # Green thread that routes worker responses back to green callers.
    _coro = greenthread.spawn_n(tpool_trampoline)
def __init__(self, nthreads=2):
    """Create a private thread pool with *nthreads* OS worker threads.

    With nthreads <= 0 no workers or plumbing are created at all.
    """
    self.nthreads = nthreads
    self._run_queue = Queue()
    self._result_queue = Queue()
    self._threads = []
    self._alive = True
    if nthreads <= 0:
        return
    # We spawn a greenthread whose job it is to pull results from the
    # worker threads via a real Queue and send them to eventlet Events so
    # that the calling greenthreads can be awoken.
    #
    # Since each OS thread has its own collection of greenthreads, it
    # doesn't work to have the worker thread send stuff to the event, as
    # it then notifies its own thread-local eventlet hub to wake up, which
    # doesn't do anything to help out the actual calling greenthread over
    # in the main thread.
    #
    # Thus, each worker sticks its results into a result queue and then
    # writes a byte to a pipe, signaling the result-consuming greenlet (in
    # the main thread) to wake up and consume results.
    #
    # This is all stuff that eventlet.tpool does, but that code can't have
    # multiple instances instantiated. Since the object server uses one
    # pool per disk, we have to reimplement this stuff.
    _raw_rpipe, self.wpipe = os.pipe()
    self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb', bufsize=0)
    for _junk in xrange(nthreads):
        thr = stdlib_threading.Thread(
            target=self._worker,
            args=(self._run_queue, self._result_queue))
        thr.daemon = True
        thr.start()
        self._threads.append(thr)
    # This is the result-consuming greenthread that runs in the main OS
    # thread, as described above.
    self._consumer_coro = greenthread.spawn_n(self._consume_results,
                                              self._result_queue)
def start_new_thread(function, args=(), kwargs=None):
    """Green drop-in for ``thread.start_new_thread``; returns an identifier."""
    # Normalise a missing kwargs mapping to a fresh empty dict.
    kwargs = kwargs or {}
    gt = greenthread.spawn_n(__thread_body, function, args, kwargs)
    return get_ident(gt)
def FixedIntervalLoopingCallFunc(f, interval=0):
    """Run *f* forever in a background green thread, pausing between calls.

    BUG FIX: the original inner loop called f() back to back without ever
    sleeping, so it never yielded control to the eventlet hub and starved
    every other green thread. Sleeping between iterations fixes that; the
    default interval of 0 still yields to the hub, keeping the call
    backward compatible.

    :param f: zero-argument callable invoked repeatedly.
    :param interval: seconds to sleep between calls (default 0 = just yield).
    """
    def __inner():
        while True:
            f()
            # Yield to the hub (and honour the requested pacing).
            greenthread.sleep(interval)
    greenthread.spawn_n(__inner)
_('task run outlasted interval by %s sec') % -delay) greenthread.sleep(delay if delay > 0 else 0) except LoopingCallDone, e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in fixed duration looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn_n(_inner) return self.done # TODO(mikal): this class name is deprecated in Havana and should be removed # in the I release LoopingCall = FixedIntervalLoopingCall class DynamicLoopingCall(LoopingCallBase): """A looping call which sleeps until the next known event. The function called should return how long to sleep for before being called again. """ def start(self, initial_delay=None, periodic_interval_max=None):
def test_n(self):
    # spawn_n schedules the target but does not run it until the current
    # greenthread yields; verify before/after states and the recorded args.
    gt = greenthread.spawn_n(passthru, 2, b=3)
    assert not gt.dead
    greenthread.sleep(0)
    assert gt.dead
    self.assertEqual(_g_results, [((2,), {'b': 3})])
def sync(self):
    # Fire-and-forget: run self._sync in a green thread and return
    # immediately without waiting for it to finish.
    greenthread.spawn_n(self._sync)
def start(self):
    """Poll forever for dead compute hosts, handling each in a green thread.

    Errors during a poll are logged and polling continues.
    """
    while True:
        try:
            dead_hosts = self._search_dead_host()
            for dead_host in dead_hosts:
                greenthread.spawn_n(self._handle_deadhost, dead_host)
        except Exception as e:
            LOG.error(e)
        greenthread.sleep(5)


def main():
    """Entry point: build the HA monitor and run its (blocking) poll loop."""
    cha = ComputeNodeHA()
    cha.start()


if __name__ == '__main__':
    gettextutils.install('ComputeNodeHA', lazy=True)
    gettextutils.enable_lazy()
    log_levels = (cfg.CONF.default_log_levels +
                  ['stevedore=INFO', 'keystoneclient=INFO'])
    cfg.set_defaults(log.log_opts, default_log_levels=log_levels)
    log.setup('ComputeNodeHA')
    # BUG FIX: the original `greenthread.spawn_n(main())` *called* main()
    # immediately (which never returns) and, had it returned, would have
    # handed its None result to spawn_n (a TypeError). The blocking entry
    # point should simply be invoked directly.
    main()
def test_n(self):
    # The spawned greenthread must not run until we yield to the hub.
    gt = greenthread.spawn_n(passthru, 2, b=3)
    self.assert_(not gt.dead)
    greenthread.sleep(0)
    self.assert_(gt.dead)
    # passthru recorded exactly the positional and keyword args it got.
    self.assertEquals(_g_results, [((2, ), {'b': 3})])
def _spawn(self, func, *args, **kwargs):
    """Run *func* on the configured pool, or as a bare green thread if none."""
    pool = self._greenpool
    if pool is None:
        return greenthread.spawn_n(func, *args, **kwargs)
    return pool.spawn_n(func, *args, **kwargs)
print('run_receive_events') while to_run: count = event_fd.receive() # print ('event_fd.receive', count) for v in range(count): req = handler.get_recved() if not req: print('get_recved', req) continue greenthread.spawn_n(process_req, req) except Exception as e: print(e) # greenthread.spawn_n(run_receive_events) # ev_sleep(3) def run_clt_sock(tn, addr, run): print('running py thread', 'clt', tn, addr) udp_s = socket.socket(client_af, socket.SOCK_DGRAM) udp_s.settimeout(25) while run(): try: conf['c'] += 1 uid = ("%d-%d|" % (conf['c'], tn)).zfill(send_bytes_clt).encode('utf-8')
def start_new_thread(function, args=(), kwargs=None):
    """Green replacement for ``thread.start_new_thread``.

    BUG FIX: the original default ``kwargs={}`` is a mutable default
    argument, shared across every call and mutable by any callee, which
    can leak state between callers. Default to None and build a fresh
    dict per call instead (backward compatible for all callers).

    :param function: callable to run in a new green thread.
    :param args: positional arguments for *function*.
    :param kwargs: keyword arguments for *function* (None = none).
    :returns: an identifier for the spawned green thread.
    """
    if kwargs is None:
        kwargs = {}
    g = greenthread.spawn_n(function, *args, **kwargs)
    return get_ident(g)
def start_new_thread(function, args=(), kwargs=None):
    """Green replacement for ``thread.start_new_thread`` via ``__thread_body``.

    BUG FIX: the original default ``kwargs={}`` is a mutable default
    argument, shared across every call; since ``__thread_body`` receives
    the dict itself, any mutation would leak into later calls. Default to
    None and create a fresh dict per call (backward compatible).

    :param function: callable to run in a new green thread.
    :param args: positional arguments for *function*.
    :param kwargs: keyword arguments for *function* (None = none).
    :returns: an identifier for the spawned green thread.
    """
    if kwargs is None:
        kwargs = {}
    g = greenthread.spawn_n(__thread_body, function, args, kwargs)
    return get_ident(g)
def _inner(): if initial_delay: greenthread.sleep(initial_delay) try: while self._running: self.f(*self.args, **self.kw) if not self._running: break greenthread.sleep(interval) except LoopingCallDone, e: self.stop() done.send(e.retvalue) except Exception: LOG.exception(_('in looping call')) done.send_exception(*sys.exc_info()) return else: done.send(True) self.done = done greenthread.spawn_n(_inner) return self.done def stop(self): self._running = False def wait(self): return self.done.wait()
def delete_portforward(self, context, id):
    """Delete the port-forward row, then tear the rule down asynchronously."""
    with context.session.begin(subtransactions=True):
        portforward = self._get_portforward(context, id)
        context.session.delete(portforward)
    # Undo the rule in the background; the API call returns immediately.
    greenthread.spawn_n(self._delete_portforward, context, portforward)
def test_n(self):
    # spawn_n defers execution until the spawner yields; check the
    # greenthread's liveness around the yield and the recorded arguments.
    gt = greenthread.spawn_n(passthru, 2, b=3)
    assert not gt.dead
    greenthread.sleep(0)
    assert gt.dead
    self.assertEqual(_g_results, [((2, ), {'b': 3})])
from eventlet import hubs
from eventlet import greenthread
#from eventlet.support import greenlets as greenlet


def tellme(secret):
    # Python 2 print statement: this demo script targets Python 2.
    print "a secret:", secret


#hub = hubs.get_hub()
#current = greenlet.getcurrent()
# spawn_n only schedules tellme; it does not run until this greenthread
# yields to the hub (the sleep(0) below).
greenthread.spawn_n(tellme, "you are so beautiful")
#hub.schedule_call_global(0,tellme,"you are so beautiful")
#hub.switch()
greenthread.sleep(0)
print("ok")
def start(self):
    """Launch self.run in a green thread unless it is already running."""
    if self._running:
        return
    self._running = True
    greenthread.spawn_n(self.run)
def test_n(self):
    # The spawned greenthread stays pending until we yield to the hub,
    # after which passthru must have run with the exact args given.
    gt = greenthread.spawn_n(passthru, 2, b=3)
    self.assert_(not gt.dead)
    greenthread.sleep(0)
    self.assert_(gt.dead)
    self.assertEquals(_g_results, [((2,), {'b': 3})])