def _create_dns_entry(self):
    LOG.debug("%s: Creating dns entry for instance: %s" %
              (greenthread.getcurrent(), self.id))
    dns_support = CONF.trove_dns_support
    LOG.debug(_("trove dns support = %s") % dns_support)
    if dns_support:
        dns_client = create_dns_client(self.context)

        def get_server():
            c_id = self.db_info.compute_instance_id
            return self.nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info("Polling for ip addresses: $%s " % server.addresses)
            if server.addresses != {}:
                return True
            elif (server.addresses == {} and
                  server.status != InstanceStatus.ERROR):
                return False
            elif (server.addresses == {} and
                  server.status == InstanceStatus.ERROR):
                msg = _("Instance IP not available, instance (%s): "
                        "server had status (%s).")
                LOG.error(msg % (self.id, server.status))
                raise TroveError(status=server.status)

        poll_until(get_server, ip_is_available,
                   sleep_time=1, time_out=DNS_TIME_OUT)
        server = self.nova_client.servers.get(
            self.db_info.compute_instance_id)
        LOG.info("Creating dns entry...")
        dns_client.create_instance_entry(self.id,
                                         get_ip_address(server.addresses))
    else:
        LOG.debug("%s: DNS not enabled for instance: %s" %
                  (greenthread.getcurrent(), self.id))
def test_log_no_green_thread(self):
    self.mox.StubOutWithMock(greenthread, 'getcurrent')
    greenthread.getcurrent().AndReturn(None)
    self.mox.ReplayAll()
    self.log.info("baz")
    self.assert_(True)  # do not raise exception
def acquire(self, blocking=True):
    """Acquire a semaphore.

    When invoked without arguments: if the internal counter is larger than
    zero on entry, decrement it by one and return immediately. If it is zero
    on entry, block, waiting until some other thread has called release() to
    make it larger than zero. This is done with proper interlocking so that
    if multiple acquire() calls are blocked, release() will wake exactly one
    of them up. The implementation may pick one at random, so the order in
    which blocked threads are awakened should not be relied on. There is no
    return value in this case.

    When invoked with blocking set to true, do the same thing as when called
    without arguments, and return true.

    When invoked with blocking set to false, do not block. If a call without
    an argument would block, return false immediately; otherwise, do the same
    thing as when called without arguments, and return true.
    """
    if not blocking and self.locked():
        return False
    if self.counter <= 0:
        self._waiters.add(greenthread.getcurrent())
        try:
            while self.counter <= 0:
                hubs.get_hub().switch()
        finally:
            self._waiters.discard(greenthread.getcurrent())
    self.counter -= 1
    return True
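# Illustrative sketch, not from the original sources: the acquire() above parks
# the calling green thread in _waiters and switches to the hub until release()
# raises the counter. Assuming eventlet's public Semaphore API, typical usage
# looks like this.
import eventlet
from eventlet import semaphore

sem = semaphore.Semaphore(2)      # allow at most two workers in the section

def worker(n):
    with sem:                     # acquire() on entry, release() on exit
        eventlet.sleep(0.1)       # simulated work; other green threads run here
        return n

threads = [eventlet.spawn(worker, i) for i in range(5)]
print([gt.wait() for gt in threads])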
def _acquire(self, waiters):
    waiters.add(greenthread.getcurrent())
    try:
        while self.parent.counter != 0:
            hubs.get_hub().switch()
    finally:
        waiters.discard(greenthread.getcurrent())
def process(self, msg, kwargs):
    """Uses hash of current green thread object for unique identifier."""
    if "extra" not in kwargs:
        kwargs["extra"] = {}
    extra = kwargs["extra"]
    if greenthread.getcurrent() is not None:
        extra.update({"gthread_id": hash(greenthread.getcurrent())})
    extra["extra"] = extra.copy()
    return msg, kwargs
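# Illustrative sketch, not from the original sources: process() above is the
# hook of a logging.LoggerAdapter that stamps every record with a hash of the
# current green thread. The adapter class name below is hypothetical; only
# logging.LoggerAdapter and greenthread.getcurrent() are real APIs.
import logging
from eventlet import greenthread

class GreenThreadLogAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        kwargs.setdefault("extra", {})
        if greenthread.getcurrent() is not None:
            # records gain a .gthread_id attribute usable in format strings
            kwargs["extra"]["gthread_id"] = hash(greenthread.getcurrent())
        return msg, kwargs

logging.basicConfig(format="%(gthread_id)s %(message)s")
log = GreenThreadLogAdapter(logging.getLogger(__name__), {})
log.error("handled request")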
def _wrapper(self, method, context, *args, **kwargs):
    """Maps the respective manager method with a task counter."""
    # TODO(rnirmal): Just adding a basic counter. Will revisit and
    # re-implement when we have actual tasks.
    self.tasks[greenthread.getcurrent()] = context
    try:
        func = getattr(self, method)
        func(context, *args, **kwargs)
    except Exception as e:
        excutils.save_and_reraise_exception()
    finally:
        del self.tasks[greenthread.getcurrent()]
def _wrapper(self, method, context, *args, **kwargs):
    """Maps the respective manager method with a task counter."""
    # TODO(rnirmal): Just adding a basic counter. Will revisit and
    # re-implement when we have actual tasks.
    self.tasks[greenthread.getcurrent()] = context
    try:
        func = getattr(self, method)
        func(context, *args, **kwargs)
    except Exception as e:
        LOG.error("Got an error running %s!" % method)
        LOG.error(traceback.format_exc())
    finally:
        del self.tasks[greenthread.getcurrent()]
def acceptor(self, pool):
    greenthread.getcurrent()
    while self.alive:
        try:
            conn, addr = self.socket.accept()
            gt = pool.spawn(self.handle, conn, addr)
            gt.link(self.cleanup, conn)
            conn, addr, gt = None, None, None
        except eventlet.StopServe:
            return
        except:
            self.log.exception("Unexpected error in acceptor. Seppuku.")
            os._exit(4)
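# Illustrative sketch, not from the original sources: the acceptor above is the
# classic eventlet accept loop -- block on accept(), hand each connection to a
# pool, and keep going until told to stop. A minimal standalone echo variant
# (the port number is chosen arbitrarily):
import eventlet

def handle(conn, addr):
    data = conn.recv(1024)
    if data:
        conn.sendall(data)               # echo back whatever arrived
    conn.close()

listener = eventlet.listen(('127.0.0.1', 6001))
pool = eventlet.GreenPool(100)
while True:
    conn, addr = listener.accept()       # cooperative: other green threads run
    pool.spawn_n(handle, conn, addr)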
def _create_dns_entry(self):
    LOG.debug(
        _("%(gt)s: Creating dns entry for instance: %(id)s") % {
            'gt': greenthread.getcurrent(),
            'id': self.id
        })
    dns_support = CONF.trove_dns_support
    LOG.debug(_("trove dns support = %s") % dns_support)
    if dns_support:
        dns_client = create_dns_client(self.context)

        def get_server():
            c_id = self.db_info.compute_instance_id
            return self.nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info(
                _("Polling for ip addresses: $%s ") % server.addresses)
            if server.addresses != {}:
                return True
            elif (server.addresses == {} and
                  server.status != InstanceStatus.ERROR):
                return False
            elif (server.addresses == {} and
                  server.status == InstanceStatus.ERROR):
                LOG.error(
                    _("Instance IP not available, "
                      "instance (%(instance)s): "
                      "server had status (%(status)s).") % {
                          'instance': self.id,
                          'status': server.status
                      })
                raise TroveError(status=server.status)

        utils.poll_until(get_server, ip_is_available,
                         sleep_time=1, time_out=DNS_TIME_OUT)
        server = self.nova_client.servers.get(
            self.db_info.compute_instance_id)
        LOG.info(_("Creating dns entry..."))
        dns_client.create_instance_entry(self.id,
                                         get_ip_address(server.addresses))
    else:
        LOG.debug(
            _("%(gt)s: DNS not enabled for instance: %(id)s") % {
                'gt': greenthread.getcurrent(),
                'id': self.id
            })
def __init__(self):
    self.active = False
    self.prefetch_queue = Queue(settings.prefetch['queue_size'])
    self.prefetch_thread = spawn(self.prefetch_worker)
    self.prefetch_thread.link(reraise_errors, greenthread.getcurrent())
    self.given_items = Cache()
    self.postreport_queue = Queue(settings.postreport['queue_size'])
    self.postreport_thread = spawn(self.postreport_worker)
    self.postreport_thread.link(reraise_errors, greenthread.getcurrent())
    self.storage_connections = eventlet.pools.Pool(
        max_size=settings.storage['max_connections'])
    self.storage_connections.create = StorageConnection
def waitall(self):
    """Waits until all greenthreads in the pool are finished working."""
    assert greenthread.getcurrent() not in self.coroutines_running, \
        "Calling waitall() from within one of the " \
        "GreenPool's greenthreads will never terminate."
    if self.running():
        self.no_coros_running.wait()
def _setup(self):
    self.tasks = self._create_timing_store()
    self.call = self._create_timing_store()
    self.call_stack = [self.call]
    self.current_tasklet = greenthread.getcurrent()
    self.start_call = self.call
    self.start_time = self.timer()
def spawn(self, function, *args, **kwargs):
    """Run the *function* with its arguments in its own green thread.
    Returns the :class:`GreenThread <eventlet.greenthread.GreenThread>`
    object that is running the function, which can be used to retrieve the
    results.

    If the pool is currently at capacity, ``spawn`` will block until one of
    the running greenthreads completes its task and frees up a slot.

    This function is reentrant; *function* can call ``spawn`` on the same
    pool without risk of deadlocking the whole thing.
    """
    # if reentering an empty pool, don't try to wait on a coroutine freeing
    # itself -- instead, just execute in the current coroutine
    current = greenthread.getcurrent()
    if self.sem.locked() and current in self.coroutines_running:
        # a bit hacky to use the GT without switching to it
        gt = greenthread.GreenThread(current)
        gt.main(function, args, kwargs)
        return gt
    else:
        self.sem.acquire()
        gt = greenthread.spawn(function, *args, **kwargs)
        if not self.coroutines_running:
            self.no_coros_running = event.Event()
        self.coroutines_running.add(gt)
        gt.link(self._spawn_done)
    return gt
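# Illustrative sketch, not from the original sources: spawn() above blocks when
# the pool is at capacity and is reentrant for tasks already running inside the
# pool. Minimal usage with eventlet's public GreenPool; waitall() blocks until
# the pool drains.
import eventlet
from eventlet.greenpool import GreenPool

pool = GreenPool(size=4)          # at most four tasks run concurrently

def square(n):
    eventlet.sleep(0.01)          # yields so other green threads can run
    return n * n

threads = [pool.spawn(square, i) for i in range(8)]
pool.waitall()
print([gt.wait() for gt in threads])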
def resize_volume(self, new_size):
    old_volume_size = self.volume_size
    new_size = int(new_size)
    LOG.debug("%s: Resizing volume for instance: %s from %s to %r GB"
              % (greenthread.getcurrent(), self.server.id,
                 old_volume_size, new_size))
    self.volume_client.volumes.resize(self.volume_id, new_size)
    try:
        utils.poll_until(
            lambda: self.volume_client.volumes.get(self.volume_id),
            lambda volume: volume.status == 'in-use',
            sleep_time=2,
            time_out=CONF.volume_time_out)
        volume = self.volume_client.volumes.get(self.volume_id)
        self.update_db(volume_size=volume.size)
        self.nova_client.volumes.rescan_server_volume(
            self.server, self.volume_id)
        self.send_usage_event('modify_volume',
                              old_volume_size=old_volume_size,
                              launched_at=timeutils.isotime(),
                              modify_at=timeutils.isotime(),
                              volume_size=new_size)
    except PollTimeOut as pto:
        LOG.error("Timeout trying to rescan or resize the attached volume "
                  "filesystem for volume: %s" % self.volume_id)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error encountered trying to rescan or resize the "
                  "attached volume filesystem for volume: %s"
                  % self.volume_id)
    finally:
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
def wait(self):
    logger.debug('Threadgroup wait() started')
    for x in self.timers:
        try:
            logger.debug('timers.waiting...')
            x.wait()
        except greenlet.GreenletExit:
            pass
        except Exception as ex:
            logger.info(ex)
    current = greenthread.getcurrent()
    for x in self.threads:
        if x is current:
            continue
        try:
            logger.debug('threads.waiting...')
            x.wait()
            # note that if tg.stop was called, then the
            # kill will cause a silent exit. GreenletExit will not be caught
        except greenlet.GreenletExit as ex:
            logger.debug(ex)
        except Exception as ex:
            logger.info(ex)
    logger.debug('Threadgroup wait() ended')
def run(self):
    coros = []
    queue_url = self.conf.get('queue_url', '127.0.0.1:11300')
    concurrency = int_value(self.conf.get('concurrency'), 10)
    server_gt = greenthread.getcurrent()

    for i in range(concurrency):
        beanstalk = Beanstalk.from_url(queue_url)
        gt = eventlet.spawn(self.handle, beanstalk)
        gt.link(_eventlet_stop, server_gt, beanstalk)
        coros.append(gt)
        beanstalk, gt = None, None

    while self.alive:
        self.notify()
        try:
            eventlet.sleep(1.0)
        except AssertionError:
            self.alive = False
            break

    self.notify()
    try:
        with Timeout(self.graceful_timeout) as t:
            [c.kill(StopServe()) for c in coros]
            [c.wait() for c in coros]
    except Timeout as te:
        if te != t:
            raise
        [c.kill() for c in coros]
def resize_volume(self, new_size):
    LOG.debug(
        "%s: Resizing volume for instance: %s to %r GB"
        % (greenthread.getcurrent(), self.server.id, new_size)
    )
    self.volume_client.volumes.resize(self.volume_id, int(new_size))
    try:
        utils.poll_until(
            lambda: self.volume_client.volumes.get(self.volume_id),
            lambda volume: volume.status == "in-use",
            sleep_time=2,
            time_out=int(config.Config.get("volume_time_out")),
        )
        volume = self.volume_client.volumes.get(self.volume_id)
        self.update_db(volume_size=volume.size)
        self.nova_client.volumes.rescan_server_volume(self.server,
                                                      self.volume_id)
        self.guest.resize_fs(self.get_volume_mountpoint())
    except PollTimeOut as pto:
        LOG.error(
            "Timeout trying to rescan or resize the attached volume "
            "filesystem for volume: %s" % self.volume_id
        )
    except Exception as e:
        LOG.error(
            "Error encountered trying to rescan or resize the "
            "attached volume filesystem for volume: %s" % self.volume_id
        )
    finally:
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
def _create_dns_entry(self):
    LOG.debug("%s: Creating dns entry for instance: %s" %
              (greenthread.getcurrent(), self.id))
    dns_client = create_dns_client(self.context)
    dns_support = config.Config.get("reddwarf_dns_support", 'False')
    LOG.debug(_("reddwarf dns support = %s") % dns_support)
    nova_client = create_nova_client(self.context)
    if utils.bool_from_string(dns_support):

        def get_server():
            c_id = self.db_info.compute_instance_id
            return nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info("Polling for ip addresses: $%s " % server.addresses)
            if server.addresses != {}:
                return True
            elif (server.addresses == {} and
                  server.status != InstanceStatus.ERROR):
                return False
            elif (server.addresses == {} and
                  server.status == InstanceStatus.ERROR):
                LOG.error(_("Instance IP not available, instance (%s): "
                            "server had status (%s).")
                          % (self.id, server.status))
                raise ReddwarfError(status=server.status)

        poll_until(get_server, ip_is_available,
                   sleep_time=1, time_out=60 * 2)
        server = nova_client.servers.get(self.db_info.compute_instance_id)
        LOG.info("Creating dns entry...")
        dns_client.create_instance_entry(self.id,
                                         get_ip_address(server.addresses))
def resize_volume(self, new_size):
    LOG.debug(_("begin resize_volume for id: %s") % self.id)
    old_volume_size = self.volume_size
    new_size = int(new_size)
    LOG.debug(
        _("%(gt)s: Resizing instance %(instance_id)s volume for "
          "server %(server_id)s from %(old_volume_size)s to "
          "%(new_size)r GB") % {
              'gt': greenthread.getcurrent(),
              'instance_id': self.id,
              'server_id': self.server.id,
              'old_volume_size': old_volume_size,
              'new_size': new_size
          })
    if self.server.status == 'active':
        self._resize_active_volume(new_size)
    else:
        self._do_resize(new_size)
    self.send_usage_event('modify_volume',
                          old_volume_size=old_volume_size,
                          launched_at=timeutils.isotime(self.updated),
                          modify_at=timeutils.isotime(self.updated),
                          volume_size=new_size)
    LOG.debug(_("end resize_volume for id: %s") % self.id)
def _setup(self):
    self._has_setup = True
    self.cur = None
    self.timings = {}
    self.current_tasklet = greenthread.getcurrent()
    self.thread_id = thread.get_ident()
    self.simulate_call("profiler")
def wait(self):
    """Wait until switch() or throw() is called."""
    assert self.greenlet is None, \
        'This Waiter is already used by %r' % (self.greenlet, )
    self.greenlet = getcurrent()
    try:
        return get_hub().switch()
    finally:
        self.greenlet = None
def __init__(self, *args, **kwargs):
    from eventlet import greenthread
    from eventlet.greenpool import GreenPool
    self.Pool = GreenPool
    self.getcurrent = greenthread.getcurrent
    self.getpid = lambda: id(greenthread.getcurrent())
    self.spawn_n = greenthread.spawn_n
    super(TaskPool, self).__init__(*args, **kwargs)
def _dothrow(self, gt, cgt):
    # print 'throwing cancel from:%s to:%s current:%s' % (gt, cgt,
    #     greenthread.getcurrent())
    if isinstance(cgt, greenthread.GreenThread):
        cgt.kill(CancelOperation, None, None)
    else:
        hubs.get_hub().schedule_call_local(0, greenthread.getcurrent().switch)
        cgt.throw(CancelOperation())
def test_throw_is_immediate():
    # we want to throw immediate so no exception thrown can interrupt us
    # outside the context manager's with block.
    from eventlet import greenthread
    w = EventWatch()
    w.event.send(None)
    w._watcher(greenthread.getcurrent())
    # if the throw is immediate, we should never get here
    raise Exception('failed')
def do_listen(self, user, request, proc):
    username = user.username
    cgt = greenthread.getcurrent()
    proc() and proc().link(self._dothrow, cgt)
    self.add_to_lru(username)
    try:
        q = self.users[username]
    except KeyError as e:
        q = queue.Queue()
        self.users[username] = q
def switch(self, value=None):
    """Wake up the greenlet that is calling wait() currently (if there is
    one). Can only be called from Hub's greenlet.
    """
    assert getcurrent() is get_hub().greenlet, \
        "Can only use Waiter.switch method from the mainloop"
    if self.greenlet is not None:
        try:
            self.greenlet.switch(value)
        except:
            traceback.print_exc()
def throw(self, *throw_args):
    """Make greenlet calling wait() wake up (if there is a wait()).
    Can only be called from Hub's greenlet.
    """
    assert getcurrent() is get_hub().greenlet, \
        "Can only use Waiter.throw method from the mainloop"
    if self.greenlet is not None:
        try:
            self.greenlet.throw(*throw_args)
        except:
            traceback.print_exc()
def _set_user_auth_token(self):
    if not utils.vnc_api_is_authenticated():
        return

    # forward user token to API server for RBAC
    # token saved earlier in the pipeline
    try:
        auth_token = greenthread.getcurrent().contrail_vars.token
        self._vnc_lib.set_auth_token(auth_token)
    except AttributeError:
        pass
def acquire(self, blocking=True, timeout=None):
    """Acquire a semaphore.

    When invoked without arguments: if the internal counter is larger than
    zero on entry, decrement it by one and return immediately. If it is zero
    on entry, block, waiting until some other thread has called release() to
    make it larger than zero. This is done with proper interlocking so that
    if multiple acquire() calls are blocked, release() will wake exactly one
    of them up. The implementation may pick one at random, so the order in
    which blocked threads are awakened should not be relied on. There is no
    return value in this case.

    When invoked with blocking set to true, do the same thing as when called
    without arguments, and return true.

    When invoked with blocking set to false, do not block. If a call without
    an argument would block, return false immediately; otherwise, do the same
    thing as when called without arguments, and return true.
    """
    if not blocking and timeout is not None:
        raise ValueError("can't specify timeout for non-blocking acquire")
    if not blocking and self.locked():
        return False
    current_thread = greenthread.getcurrent()
    if self.counter <= 0 or self._waiters:
        if current_thread not in self._waiters:
            self._waiters.append(current_thread)
        try:
            if timeout is not None:
                ok = False
                with Timeout(timeout, False):
                    while self.counter <= 0:
                        hubs.get_hub().switch()
                    ok = True
                if not ok:
                    return False
            else:
                # If someone else is already in this wait loop, give them
                # a chance to get out.
                while True:
                    hubs.get_hub().switch()
                    if self.counter > 0:
                        break
        finally:
            try:
                self._waiters.remove(current_thread)
            except ValueError:
                # Fine if its already been dropped.
                pass
    self.counter -= 1
    return True
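# Illustrative sketch, not from the original sources: the timeout branch above
# wraps the hub switch in an eventlet Timeout, so acquire(timeout=...) gives up
# after the deadline instead of blocking forever. Assuming eventlet's public
# Semaphore exposes this signature:
from eventlet import semaphore

sem = semaphore.Semaphore(1)
sem.acquire()                           # counter drops to zero
print(sem.acquire(blocking=False))      # False: would block, returns at once
print(sem.acquire(timeout=0.1))         # False: waits up to 0.1s, then gives up
sem.release()
print(sem.acquire(timeout=0.1))         # True: a slot is free again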
def _set_user_auth_token(self):
    if not cfg.CONF.APISERVER.multi_tenancy:
        return

    # forward user token to API server for RBAC
    # token saved earlier in the pipeline
    try:
        auth_token = greenthread.getcurrent().contrail_vars.token
        self._vnc_lib.set_auth_token(auth_token)
    except AttributeError:
        pass
def _create_dns_entry(self):
    LOG.debug(_("%(gt)s: Creating dns entry for instance: %(id)s") %
              {'gt': greenthread.getcurrent(), 'id': self.id})
    dns_support = CONF.trove_dns_support
    LOG.debug(_("trove dns support = %s") % dns_support)
    if dns_support:
        dns_client = create_dns_client(self.context)

        def get_server():
            c_id = self.db_info.compute_instance_id
            return self.nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info(_("Polling for ip addresses: $%s ") % server.addresses)
            if server.addresses != {}:
                return True
            elif (server.addresses == {} and
                  server.status != InstanceStatus.ERROR):
                return False
            elif (server.addresses == {} and
                  server.status == InstanceStatus.ERROR):
                LOG.error(_("Instance IP not available, "
                            "instance (%(instance)s): "
                            "server had status (%(status)s).") %
                          {'instance': self.id, 'status': server.status})
                raise TroveError(status=server.status)

        utils.poll_until(get_server, ip_is_available,
                         sleep_time=1, time_out=DNS_TIME_OUT)
        server = self.nova_client.servers.get(
            self.db_info.compute_instance_id)
        LOG.info(_("Creating dns entry..."))
        ip = get_ip_address(server.addresses)
        if not ip:
            raise TroveError('Error creating DNS. No IP available.')
        dns_client.create_instance_entry(self.id, ip.pop())
    else:
        LOG.debug(_("%(gt)s: DNS not enabled for instance: %(id)s") %
                  {'gt': greenthread.getcurrent(), 'id': self.id})
def wrapper(self, method, context, *args, **kwargs):
    """Maps the respective manager method with a task counter."""
    # TODO(rnirmal): Just adding a basic counter. Will revisit and
    # re-implement when we have actual tasks.
    self.tasks[greenthread.getcurrent()] = context
    try:
        if not hasattr(self, method):
            raise AttributeError("No such RPC function '%s'" % method)
        func = getattr(self, method)
        LOG.info(str('*' * 80))
        LOG.info("Running method %s..." % method)
        LOG.info(str('*' * 80))
        result = func(context, *args, **kwargs)
        LOG.info("Finished method %s." % method)
        return result
    except Exception as e:
        LOG.error("Got an error running %s!" % method)
        LOG.error(traceback.format_exc())
    finally:
        LOG.info(str('-' * 80))
        del self.tasks[greenthread.getcurrent()]
def _set_user_auth_token(self):
    api_server = self.api_servers.get()
    if not utils.vnc_api_is_authenticated(api_server):
        return

    # forward user token to API server for RBAC
    # token saved earlier in the pipeline
    try:
        auth_token = greenthread.getcurrent().contrail_vars.token
        self._vnc_lib.set_auth_token(auth_token)
    except AttributeError:
        pass
def put(self, item, block=True, timeout=None):
    """Put an item into the queue.

    If optional arg *block* is true and *timeout* is ``None`` (the default),
    block if necessary until a free slot is available. If *timeout* is
    a positive number, it blocks at most *timeout* seconds and raises
    the :class:`Full` exception if no free slot was available within that
    time. Otherwise (*block* is false), put an item on the queue if a free
    slot is immediately available, else raise the :class:`Full` exception
    (*timeout* is ignored in that case).
    """
    if self.maxsize is None or self.qsize() < self.maxsize:
        # there's a free slot, put an item right away
        self._put(item)
        if self.getters:
            self._schedule_unlock()
    elif not block and get_hub().greenlet is getcurrent():
        # we're in the mainloop, so we cannot wait; we can switch() to
        # other greenlets though
        # find a getter and deliver an item to it
        while self.getters:
            getter = self.getters.pop()
            if getter:
                self._put(item)
                item = self._get()
                getter.switch(item)
                return
        raise Full
    elif block:
        waiter = ItemWaiter(item, block)
        self.putters.add(waiter)
        timeout = Timeout(timeout, Full)
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.wait()
            assert result is waiter, \
                "Invalid switch into Queue.put: %r" % (result, )
            if waiter.item is not _NONE:
                self._put(item)
        finally:
            timeout.cancel()
            self.putters.discard(waiter)
    elif self.getters:
        waiter = ItemWaiter(item, block)
        self.putters.add(waiter)
        self._schedule_unlock()
        result = waiter.wait()
        assert result is waiter, \
            "Invalid switch into Queue.put: %r" % (result, )
        if waiter.item is not _NONE:
            raise Full
    else:
        raise Full
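# Illustrative sketch, not from the original sources: put() above parks the
# producer among the putters when the queue is full, unless block is false, in
# which case it raises Full straight away. With eventlet's public queue module:
import eventlet
from eventlet import queue

q = eventlet.Queue(maxsize=1)
q.put("a")                        # a free slot exists, returns immediately
try:
    q.put("b", block=False)       # full queue and block=False -> Full
except queue.Full:
    print("queue is full, as expected")
eventlet.spawn_after(0.1, q.get)  # a consumer frees the slot shortly
q.put("b", timeout=1.0)           # blocks briefly, then succeeds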
def select(read_list, write_list, error_list, timeout=None):
    # error checking like this is required by the stdlib unit tests
    if timeout is not None:
        try:
            timeout = float(timeout)
        except ValueError:
            raise TypeError("Expected number for timeout")
    hub = get_hub()
    t = None
    current = getcurrent()
    assert hub.greenlet is not current, \
        'do not call blocking functions from the mainloop'
    ds = {}
    for r in read_list:
        ds[get_fileno(r)] = {'read': r}
    for w in write_list:
        ds.setdefault(get_fileno(w), {})['write'] = w
    for e in error_list:
        ds.setdefault(get_fileno(e), {})['error'] = e

    listeners = []

    def on_read(d):
        original = ds[get_fileno(d)]['read']
        current.switch(([original], [], []))

    def on_write(d):
        original = ds[get_fileno(d)]['write']
        current.switch(([], [original], []))

    def on_error(d, _err=None):
        original = ds[get_fileno(d)]['error']
        current.switch(([], [], [original]))

    def on_timeout():
        current.switch(([], [], []))

    if timeout is not None:
        t = hub.schedule_call_global(timeout, on_timeout)
    try:
        for k, v in ds.iteritems():
            if v.get('read'):
                listeners.append(hub.add(hub.READ, k, on_read))
            if v.get('write'):
                listeners.append(hub.add(hub.WRITE, k, on_write))
        try:
            return hub.switch()
        finally:
            for l in listeners:
                hub.remove(l)
    finally:
        if t is not None:
            t.cancel()
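# Illustrative sketch, not from the original sources: the select() above
# registers read/write listeners with the hub and switches away until a
# descriptor fires or the timeout callback runs. Assuming this is eventlet's
# green select module, a minimal poll over a socket pair:
import socket
from eventlet.green import select as green_select

a, b = socket.socketpair()          # POSIX; use a localhost pair on Windows
b.sendall(b"ping")
readable, writable, errored = green_select.select([a], [], [], 1.0)
print(a in readable)                # True: data is waiting on socket a
a.close()
b.close()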
def _create_dns_entry(self):
    LOG.debug("%s: Creating dns entry for instance: %s" %
              (greenthread.getcurrent(), self.id))
    dns_support = CONF.reddwarf_dns_support
    LOG.debug(_("reddwarf dns support = %s") % dns_support)
    if dns_support:
        nova_client = create_nova_client(self.context)
        dns_client = create_dns_client(self.context)

        def get_server():
            c_id = self.db_info.compute_instance_id
            return nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info("Polling for ip addresses: $%s " % server.addresses)
            if server.addresses != {}:
                return True
            elif (server.addresses == {} and
                  server.status != InstanceStatus.ERROR):
                return False
            elif (server.addresses == {} and
                  server.status == InstanceStatus.ERROR):
                msg = _("Instance IP not available, instance (%s): "
                        "server had status (%s).")
                LOG.error(msg % (self.id, server.status))
                raise ReddwarfError(status=server.status)

        poll_until(get_server, ip_is_available,
                   sleep_time=1, time_out=DNS_TIME_OUT)
        server = nova_client.servers.get(self.db_info.compute_instance_id)
        LOG.info("Creating dns entry...")
        dns_client.create_instance_entry(self.id,
                                         get_ip_address(server.addresses))
    else:
        LOG.debug("%s: DNS not enabled for instance: %s" %
                  (greenthread.getcurrent(), self.id))
def _patch(thrl):
    greens = object.__getattribute__(thrl, '_local__greens')
    # until we can store the localdict on greenlets themselves,
    # we store it in _local__greens on the local object
    cur = greenthread.getcurrent()
    if cur not in greens:
        # must be the first time we've seen this greenlet, call __init__
        greens[cur] = {}
        cls = type(thrl)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(thrl, '_local__args')
            thrl.__init__(*args, **kw)
    object.__setattr__(thrl, '__dict__', greens[cur])
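# Illustrative sketch, not from the original sources: _patch() above swaps a
# per-greenlet __dict__ into a thread-local-style object, which is what
# eventlet exposes as corolocal.local. Assuming that public class:
import eventlet
from eventlet import corolocal

ctx = corolocal.local()

def handler(name):
    ctx.user = name        # visible only to this green thread
    eventlet.sleep(0)      # yield so the other handler runs in between
    return ctx.user        # still this green thread's own value

threads = [eventlet.spawn(handler, n) for n in ("alice", "bob")]
print([gt.wait() for gt in threads])   # ['alice', 'bob']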
def _request_api_server_authn(self, url, data=None, headers=None):
    # forward user token to API server for RBAC
    # token saved earlier in the pipeline
    try:
        auth_token = getcurrent().contrail_vars.token
    except AttributeError:
        auth_token = None

    authn_headers = headers or {}
    if auth_token or self._authn_token:
        authn_headers['X-AUTH-TOKEN'] = auth_token or self._authn_token
    response = self._request_api_server(url, data, headers=authn_headers)
    return response
def wait(self):
    for x in self.timers:
        try:
            x.wait()
        except Exception as ex:
            LOG.exception(ex)
    current = greenthread.getcurrent()
    for x in self.threads:
        if x is current:
            continue
        try:
            x.wait()
        except Exception as ex:
            LOG.exception(ex)
def stop(self):
    """To stop the thread."""
    current = greenthread.getcurrent()
    # Make a copy
    for x in self.threads[:]:
        if x is current:
            # Skipping the current thread
            continue
        try:
            x.stop()
        except Exception as ex:
            message = "Exception - %s" % (ex)
            LOG.exception(message)
def _spawn_n_impl(self, func, args, kwargs, coro):
    try:
        try:
            func(*args, **kwargs)
        except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
            raise
        except:
            if DEBUG:
                traceback.print_exc()
    finally:
        if coro is None:
            return
        else:
            coro = greenthread.getcurrent()
            self._spawn_done(coro)
def wait(self):
    """Wait for the thread."""
    current = greenthread.getcurrent()
    # Make a copy
    for x in self.threads[:]:
        if x is current:
            continue
        try:
            x.wait()
        except eventlet.greenlet.GreenletExit:
            pass
        except Exception as ex:
            message = "Unexpected exception - %r" % (ex)
            LOG.error(message)
def stop(self):
    current = greenthread.getcurrent()
    for x in self.threads:
        if x is current:
            # don't kill the current thread.
            continue
        try:
            x.stop()
        except Exception as ex:
            LOG.exception(ex)
    for x in self.timers:
        try:
            x.stop()
        except Exception as ex:
            LOG.exception(ex)
    self.timers = []