def migrate_disk_and_power_off(self, context, instance, dest, flavor,
                               network_info, block_device_info=None,
                               timeout=0, retry_interval=0):
    """Stop the container and kick off its migration to ``dest``.

    :param context: security context (unused here)
    :param instance: nova instance object being migrated
    :param dest: host the container is migrated to
    :param flavor: target flavor (unused here)
    :param network_info: instance network information (unused here)
    :param block_device_info: instance volume information (unused here)
    :param timeout: time to wait for power off (unused here)
    :param retry_interval: interval between power-off retries (unused here)
    :returns: an empty dict; the disk_info result is not used by this driver
    :raises: re-raises any exception from the stop/migrate/init sequence
    """
    LOG.debug("migrate_disk_and_power_off called", instance=instance)
    try:
        self.utils.container_stop(instance.name, instance.host)
        container_ws = self.utils.container_migrate(instance.name, instance)
        container_config = (
            self.config.configure_container_migrate(
                instance, container_ws))
        # Initialize the container on the destination asynchronously so
        # this call does not block on the remote host.
        utils.spawn(
            self.utils.container_init, container_config, instance, dest)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            # Fixed grammar in the log message ("migration" -> "migrate").
            LOG.exception(_LE('Failed to migrate container: %(e)s'),
                          {'e': ex}, instance=instance)
    # disk_info is not used
    disk_info = {}
    return disk_info
def migrate_disk_and_power_off(self, context, instance, dest, flavor,
                               network_info, block_device_info=None,
                               timeout=0, retry_interval=0):
    """Stop the container and kick off its migration to ``dest``.

    :param context: security context (unused here)
    :param instance: nova instance object being migrated
    :param dest: host the container is migrated to
    :param flavor: target flavor (unused here)
    :param network_info: instance network information (unused here)
    :param block_device_info: instance volume information (unused here)
    :param timeout: time to wait for power off (unused here)
    :param retry_interval: interval between power-off retries (unused here)
    :returns: an empty dict; the disk_info result is not used by this driver
    :raises: re-raises any exception from the stop/migrate/init sequence
    """
    LOG.debug("migrate_disk_and_power_off called", instance=instance)
    try:
        self.utils.container_stop(instance.name, instance.host)
        container_ws = self.utils.container_migrate(
            instance.name, instance)
        container_config = (self.config.configure_container_migrate(
            instance, container_ws))
        # Initialize the container on the destination asynchronously so
        # this call does not block on the remote host.
        utils.spawn(self.utils.container_init, container_config,
                    instance, dest)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            # Fixed grammar in the log message ("migration" -> "migrate").
            LOG.exception(_LE('Failed to migrate container: %(e)s'),
                          {'e': ex}, instance=instance)
    # disk_info is not used
    disk_info = {}
    return disk_info
def initialize(self):
    """One-time libvirt setup: error handler, default event loop
    implementation, and the connection event dispatch green thread.
    """
    # NOTE(dkliban): Error handler needs to be registered before libvirt
    #                connection is used for the first time.  Otherwise,
    #                the handler does not get registered.
    libvirt.registerErrorHandler(self._libvirt_error_handler, None)
    libvirt.virEventRegisterDefaultImpl()
    self._init_events()
    LOG.debug("Starting connection event dispatch thread")
    # Dispatch connection lifecycle events in a green thread so this
    # call returns immediately.
    utils.spawn(self._conn_event_thread)
    self._initialized = True
def start(self):
    """Start the asynchronous image write and return its done event."""
    self.done = event.Event()

    def _transfer_and_poll():
        """Push the image data to glance via update(), then poll the
        image status until it leaves the 'saving'/'queued' states.
        """
        try:
            IMAGE_API.update(self.context, self.image_id,
                             self.image_meta, data=self.input)
            self._running = True
        except exception.ImageNotAuthorized as err:
            self.done.send_exception(err)

        while self._running:
            try:
                meta = IMAGE_API.get(self.context, self.image_id)
                status = meta.get("status")
                if status == "active":
                    # Upload finished successfully.
                    self.stop()
                    self.done.send(True)
                elif status == "killed":
                    # Glance aborted the upload; surface it as an error.
                    self.stop()
                    msg = (_("Glance image %s is in killed state") %
                           self.image_id)
                    LOG.error(msg)
                    self.done.send_exception(exception.NovaException(msg))
                elif status in ("saving", "queued"):
                    # Still in progress; poll again after a short sleep.
                    greenthread.sleep(GLANCE_POLL_INTERVAL)
                else:
                    self.stop()
                    msg = _("Glance image "
                            "%(image_id)s is in unknown state "
                            "- %(state)s") % {"image_id": self.image_id,
                                              "state": status}
                    LOG.error(msg)
                    self.done.send_exception(exception.NovaException(msg))
            except Exception as err:
                self.stop()
                self.done.send_exception(err)

    utils.spawn(_transfer_and_poll)
    return self.done
def start(self):
    """Start the asynchronous image write and return its done event."""
    self.done = event.Event()

    def _transfer_and_poll():
        """Push the image data to glance via update(), then poll the
        image status until it leaves the 'saving'/'queued' states.
        """
        try:
            IMAGE_API.update(self.context, self.image_id,
                             self.image_meta, data=self.input)
            self._running = True
        except exception.ImageNotAuthorized as err:
            self.done.send_exception(err)

        while self._running:
            try:
                meta = IMAGE_API.get(self.context, self.image_id)
                status = meta.get("status")
                if status == "active":
                    # Upload finished successfully.
                    self.stop()
                    self.done.send(True)
                elif status == "killed":
                    # Glance aborted the upload; surface it as an error.
                    self.stop()
                    msg = (_("Glance image %s is in killed state") %
                           self.image_id)
                    LOG.error(msg)
                    self.done.send_exception(exception.NovaException(msg))
                elif status in ("saving", "queued"):
                    # Still in progress; poll again after a short sleep.
                    greenthread.sleep(GLANCE_POLL_INTERVAL)
                else:
                    self.stop()
                    msg = _("Glance image "
                            "%(image_id)s is in unknown state "
                            "- %(state)s") % {"image_id": self.image_id,
                                              "state": status}
                    LOG.error(msg)
                    self.done.send_exception(exception.NovaException(msg))
            except Exception as err:
                self.stop()
                self.done.send_exception(err)

    utils.spawn(_transfer_and_poll)
    return self.done
def scatter_gather_cells(context, cell_mappings, timeout, fn, *args, **kwargs):
    """Target cells in parallel and return their results.

    The first parameter in the signature of the function to call for each cell
    should be of type RequestContext.

    :param context: The RequestContext for querying cells
    :param cell_mappings: The CellMappings to target in parallel
    :param timeout: The total time in seconds to wait for all the results to be
                    gathered
    :param fn: The function to call for each cell
    :param args: The args for the function to call for each cell, not including
                 the RequestContext
    :param kwargs: The kwargs for the function to call for each cell
    :returns: A dict {cell_uuid: result} containing the joined results. The
              did_not_respond_sentinel will be returned if a cell did not
              respond within the timeout. The raised_exception_sentinel will
              be returned if the call to a cell raised an exception. The
              exception will be logged.
    """
    greenthreads = []
    queue = eventlet.queue.LightQueue()
    results = {}

    def gather_result(cell_uuid, fn, *args, **kwargs):
        # Runs in a green thread; never lets an exception escape so the
        # consumer loop below always receives one queue item per cell.
        try:
            result = fn(*args, **kwargs)
        except Exception:
            LOG.exception('Error gathering result from cell %s', cell_uuid)
            result = raised_exception_sentinel
        # The queue is already synchronized.
        queue.put((cell_uuid, result))

    # Fan out: one green thread per cell, each targeted at that cell's
    # database/MQ via target_cell().
    for cell_mapping in cell_mappings:
        with target_cell(context, cell_mapping) as cctxt:
            greenthreads.append((cell_mapping.uuid,
                                 utils.spawn(gather_result, cell_mapping.uuid,
                                             fn, cctxt, *args, **kwargs)))

    # Fan in: drain the queue until every spawned thread has reported,
    # bounded by the overall timeout.
    with eventlet.timeout.Timeout(timeout, exception.CellTimeout):
        try:
            while len(results) != len(greenthreads):
                cell_uuid, result = queue.get()
                results[cell_uuid] = result
        except exception.CellTimeout:
            # NOTE(melwitt): We'll fill in did_not_respond_sentinels at the
            # same time we kill/wait for the green threads.
            pass

    # Kill the green threads still pending and wait on those we know are done.
    for cell_uuid, greenthread in greenthreads:
        if cell_uuid not in results:
            greenthread.kill()
            results[cell_uuid] = did_not_respond_sentinel
            LOG.warning('Timed out waiting for response from cell %s',
                        cell_uuid)
        else:
            # The thread already produced a result; join it to reap it.
            greenthread.wait()

    return results
def __init__(self, async_method, *args, **kwargs):
    """Spawn ``async_method`` in a green thread and route the accessor
    methods through ``_sync_wrapper`` so they synchronize with it.
    """
    self._gt = utils.spawn(async_method, *args, **kwargs)
    for name in ('json', 'fixed_ips', 'floating_ips'):
        original = getattr(self, name)
        synced = functools.partial(self._sync_wrapper, original)
        functools.update_wrapper(synced, original)
        setattr(self, name, synced)
def _init_events(self):
    """Initializes the libvirt events subsystem.

    This requires running a native thread to provide the
    libvirt event loop integration. This forwards events
    to a green thread which does the actual dispatching.
    """
    self._init_events_pipe()

    LOG.debug("Starting native event thread")
    self._event_thread = native_threading.Thread(target=self._native_thread)
    # Use the daemon attribute instead of the deprecated setDaemon();
    # daemon=True ensures this thread does not block interpreter exit.
    self._event_thread.daemon = True
    self._event_thread.start()

    LOG.debug("Starting green dispatch thread")
    utils.spawn(self._dispatch_thread)
def _init_events(self):
    """Initializes the libvirt events subsystem.

    This requires running a native thread to provide the
    libvirt event loop integration. This forwards events
    to a green thread which does the actual dispatching.
    """
    self._init_events_pipe()

    LOG.debug("Starting native event thread")
    self._event_thread = native_threading.Thread(
        target=self._native_thread)
    # Use the daemon attribute instead of the deprecated setDaemon();
    # daemon=True ensures this thread does not block interpreter exit.
    self._event_thread.daemon = True
    self._event_thread.start()

    LOG.debug("Starting green dispatch thread")
    utils.spawn(self._dispatch_thread)
def __init__(self, async_method, *args, **kwargs):
    """Spawn ``async_method`` in a green thread and route the accessor
    methods through ``_sync_wrapper`` so they synchronize with it.
    """
    super(NetworkInfoAsyncWrapper, self).__init__()
    self._gt = utils.spawn(async_method, *args, **kwargs)
    for name in ("json", "fixed_ips", "floating_ips"):
        original = getattr(self, name)
        synced = functools.partial(self._sync_wrapper, original)
        functools.update_wrapper(synced, original)
        setattr(self, name, synced)
def test_spawn_has_link(self):
    """The synchronous-spawn fixture's result must support link()."""
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    thread = utils.spawn(mock.MagicMock)
    expected_arg = 'test'
    calls = []

    def _on_link(linked_thread, param):
        self.assertEqual(thread, linked_thread)
        self.assertEqual(expected_arg, param)
        calls.append(1)

    thread.link(_on_link, expected_arg)
    self.assertEqual(1, len(calls))
def proxy_connection(self, req, connect_info, start_response):
    """Spawn bi-directional vnc proxy.

    :param req: the incoming proxy request
    :param connect_info: connection details for the handshake
    :param start_response: WSGI start_response callable
    :returns: an error body when the handshake fails, otherwise None
    """
    sockets = {}
    t0 = utils.spawn(self.handshake, req, connect_info, sockets)
    t0.wait()

    if not sockets.get("client") or not sockets.get("server"):
        LOG.info(_LI("Invalid request: %s"), req)
        start_response("400 Invalid Request",
                       [("content-type", "text/html")])
        # Close any socket the partial handshake did open so its file
        # descriptor is not leaked on the error path.
        for sock in sockets.values():
            sock.close()
        return "Invalid Request"

    client = sockets["client"]
    server = sockets["server"]
    # Pump bytes in both directions until either side finishes.
    t1 = utils.spawn(self.one_way_proxy, client, server)
    t2 = utils.spawn(self.one_way_proxy, server, client)
    t1.wait()
    t2.wait()
    # Make sure our sockets are closed
    server.close()
    client.close()
def start(self):
    """Begin copying from input to output; returns the done event."""
    self.done = event.Event()

    def _copy_loop():
        """Shuttle chunks from the input handle to the output handle
        until the stream is exhausted or an error occurs.
        """
        self._running = True
        while self._running:
            try:
                chunk = self.input.read(CHUNK_SIZE)
                if not chunk:
                    # End of stream: flag completion for the waiter.
                    self.stop()
                    self.done.send(True)
                self.output.write(chunk)
                greenthread.sleep(IO_THREAD_SLEEP_TIME)
            except Exception as err:
                self.stop()
                LOG.exception(_LE('Read/Write data failed'))
                self.done.send_exception(err)

    utils.spawn(_copy_loop)
    return self.done
def start(self):
    """Begin copying from input to output; returns the done event."""
    self.done = event.Event()

    def _copy_loop():
        """Shuttle data from the input handle to the output handle
        until the stream is exhausted or an error occurs.
        """
        self._running = True
        while self._running:
            try:
                # NOTE(review): read(None) defers chunk sizing to the
                # input handle; confirm it returns bounded chunks (a
                # sibling implementation passes CHUNK_SIZE), otherwise
                # this buffers the entire stream in memory.
                chunk = self.input.read(None)
                if not chunk:
                    # End of stream: flag completion for the waiter.
                    self.stop()
                    self.done.send(True)
                self.output.write(chunk)
                greenthread.sleep(IO_THREAD_SLEEP_TIME)
            except Exception as err:
                self.stop()
                LOG.exception(_LE('Read/Write data failed'))
                self.done.send_exception(err)

    utils.spawn(_copy_loop)
    return self.done
def proxy_connection(self, req, connect_info, start_response):
    """Spawn bi-directional vnc proxy.

    :param req: the incoming proxy request
    :param connect_info: connection details for the handshake
    :param start_response: WSGI start_response callable
    :returns: an error body when the handshake fails, otherwise None
    """
    sockets = {}
    t0 = utils.spawn(self.handshake, req, connect_info, sockets)
    t0.wait()

    if not sockets.get('client') or not sockets.get('server'):
        LOG.info(_LI("Invalid request: %s"), req)
        start_response('400 Invalid Request',
                       [('content-type', 'text/html')])
        # Close any socket the partial handshake did open so its file
        # descriptor is not leaked on the error path.
        for sock in sockets.values():
            sock.close()
        return "Invalid Request"

    client = sockets['client']
    server = sockets['server']
    # Pump bytes in both directions until either side finishes.
    t1 = utils.spawn(self.one_way_proxy, client, server)
    t2 = utils.spawn(self.one_way_proxy, server, client)
    t1.wait()
    t2.wait()
    # Make sure our sockets are closed
    server.close()
    client.close()
def __init__(self, store_api, adapter, host_uuid):
    """Create the manager.

    :param store_api: the NvramStore api to use.
    :param adapter: pypowervm Adapter
    :param host_uuid: powervm host uuid string
    """
    super(NvramManager, self).__init__()
    # Worker-queue state.
    self._update_list = {}
    self._queue = eventlet.queue.LightQueue()
    self._shutdown = False
    # Store/host handles.
    self._api = store_api
    self._adapter = adapter
    self._host_uuid = host_uuid
    # NOTE: spawn() captures the bound method before the attribute is
    # rebound to the resulting green thread.
    self._update_thread = n_utils.spawn(self._update_thread)
    LOG.debug('NVRAM store manager started.')
def test_spawn_return_has_wait(self):
    """Under the synchronous fixture, spawn() returns a waitable."""
    self.useFixture(fixtures.SpawnIsSynchronousFixture())
    thread = utils.spawn(lambda word: '%s' % word, 'foo')
    self.assertEqual('foo', thread.wait())
def start(self):
    """Start serving a WSGI application.

    :returns: None
    """
    # The server socket object will be closed after server exits,
    # but the underlying file descriptor will remain open, and will
    # give bad file descriptor error. So duplicating the socket object,
    # to keep file descriptor usable.

    dup_socket = self._socket.dup()
    dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # sockets can hang around forever without keepalive
    dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    # This option isn't available in the OS X version of eventlet
    if hasattr(socket, 'TCP_KEEPIDLE'):
        dup_socket.setsockopt(socket.IPPROTO_TCP,
                              socket.TCP_KEEPIDLE,
                              CONF.tcp_keepidle)

    if self._use_ssl:
        try:
            # Validate the configured TLS material before wrapping the
            # socket so misconfiguration fails fast with a clear error.
            ca_file = CONF.ssl_ca_file
            cert_file = CONF.ssl_cert_file
            key_file = CONF.ssl_key_file

            if cert_file and not os.path.exists(cert_file):
                raise RuntimeError(
                    _("Unable to find cert_file : %s") % cert_file)

            if ca_file and not os.path.exists(ca_file):
                raise RuntimeError(
                    _("Unable to find ca_file : %s") % ca_file)

            if key_file and not os.path.exists(key_file):
                raise RuntimeError(
                    _("Unable to find key_file : %s") % key_file)

            if self._use_ssl and (not cert_file or not key_file):
                raise RuntimeError(
                    _("When running server in SSL mode, you must "
                      "specify both a cert_file and key_file "
                      "option value in your configuration file"))

            ssl_kwargs = {
                'server_side': True,
                'certfile': cert_file,
                'keyfile': key_file,
                'cert_reqs': ssl.CERT_NONE,
            }

            # A CA file switches on client-certificate verification.
            if CONF.ssl_ca_file:
                ssl_kwargs['ca_certs'] = ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            dup_socket = eventlet.wrap_ssl(dup_socket,
                                           **ssl_kwargs)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Failed to start %(name)s on %(host)s"
                        ":%(port)s with SSL support"),
                    {'name': self.name, 'host': self.host,
                     'port': self.port})

    # Keyword arguments forwarded by utils.spawn to eventlet.wsgi.server,
    # which runs in its own green thread.
    wsgi_kwargs = {
        'func': eventlet.wsgi.server,
        'sock': dup_socket,
        'site': self.app,
        'protocol': self._protocol,
        'custom_pool': self._pool,
        'log': self._logger,
        'log_format': CONF.wsgi_log_format,
        'debug': False,
        'keepalive': CONF.wsgi_keep_alive,
        'socket_timeout': self.client_socket_timeout
    }

    if self._max_url_len:
        wsgi_kwargs['url_length_limit'] = self._max_url_len

    self._server = utils.spawn(**wsgi_kwargs)
def start(self):
    """Start serving a WSGI application.

    :returns: None
    """
    # The server socket object will be closed after server exits,
    # but the underlying file descriptor will remain open, and will
    # give bad file descriptor error. So duplicating the socket object,
    # to keep file descriptor usable.

    dup_socket = self._socket.dup()
    dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # sockets can hang around forever without keepalive
    dup_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    # This option isn't available in the OS X version of eventlet
    if hasattr(socket, 'TCP_KEEPIDLE'):
        dup_socket.setsockopt(socket.IPPROTO_TCP,
                              socket.TCP_KEEPIDLE,
                              CONF.wsgi.tcp_keepidle)

    if self._use_ssl:
        try:
            # Validate the configured TLS material before wrapping the
            # socket so misconfiguration fails fast with a clear error.
            ca_file = CONF.wsgi.ssl_ca_file
            cert_file = CONF.wsgi.ssl_cert_file
            key_file = CONF.wsgi.ssl_key_file

            if cert_file and not os.path.exists(cert_file):
                raise RuntimeError(
                    _("Unable to find cert_file : %s") % cert_file)

            if ca_file and not os.path.exists(ca_file):
                raise RuntimeError(
                    _("Unable to find ca_file : %s") % ca_file)

            if key_file and not os.path.exists(key_file):
                raise RuntimeError(
                    _("Unable to find key_file : %s") % key_file)

            if self._use_ssl and (not cert_file or not key_file):
                raise RuntimeError(
                    _("When running server in SSL mode, you must "
                      "specify both a cert_file and key_file "
                      "option value in your configuration file"))

            ssl_kwargs = {
                'server_side': True,
                'certfile': cert_file,
                'keyfile': key_file,
                'cert_reqs': ssl.CERT_NONE,
            }

            # A CA file switches on client-certificate verification.
            if CONF.wsgi.ssl_ca_file:
                ssl_kwargs['ca_certs'] = ca_file
                ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

            dup_socket = eventlet.wrap_ssl(dup_socket,
                                           **ssl_kwargs)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to start %(name)s on %(host)s"
                              ":%(port)s with SSL support"),
                          {'name': self.name, 'host': self.host,
                           'port': self.port})

    # Keyword arguments forwarded by utils.spawn to eventlet.wsgi.server,
    # which runs in its own green thread.
    wsgi_kwargs = {
        'func': eventlet.wsgi.server,
        'sock': dup_socket,
        'site': self.app,
        'protocol': self._protocol,
        'custom_pool': self._pool,
        'log': self._logger,
        'log_format': CONF.wsgi.wsgi_log_format,
        'debug': False,
        'keepalive': CONF.wsgi.keep_alive,
        'socket_timeout': self.client_socket_timeout
    }

    if self._max_url_len:
        wsgi_kwargs['url_length_limit'] = self._max_url_len

    self._server = utils.spawn(**wsgi_kwargs)