def test_nova_resize_timeout(self):
    """Resize action where polling Nova for the new status times out."""
    # MySQL is stopped before the compute resize is issued.
    self._stop_mysql()
    self.server.resize(NEW_FLAVOR_ID)
    # Stub poll_until so the wait for the resize raises PollTimeOut;
    # the code under test must cope with the timeout.
    self.mock.StubOutWithMock(utils, 'poll_until')
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\
        .AndRaise(PollTimeOut)
def reboot(self):
    """Reboot the compute instance underneath this database instance.

    Stops MySQL, reboots via Nova, polls until the server reports
    ACTIVE, then parks the service status on PAUSED so the guest agent
    can reset it once MySQL is back up.  Failures are logged and
    swallowed (best-effort reboot).
    """
    try:
        LOG.debug("Instance %s calling stop_mysql..." % self.id)
        self.guest.stop_mysql()
        LOG.debug("Rebooting instance %s" % self.id)
        self.server.reboot()

        # Poll nova until instance is active.
        reboot_time_out = int(config.Config.get("reboot_time_out", 60 * 2))

        def update_server_info():
            self._refresh_compute_server_info()
            return self.server.status == 'ACTIVE'

        utils.poll_until(
            update_server_info,
            sleep_time=2,
            time_out=reboot_time_out)

        # Set the status to PAUSED. The guest agent will reset the status
        # when the reboot completes and MySQL is running.
        status = InstanceServiceStatus.find_by(instance_id=self.id)
        status.set_status(inst_models.ServiceStatuses.PAUSED)
        status.save()
        LOG.debug("Successfully rebooted instance %s" % self.id)
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; 'as' works
        # on Python 2.6+ and is required for Python 3.
        LOG.error("Failed to reboot instance %s: %s" % (self.id, str(e)))
def resize_volume(self, new_size):
    """Resize the instance's attached volume to *new_size* GB.

    Polls until the volume reports 'in-use' again, records the new size
    in the DB, asks Nova to rescan the attached volume, and emits a
    'modify_volume' usage event.  Errors are logged, not raised; the
    instance task status is always cleared in the finally block.
    """
    old_volume_size = self.volume_size
    new_size = int(new_size)
    LOG.debug("%s: Resizing volume for instance: %s from %s to %r GB" %
              (greenthread.getcurrent(), self.server.id,
               old_volume_size, new_size))
    self.volume_client.volumes.resize(self.volume_id, new_size)
    try:
        # First callable fetches the volume, the second is the predicate
        # polled until truthy (volume back to 'in-use' post-resize).
        utils.poll_until(
            lambda: self.volume_client.volumes.get(self.volume_id),
            lambda volume: volume.status == 'in-use',
            sleep_time=2,
            time_out=CONF.volume_time_out)
        volume = self.volume_client.volumes.get(self.volume_id)
        self.update_db(volume_size=volume.size)
        self.nova_client.volumes.rescan_server_volume(
            self.server, self.volume_id)
        self.send_usage_event('modify_volume',
                              old_volume_size=old_volume_size,
                              launched_at=timeutils.isotime(),
                              modify_at=timeutils.isotime(),
                              volume_size=new_size)
    except PollTimeOut as pto:
        LOG.error("Timeout trying to rescan or resize the attached volume "
                  "filesystem for volume: %s" % self.volume_id)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error encountered trying to rescan or resize the "
                  "attached volume filesystem for volume: %s" %
                  self.volume_id)
    finally:
        # Always release the task lock, even after a failure.
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
def _create_dns_entry(self):
    """Register a DNS entry for this instance once its IP is available.

    No-op unless the 'reddwarf_dns_support' config flag is truthy.
    Polls the compute server (up to 2 minutes) until it reports at
    least one address, then records the instance's IP with DNS.
    Raises ReddwarfError if the server goes to ERROR before getting
    an address.
    """
    LOG.debug("%s: Creating dns entry for instance: %s" %
              (greenthread.getcurrent(), self.id))
    dns_client = create_dns_client(self.context)
    dns_support = config.Config.get("reddwarf_dns_support", 'False')
    LOG.debug(_("reddwarf dns support = %s") % dns_support)
    nova_client = create_nova_client(self.context)
    if utils.bool_from_string(dns_support):

        def get_server():
            # Re-fetch the server each poll iteration.
            c_id = self.db_info.compute_instance_id
            return nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info("Polling for ip addresses: $%s " % server.addresses)
            if server.addresses != {}:
                return True
            elif server.addresses == {} and\
                    server.status != InstanceStatus.ERROR:
                # No address yet but the server is still healthy; keep
                # polling.
                return False
            elif server.addresses == {} and\
                    server.status == InstanceStatus.ERROR:
                # Server failed before getting an address; give up.
                LOG.error(_("Instance IP not available, instance (%s): "
                            "server had status (%s).") %
                          (self.id, server.status))
                raise ReddwarfError(status=server.status)

        poll_until(get_server, ip_is_available,
                   sleep_time=1, time_out=60 * 2)
        server = nova_client.servers.get(self.db_info.compute_instance_id)
        LOG.info("Creating dns entry...")
        dns_client.create_instance_entry(self.id,
                                         get_ip_address(server.addresses))
def _spawn_with_init_file(self, temp_file):
    """Run mysqld_safe with an init file (root password reset), then
    clean up the temporary mysqld process.

    Raises RestoreError if mysqld never comes up after the spawn.
    """
    child = pexpect.spawn("sudo mysqld_safe --init-file=%s" %
                          temp_file.name)
    try:
        i = child.expect(['Starting mysqld daemon'])
        if i == 0:
            LOG.info("Starting mysqld daemon")
    except pexpect.TIMEOUT as e:
        LOG.error("wait_and_close_proc failed: %s" % e)
    finally:
        try:
            # There is a race condition here where we kill mysqld before
            # the init file been executed. We need to ensure mysqld is up.
            utils.poll_until(mysql_is_running,
                             sleep_time=RESET_ROOT_SLEEP_INTERVAL,
                             time_out=RESET_ROOT_RETRY_TIMEOUT)
        except exception.PollTimeOut:
            raise RestoreError("Reset root password failed: "
                               "mysqld did not start!")
        LOG.info("Root password reset successfully!")
        LOG.info("Cleaning up the temp mysqld process...")
        # Give pexpect a moment before force-closing the child, then
        # make sure no stray mysqld survives.
        child.delayafterclose = 1
        child.delayafterterminate = 1
        child.close(force=True)
        utils.execute_with_timeout("sudo", "killall", "mysqld")
def _delete_resources(self):
    """Best-effort teardown: delete the compute server and its DNS
    entry, then wait until Nova no longer knows about the server.
    """
    try:
        self.server.delete()
    except Exception as ex:
        LOG.error("Error during delete compute server %s " %
                  self.server.id)
        LOG.error(ex)
    try:
        dns_support = config.Config.get("reddwarf_dns_support", 'False')
        LOG.debug(_("reddwarf dns support = %s") % dns_support)
        if utils.bool_from_string(dns_support):
            dns_api = create_dns_client(self.context)
            dns_api.delete_instance_entry(instance_id=self.db_info.id)
    except Exception as ex:
        LOG.error("Error during dns entry for instance %s " %
                  self.db_info.id)
        LOG.error(ex)

    # Poll until the server is gone.
    def server_is_finished():
        try:
            server_id = self.db_info.compute_instance_id
            server = self.nova_client.servers.get(server_id)
            if server.status not in ['SHUTDOWN', 'ACTIVE']:
                msg = "Server %s got into ERROR status during delete " \
                    "of instance %s!" % (server.id, self.id)
                LOG.error(msg)
                return False
            # NOTE(review): falls through returning None (falsy) while
            # the server still exists in a normal state, so polling
            # continues -- same effect as returning False.
        except nova_exceptions.NotFound:
            # Server no longer exists: deletion is complete.
            return True

    poll_until(server_is_finished, sleep_time=2,
               time_out=int(config.Config.get('server_delete_time_out')))
def reboot(self):
    """Reboot the compute instance underneath this database instance.

    Stops MySQL, reboots via Nova, polls until the server reports
    ACTIVE, then parks the service status on PAUSED for the guest agent
    to reset once MySQL is running again.  Failures are logged and
    swallowed (best-effort reboot).
    """
    try:
        LOG.debug("Instance %s calling stop_mysql..." % self.id)
        self.guest.stop_mysql()
        LOG.debug("Rebooting instance %s" % self.id)
        self.server.reboot()

        # Poll nova until instance is active.
        reboot_time_out = int(config.Config.get("reboot_time_out", 60 * 2))

        def update_server_info():
            self._refresh_compute_server_info()
            return self.server.status == 'ACTIVE'

        utils.poll_until(update_server_info, sleep_time=2,
                         time_out=reboot_time_out)
        # Set the status to PAUSED. The guest agent will reset the status
        # when the reboot completes and MySQL is running.
        status = InstanceServiceStatus.find_by(instance_id=self.id)
        status.set_status(inst_models.ServiceStatuses.PAUSED)
        status.save()
        LOG.debug("Successfully rebooted instance %s" % self.id)
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; 'as' works
        # on Python 2.6+ and is required for Python 3.
        LOG.error("Failed to reboot instance %s: %s" % (self.id, str(e)))
def delete_instance(self):
    """Delete the compute server, DNS entry, guest queue and DB rows
    for this instance, waiting for the server to vanish from Nova.
    """
    try:
        self.server.delete()
    except Exception as ex:
        LOG.error("Error during delete compute server %s " %
                  self.server.id)
        LOG.error(ex)
    try:
        dns_support = config.Config.get("reddwarf_dns_support", "False")
        LOG.debug(_("reddwarf dns support = %s") % dns_support)
        if utils.bool_from_string(dns_support):
            dns_api = create_dns_client(self.context)
            dns_api.delete_instance_entry(instance_id=self.db_info.id)
    except Exception as ex:
        LOG.error("Error during dns entry for instance %s " %
                  self.db_info.id)
        LOG.error(ex)

    # Poll until the server is gone.
    def server_is_finished():
        try:
            server_id = self.db_info.compute_instance_id
            server = self.nova_client.servers.get(server_id)
            if server.status not in ["SHUTDOWN", "ACTIVE"]:
                # Adjacent string literals concatenate before the '%'
                # applies, so this formats the full message correctly.
                msg = "Server %s got into ERROR status during delete " \
                    "of instance %s!" % (server.id, self.id)
                LOG.error(msg)
                return False
            # NOTE(review): implicitly returns None (falsy) while the
            # server still exists in a normal state, so polling goes on.
        except nova_exceptions.NotFound:
            return True

    poll_until(server_is_finished, sleep_time=2,
               time_out=int(config.Config.get("server_delete_time_out")))
    # If time out occurs, the instance task is stuck in DELETING.
    LOG.debug("Setting instance %s to deleted..." % self.id)
    # Delete guest queue.
    guest = self.get_guest()
    guest.delete_queue()
    self.update_db(task_status=InstanceTasks.NONE)
    self.update_db(deleted=True, deleted_at=datetime.now())
def delete(cls, credential, region, volume_id):
    """Delete a volume, first waiting for it to detach.

    Polls until the volume leaves the 'in-use' state (bounded by the
    'volume_detach_time_out' config, default 30s); on timeout the
    delete is attempted anyway.

    Raises NotFound if the volume does not exist and
    VolumeDeletionFailure on any other client error.
    """
    client = cls.get_client(credential, region)

    # Poll until the volume is no longer attached.
    def volume_is_detached():
        try:
            volume = client.volumes.get(volume_id)
            return volume.status != 'in-use'
        except nova_exceptions.ClientException as e:
            # Transient client errors just mean "keep polling".
            LOG.debug(e)
            return False

    try:
        # Wait until volume is detached, before issuing delete.
        utils.poll_until(volume_is_detached, sleep_time=2,
                         time_out=int(config.Config.get(
                             'volume_detach_time_out', 30)))
    except rd_exceptions.PollTimeOut:
        # Failed waiting for volume to detach; attempt to delete anyway.
        LOG.error("Timeout waiting for volume to detach: %s" % volume_id)
    try:
        client.volumes.delete(volume_id)
    except nova_exceptions.NotFound:
        raise rd_exceptions.NotFound(uuid=volume_id)
    except nova_exceptions.ClientException as e:
        # FIX: the original raised VolumeDeletionFailure(str(pto)), but
        # 'pto' is only bound when the detach wait timed out -- on the
        # normal path this was a NameError.  Report the actual error.
        raise rd_exceptions.VolumeDeletionFailure(str(e))
def resize_volume(self, new_size):
    """Resize the instance's attached volume to *new_size* GB.

    Polls until the volume reports 'in-use' again, records the new size
    in the DB, asks Nova to rescan the attached volume, and tells the
    guest to grow its filesystem.  Errors are logged, not raised; the
    task status is always cleared in the finally block.
    """
    LOG.debug("%s: Resizing volume for instance: %s to %r GB" %
              (greenthread.getcurrent(), self.server.id, new_size))
    self.volume_client.volumes.resize(self.volume_id, int(new_size))
    try:
        # First callable fetches the volume, the second is the predicate
        # polled until truthy (volume back to 'in-use' post-resize).
        utils.poll_until(
            lambda: self.volume_client.volumes.get(self.volume_id),
            lambda volume: volume.status == 'in-use',
            sleep_time=2,
            time_out=CONF.volume_time_out)
        volume = self.volume_client.volumes.get(self.volume_id)
        self.update_db(volume_size=volume.size)
        self.nova_client.volumes.rescan_server_volume(self.server,
                                                      self.volume_id)
        self.guest.resize_fs(self.get_volume_mountpoint())
    except PollTimeOut as pto:
        LOG.error("Timeout trying to rescan or resize the attached volume "
                  "filesystem for volume: %s" % self.volume_id)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error encountered trying to rescan or resize the "
                  "attached volume filesystem for volume: %s" %
                  self.volume_id)
    finally:
        # Always release the task lock, even after a failure.
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
def poll_until_then_raise(event, exception, _poll_timeout=exception.PollTimeOut):
    """Poll *event* until it succeeds; raise *exception* on timeout.

    FIX: the parameter 'exception' shadows the imported 'exception'
    module, so the original 'except exception.PollTimeOut' looked up
    a PollTimeOut attribute on the *passed exception object* (an
    AttributeError at catch time).  The module-level PollTimeOut class
    is now captured at def time via the private keyword default, while
    the call signature stays backward compatible.
    """
    try:
        utils.poll_until(event,
                         sleep_time=RESET_ROOT_SLEEP_INTERVAL,
                         time_out=RESET_ROOT_RETRY_TIMEOUT)
    except _poll_timeout:
        raise exception
def test_revert_nova_fails(self):
    """Guest never wakes after resize; the revert itself fails in Nova."""
    self._stop_mysql()
    self._nova_resizes_successfully()
    # Guest stays PAUSED, so the action treats the guest as dead.
    self.instance._set_service_status_to_paused()
    self.instance.service_status = ServiceStatuses.PAUSED
    # Waiting for the guest times out...
    utils.poll_until(mox.IgnoreArg(), sleep_time=2,
                     time_out=120).AndRaise(PollTimeOut)
    # ...so a revert is issued, and the server lands in ERROR.
    self.instance.server.revert_resize()
    self._server_changes_to("ERROR", OLD_FLAVOR_ID)
def _assert_guest_is_ok(self):
    """Verify the guest agent is responsive after the action.

    Parks the service status on PAUSED (a value the guest itself never
    sets) and waits for the guest to overwrite it with anything else,
    which proves it is alive.
    """
    self.instance._set_service_status_to_paused()
    utils.poll_until(self._guest_is_awake,
                     sleep_time=2,
                     time_out=RESIZE_TIME_OUT)
def _wait_for_revert_nova_action(self):
    """Poll Nova until the reverted server reports ACTIVE again."""
    def _server_is_active():
        # Refresh the cached server record before each status check.
        self.instance._refresh_compute_server_info()
        return self.instance.server.status == 'ACTIVE'
    utils.poll_until(_server_is_active, sleep_time=2,
                     time_out=REVERT_TIME_OUT)
def _wait_for_revert_nova_action(self):
    """Block until the server is ACTIVE again after a resize revert."""
    def _is_active_again():
        # Re-fetch the server record, then check its status.
        self.instance._refresh_compute_server_info()
        return self.instance.server.status == 'ACTIVE'
    utils.poll_until(_is_active_again,
                     sleep_time=2,
                     time_out=REVERT_TIME_OUT)
def _wait_for_nova_action(self):
    """Poll Nova until the server leaves the RESIZE state."""
    def _resize_finished():
        # Refresh the cached server record before each status check.
        self.instance._refresh_compute_server_info()
        return self.instance.server.status != 'RESIZE'
    utils.poll_until(_resize_finished, sleep_time=2,
                     time_out=RESIZE_TIME_OUT)
def resize_flavor(self, new_flavor_id, old_memory_size, new_memory_size):
    """Resize the compute flavor (RAM size) of the instance.

    Stops MySQL, issues the Nova resize, waits for VERIFY_RESIZE,
    confirms with Nova and records the new flavor in the DB.  On any
    failure MySQL is restarted with the ORIGINAL memory size; in all
    cases MySQL is restarted (inner finally) and the task status is
    cleared (outer finally).
    """
    def resize_status_msg():
        # Debug summary of the current resize state.
        return "instance_id=%s, status=%s, flavor_id=%s, " \
               "dest. flavor id=%s)" % (self.db_info.id,
                                        self.server.status,
                                        str(self.flavor["id"]),
                                        str(new_flavor_id))
    try:
        LOG.debug("Instance %s calling stop_mysql..." % self.db_info.id)
        self.guest.stop_mysql()
        try:
            LOG.debug("Instance %s calling Compute resize..." %
                      self.db_info.id)
            self.server.resize(new_flavor_id)
            # Do initial check and confirm the status is appropriate.
            self._refresh_compute_server_info()
            if self.server.status != "RESIZE" and \
                    self.server.status != "VERIFY_RESIZE":
                msg = "Unexpected status after call to resize! : %s"
                raise ReddwarfError(msg % resize_status_msg())

            # Wait for the flavor to change.
            def update_server_info():
                self._refresh_compute_server_info()
                return self.server.status != "RESIZE"

            utils.poll_until(update_server_info, sleep_time=2,
                             time_out=60 * 2)
            # Do check to make sure the status and flavor id are correct.
            if str(self.server.flavor["id"]) != str(new_flavor_id) or \
                    self.server.status != "VERIFY_RESIZE":
                msg = "Assertion failed! flavor_id=%s and not %s"
                actual_flavor = self.server.flavor["id"]
                expected_flavor = new_flavor_id
                raise ReddwarfError(msg % (actual_flavor, expected_flavor))
            # Confirm the resize with Nova.
            LOG.debug("Instance %s calling Compute confirm resize..." %
                      self.db_info.id)
            self.server.confirm_resize()
            # Record the new flavor_id in our database.
            LOG.debug("Updating instance %s to flavor_id %s." %
                      (self.id, new_flavor_id))
            self.update_db(flavor_id=new_flavor_id)
        except PollTimeOut as pto:
            LOG.error("Timeout trying to resize the flavor for instance "
                      " %s" % self.db_info.id)
        except Exception as ex:
            # Roll back: restart MySQL with the original memory size.
            new_memory_size = old_memory_size
            LOG.error("Error during resize compute! Aborting action.")
            LOG.error(ex)
        finally:
            # Tell the guest to restart MySQL with the new RAM size.
            # This is in the finally because we have to call this, or
            # else MySQL could stay turned off on an otherwise usable
            # instance.
            LOG.debug("Instance %s starting mysql..." % self.db_info.id)
            self.guest.start_mysql_with_conf_changes(new_memory_size)
    finally:
        # Always release the task lock.
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
def test_successful_migrate(self):
    """Happy-path migrate: server reaches VERIFY_RESIZE, guest wakes up
    RUNNING, MySQL restarts and the resize is confirmed."""
    self._stop_mysql()
    self.server.migrate()
    self._server_changes_to("VERIFY_RESIZE", NEW_FLAVOR_ID)
    self.instance._set_service_status_to_paused()
    # Guest reports RUNNING, so the wait succeeds without timing out.
    self.instance.service_status = ServiceStatuses.RUNNING
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)
    self._start_mysql()
    self.instance.server.confirm_resize()
def test_confirm_resize_fails(self):
    """Resize succeeds and the guest is healthy, but the server is in
    SHUTDOWN when confirm_resize is issued."""
    self._stop_mysql()
    self._nova_resizes_successfully()
    self.instance._set_service_status_to_paused()
    # Guest reports RUNNING, so the wait succeeds.
    self.instance.service_status = ServiceStatuses.RUNNING
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)
    self._start_mysql()
    # Server flips to SHUTDOWN before the confirmation call.
    self.server.status = "SHUTDOWN"
    self.instance.server.confirm_resize()
def _wait_for_nova_action(self):
    """Block until the compute server is no longer in RESIZE."""
    def _left_resize_state():
        # Pull fresh server info, then test the status.
        self.instance._refresh_compute_server_info()
        return self.instance.server.status != 'RESIZE'
    utils.poll_until(_left_resize_state,
                     sleep_time=2,
                     time_out=RESIZE_TIME_OUT)
def _create_volume(self, volume_size):
    """Create the MySQL data volume for this instance.

    Returns a volume_info dict (block_device, device_path, mount_point,
    volumes); all values are None when volume support is disabled or no
    size was requested.  Raises VolumeCreationFailure if the volume
    ends up in 'error'.
    """
    LOG.info("Entering create_volume")
    LOG.debug(_("Starting to create the volume for the instance"))
    volume_support = config.Config.get("reddwarf_volume_support", 'False')
    LOG.debug(_("reddwarf volume support = %s") % volume_support)
    if (volume_size is None or
            utils.bool_from_string(volume_support) is False):
        # Volumes disabled: hand back an all-None info dict.
        volume_info = {
            'block_device': None,
            'device_path': None,
            'mount_point': None,
            'volumes': None,
        }
        return volume_info
    volume_client = create_nova_volume_client(self.context)
    volume_desc = ("mysql volume for %s" % self.id)
    volume_ref = volume_client.volumes.create(
        volume_size,
        display_name="mysql-%s" % self.id,
        display_description=volume_desc)
    # Record the volume ID in case something goes wrong.
    self.update_db(volume_id=volume_ref.id)
    # Poll (up to 2 minutes) until the volume settles into a terminal
    # build state.
    utils.poll_until(
        lambda: volume_client.volumes.get(volume_ref.id),
        lambda v_ref: v_ref.status in ['available', 'error'],
        sleep_time=2,
        time_out=2 * 60)
    v_ref = volume_client.volumes.get(volume_ref.id)
    if v_ref.status in ['error']:
        raise VolumeCreationFailure()
    LOG.debug(_("Created volume %s") % v_ref)
    # The mapping is in the format:
    # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
    # setting the delete_on_terminate instance to true=1
    mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1)
    bdm = config.Config.get('block_device_mapping', 'vdb')
    block_device = {bdm: mapping}
    volumes = [{'id': v_ref.id, 'size': v_ref.size}]
    LOG.debug("block_device = %s" % block_device)
    LOG.debug("volume = %s" % volumes)
    device_path = config.Config.get('device_path', '/dev/vdb')
    mount_point = config.Config.get('mount_point', '/var/lib/mysql')
    LOG.debug(_("device_path = %s") % device_path)
    LOG.debug(_("mount_point = %s") % mount_point)
    volume_info = {'block_device': block_device,
                   'device_path': device_path,
                   'mount_point': mount_point,
                   'volumes': volumes}
    return volume_info
def _create_volume(self, volume_size):
    """Create the MySQL data volume for this instance.

    Returns a volume_info dict (block_device, device_path, mount_point,
    volumes); all values are None when volume support is disabled or no
    size was requested.  Raises VolumeCreationFailure if the volume
    ends up in 'error'.
    """
    LOG.info("Entering create_volume")
    LOG.debug(_("Starting to create the volume for the instance"))
    volume_support = config.Config.get("reddwarf_volume_support", 'False')
    LOG.debug(_("reddwarf volume support = %s") % volume_support)
    if (volume_size is None or
            utils.bool_from_string(volume_support) is False):
        # Volumes disabled: hand back an all-None info dict.
        volume_info = {
            'block_device': None,
            'device_path': None,
            'mount_point': None,
            'volumes': None,
        }
        return volume_info
    volume_client = create_nova_volume_client(self.context)
    volume_desc = ("mysql volume for %s" % self.id)
    volume_ref = volume_client.volumes.create(
        volume_size,
        display_name="mysql-%s" % self.id,
        display_description=volume_desc)
    # Record the volume ID in case something goes wrong.
    self.update_db(volume_id=volume_ref.id)
    # Poll (up to 2 minutes) until the volume settles into a terminal
    # build state.
    utils.poll_until(lambda: volume_client.volumes.get(volume_ref.id),
                     lambda v_ref: v_ref.status in ['available', 'error'],
                     sleep_time=2,
                     time_out=2 * 60)
    v_ref = volume_client.volumes.get(volume_ref.id)
    if v_ref.status in ['error']:
        raise VolumeCreationFailure()
    LOG.debug(_("Created volume %s") % v_ref)
    # The mapping is in the format:
    # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
    # setting the delete_on_terminate instance to true=1
    mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1)
    bdm = config.Config.get('block_device_mapping', 'vdb')
    block_device = {bdm: mapping}
    volumes = [{'id': v_ref.id, 'size': v_ref.size}]
    LOG.debug("block_device = %s" % block_device)
    LOG.debug("volume = %s" % volumes)
    device_path = config.Config.get('device_path', '/dev/vdb')
    mount_point = config.Config.get('mount_point', '/var/lib/mysql')
    LOG.debug(_("device_path = %s") % device_path)
    LOG.debug(_("mount_point = %s") % mount_point)
    volume_info = {
        'block_device': block_device,
        'device_path': device_path,
        'mount_point': mount_point,
        'volumes': volumes
    }
    return volume_info
def _assert_guest_is_ok(self):
    """Confirm the guest agent is alive.

    The status is forced to PAUSED (which the guest never sets on its
    own); once the guest changes it to any other value we know it is
    responding.
    """
    self.instance._set_service_status_to_paused()
    utils.poll_until(self._guest_is_awake, sleep_time=2,
                     time_out=RESIZE_TIME_OUT)
def test_revert_nova_fails(self):
    """Guest never wakes after resize; the revert itself fails in Nova."""
    self._stop_mysql()
    self._nova_resizes_successfully()
    # Guest stays PAUSED, so the action treats the guest as dead.
    self.instance._set_service_status_to_paused()
    self.instance.service_status = ServiceStatuses.PAUSED
    # Waiting for the guest times out...
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\
        .AndRaise(PollTimeOut)
    # ...so a revert is issued, and the server lands in ERROR.
    self.instance.server.revert_resize()
    self._server_changes_to("ERROR", OLD_FLAVOR_ID)
def _server_changes_to(self, new_status, new_flavor_id):
    """Record a poll_until expectation whose side effect flips the fake
    server to *new_status* / *new_flavor_id*.

    poll_until is stubbed out at most once per test (tracked via
    self.poll_until_mocked).
    """
    def change():
        self.server.status = new_status
        self.instance.server.flavor['id'] = new_flavor_id
    if not self.poll_until_mocked:
        self.mock.StubOutWithMock(utils, "poll_until")
        self.poll_until_mocked = True
    # The lambda discards poll_until's arguments and just applies the
    # status/flavor change.
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\
        .WithSideEffects(lambda ignore, sleep_time, time_out: change())
def test_mysql_is_not_okay(self):
    """Guest wakes up but MySQL is SHUTDOWN, so the resize is reverted
    and MySQL restarted."""
    self._stop_mysql()
    self._nova_resizes_successfully()
    self.instance._set_service_status_to_paused()
    # Guest responds, but with an unhealthy SHUTDOWN status.
    self.instance.service_status = ServiceStatuses.SHUTDOWN
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)
    self._start_mysql()
    # Unhealthy MySQL triggers a revert back to the old flavor.
    self.instance.server.revert_resize()
    self._server_changes_to("ACTIVE", OLD_FLAVOR_ID)
    self.guest.restart()
def test_guest_is_not_okay(self):
    """Guest never responds after the resize; revert succeeds and the
    database is restarted."""
    self._stop_db()
    self._nova_resizes_successfully()
    # Guest stays PAUSED -- it never wakes up.
    self.instance._set_service_status_to_paused()
    self.instance.service_status = ServiceStatuses.PAUSED
    # Waiting for the guest times out...
    utils.poll_until(mox.IgnoreArg(), sleep_time=2, time_out=120)\
        .AndRaise(PollTimeOut)
    # ...so the resize is reverted and the server goes back to ACTIVE.
    self.instance.server.revert_resize()
    self._server_changes_to("ACTIVE", OLD_FLAVOR_ID)
    self.guest.restart()
def _create_volume(self, volume_size):
    """Create the MySQL data volume for this instance.

    Returns a volume_info dict (block_device, device_path, mount_point,
    volumes); all values are None when volume support is disabled or no
    size was requested.  Raises VolumeCreationFailure if the volume
    ends up in 'error'.
    """
    LOG.info("Entering create_volume")
    LOG.debug(_("Starting to create the volume for the instance"))
    volume_support = config.Config.get("reddwarf_volume_support", "False")
    LOG.debug(_("reddwarf volume support = %s") % volume_support)
    if volume_size is None or \
            utils.bool_from_string(volume_support) is False:
        # Volumes disabled: hand back an all-None info dict.
        volume_info = {"block_device": None, "device_path": None,
                       "mount_point": None, "volumes": None}
        return volume_info
    volume_client = create_nova_volume_client(self.context)
    volume_desc = "mysql volume for %s" % self.id
    volume_ref = volume_client.volumes.create(
        volume_size,
        display_name="mysql-%s" % self.id,
        display_description=volume_desc
    )
    # Record the volume ID in case something goes wrong.
    self.update_db(volume_id=volume_ref.id)
    # Poll (up to 2 minutes) until the volume settles into a terminal
    # build state.
    utils.poll_until(
        lambda: volume_client.volumes.get(volume_ref.id),
        lambda v_ref: v_ref.status in ["available", "error"],
        sleep_time=2,
        time_out=2 * 60,
    )
    v_ref = volume_client.volumes.get(volume_ref.id)
    if v_ref.status in ["error"]:
        raise VolumeCreationFailure()
    LOG.debug(_("Created volume %s") % v_ref)
    # The mapping is in the format:
    # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
    # setting the delete_on_terminate instance to true=1
    mapping = "%s:%s:%s:%s" % (v_ref.id, "", v_ref.size, 1)
    bdm = config.Config.get("block_device_mapping", "vdb")
    block_device = {bdm: mapping}
    volumes = [{"id": v_ref.id, "size": v_ref.size}]
    LOG.debug("block_device = %s" % block_device)
    LOG.debug("volume = %s" % volumes)
    device_path = config.Config.get("device_path", "/dev/vdb")
    mount_point = config.Config.get("mount_point", "/var/lib/mysql")
    LOG.debug(_("device_path = %s") % device_path)
    LOG.debug(_("mount_point = %s") % mount_point)
    volume_info = {
        "block_device": block_device,
        "device_path": device_path,
        "mount_point": mount_point,
        "volumes": volumes,
    }
    return volume_info
def _create_volume(self, volume_size):
    """Create the MySQL data volume for this instance (volume support
    assumed enabled in this variant).

    Returns a volume_info dict (block_device, device_path, mount_point,
    volumes).  Raises VolumeCreationFailure if the volume ends up in
    'error'.
    """
    LOG.info("Entering create_volume")
    LOG.debug(_("Starting to create the volume for the instance"))
    volume_client = create_nova_volume_client(self.context)
    volume_desc = ("mysql volume for %s" % self.id)
    volume_ref = volume_client.volumes.create(
        volume_size,
        display_name="mysql-%s" % self.id,
        display_description=volume_desc)
    # Record the volume ID in case something goes wrong.
    self.update_db(volume_id=volume_ref.id)
    # Poll until the volume settles into a terminal build state.
    utils.poll_until(
        lambda: volume_client.volumes.get(volume_ref.id),
        lambda v_ref: v_ref.status in ['available', 'error'],
        sleep_time=2,
        time_out=VOLUME_TIME_OUT)
    v_ref = volume_client.volumes.get(volume_ref.id)
    if v_ref.status in ['error']:
        raise VolumeCreationFailure()
    LOG.debug(_("Created volume %s") % v_ref)
    # The mapping is in the format:
    # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
    # setting the delete_on_terminate instance to true=1
    mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1)
    bdm = CONF.block_device_mapping
    block_device = {bdm: mapping}
    volumes = [{'id': v_ref.id, 'size': v_ref.size}]
    LOG.debug("block_device = %s" % block_device)
    LOG.debug("volume = %s" % volumes)
    device_path = CONF.device_path
    mount_point = CONF.mount_point
    LOG.debug(_("device_path = %s") % device_path)
    LOG.debug(_("mount_point = %s") % mount_point)
    volume_info = {'block_device': block_device,
                   'device_path': device_path,
                   'mount_point': mount_point,
                   'volumes': volumes}
    return volume_info
def _create_volume(self, volume_size):
    """Create the MySQL data volume for this instance (volume support
    assumed enabled in this variant).

    Returns a volume_info dict (block_device, device_path, mount_point,
    volumes).  Raises VolumeCreationFailure if the volume ends up in
    'error'.
    """
    LOG.info("Entering create_volume")
    LOG.debug(_("Starting to create the volume for the instance"))
    volume_client = create_nova_volume_client(self.context)
    volume_desc = ("mysql volume for %s" % self.id)
    volume_ref = volume_client.volumes.create(
        volume_size,
        display_name="mysql-%s" % self.id,
        display_description=volume_desc)
    # Record the volume ID in case something goes wrong.
    self.update_db(volume_id=volume_ref.id)
    # Poll until the volume settles into a terminal build state.
    utils.poll_until(lambda: volume_client.volumes.get(volume_ref.id),
                     lambda v_ref: v_ref.status in ['available', 'error'],
                     sleep_time=2,
                     time_out=VOLUME_TIME_OUT)
    v_ref = volume_client.volumes.get(volume_ref.id)
    if v_ref.status in ['error']:
        raise VolumeCreationFailure()
    LOG.debug(_("Created volume %s") % v_ref)
    # The mapping is in the format:
    # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
    # setting the delete_on_terminate instance to true=1
    mapping = "%s:%s:%s:%s" % (v_ref.id, '', v_ref.size, 1)
    bdm = CONF.block_device_mapping
    block_device = {bdm: mapping}
    volumes = [{'id': v_ref.id, 'size': v_ref.size}]
    LOG.debug("block_device = %s" % block_device)
    LOG.debug("volume = %s" % volumes)
    device_path = CONF.device_path
    mount_point = CONF.mount_point
    LOG.debug(_("device_path = %s") % device_path)
    LOG.debug(_("mount_point = %s") % mount_point)
    volume_info = {
        'block_device': block_device,
        'device_path': device_path,
        'mount_point': mount_point,
        'volumes': volumes
    }
    return volume_info
def create_instance(self, flavor_id, flavor_ram, image_id, databases,
                    users, service_type, volume_size, security_groups,
                    backup_id):
    """Provision the compute server + volume, register DNS, prepare the
    guest, and emit a 'create' usage event once the service is active.

    The usage event is deliberately deferred until the service becomes
    active so a failed build is not billed.
    """
    if use_nova_server_volume:
        # Nova creates the volume together with the server.
        server, volume_info = self._create_server_volume(
            flavor_id,
            image_id,
            security_groups,
            service_type,
            volume_size)
    else:
        # Volume is created separately, then attached.
        server, volume_info = self._create_server_volume_individually(
            flavor_id,
            image_id,
            security_groups,
            service_type,
            volume_size)
    try:
        self._create_dns_entry()
    except Exception as e:
        msg = "Error creating DNS entry for instance: %s" % self.id
        err = inst_models.InstanceTasks.BUILDING_ERROR_DNS
        self._log_and_raise(e, msg, err)

    if server:
        self._guest_prepare(server, flavor_ram, volume_info,
                            databases, users, backup_id)

    if not self.db_info.task_status.is_error:
        self.update_db(task_status=inst_models.InstanceTasks.NONE)

    # Make sure the service becomes active before sending a usage
    # record to avoid over billing a customer for an instance that
    # fails to build properly.
    try:
        utils.poll_until(self._service_is_active,
                         sleep_time=USAGE_SLEEP_TIME,
                         time_out=USAGE_TIMEOUT)
        self.send_usage_event('create', instance_size=flavor_ram)
    except PollTimeOut:
        LOG.error("Timeout for service changing to active. "
                  "No usage create-event sent.")
    except Exception:
        LOG.exception("Error during create-event call.")
def attach(cls, credential, region, volume, server_id, device):
    """Attach the given volume to a server, retrying until it succeeds.

    NOTE(review): the original docstring read "Assigns a floating ip to
    a server", which does not match the code -- this method attaches a
    volume via create_server_volume.  Polling simply retries the attach
    call until it stops raising; raises VolumeAttachmentFailure on
    timeout.
    """
    client = cls.get_client(credential, region)

    # Poll until the volume is attached.
    def volume_is_attached():
        try:
            client.volumes.create_server_volume(server_id,
                                                volume['id'],
                                                device)
            return True
        except nova_exceptions.ClientException as e:
            # Attach not possible yet; keep retrying.
            LOG.debug(e)
            return False

    try:
        # Attempt to attach volume
        utils.poll_until(volume_is_attached, sleep_time=5,
                         time_out=int(config.Config.get(
                             'volume_attach_time_out', 60)))
    except rd_exceptions.PollTimeOut as pto:
        LOG.error("Timeout trying to attach volume: %s" % volume['id'])
        raise rd_exceptions.VolumeAttachmentFailure(str(pto))
def _create_dns_entry(self):
    """Register a DNS entry for this instance once its IP is available.

    No-op (logged) unless CONF.reddwarf_dns_support is set.  Polls the
    compute server until it reports at least one address, then records
    the instance's IP with DNS.  Raises ReddwarfError if the server
    goes to ERROR before getting an address.
    """
    LOG.debug("%s: Creating dns entry for instance: %s" %
              (greenthread.getcurrent(), self.id))
    dns_support = CONF.reddwarf_dns_support
    LOG.debug(_("reddwarf dns support = %s") % dns_support)
    if dns_support:
        nova_client = create_nova_client(self.context)
        dns_client = create_dns_client(self.context)

        def get_server():
            # Re-fetch the server each poll iteration.
            c_id = self.db_info.compute_instance_id
            return nova_client.servers.get(c_id)

        def ip_is_available(server):
            LOG.info("Polling for ip addresses: $%s " % server.addresses)
            if server.addresses != {}:
                return True
            elif (server.addresses == {} and
                    server.status != InstanceStatus.ERROR):
                # No address yet, server still healthy: keep polling.
                return False
            elif (server.addresses == {} and
                    server.status == InstanceStatus.ERROR):
                # Server failed before getting an address; give up.
                msg = _("Instance IP not available, instance (%s): "
                        "server had status (%s).")
                LOG.error(msg % (self.id, server.status))
                raise ReddwarfError(status=server.status)

        poll_until(get_server, ip_is_available,
                   sleep_time=1, time_out=DNS_TIME_OUT)
        server = nova_client.servers.get(self.db_info.compute_instance_id)
        LOG.info("Creating dns entry...")
        dns_client.create_instance_entry(self.id,
                                         get_ip_address(server.addresses))
    else:
        LOG.debug("%s: DNS not enabled for instance: %s" %
                  (greenthread.getcurrent(), self.id))
def _delete_resources(self):
    """Best-effort teardown of the compute server and DNS entry, then
    wait for the server to disappear and emit a 'delete' usage event.

    The server record is captured up front so the usage event can still
    reference it after deletion.
    """
    server_id = self.db_info.compute_instance_id
    old_server = self.nova_client.servers.get(server_id)
    try:
        self.server.delete()
    except Exception as ex:
        LOG.error("Error during delete compute server %s " %
                  self.server.id)
        LOG.error(ex)
    try:
        dns_support = CONF.reddwarf_dns_support
        LOG.debug(_("reddwarf dns support = %s") % dns_support)
        if dns_support:
            dns_api = create_dns_client(self.context)
            dns_api.delete_instance_entry(instance_id=self.db_info.id)
    except Exception as ex:
        LOG.error("Error during dns entry for instance %s " %
                  self.db_info.id)
        LOG.error(ex)

    # Poll until the server is gone.
    def server_is_finished():
        try:
            server = self.nova_client.servers.get(server_id)
            if server.status not in ['SHUTDOWN', 'ACTIVE']:
                msg = "Server %s got into ERROR status during delete " \
                      "of instance %s!" % (server.id, self.id)
                LOG.error(msg)
                return False
            # NOTE(review): implicitly returns None (falsy) while the
            # server still exists in a normal state, so polling goes on.
        except nova_exceptions.NotFound:
            return True

    poll_until(server_is_finished, sleep_time=2,
               time_out=CONF.server_delete_time_out)
    self.send_usage_event('delete', deleted_at=timeutils.isotime(),
                          server=old_server)
def reboot(self):
    """Reboot the compute instance underneath this database instance.

    Stops MySQL, reboots via Nova, polls until the server reports
    ACTIVE, then parks the service status on PAUSED for the guest agent
    to reset once MySQL is running.  Failures are logged and swallowed
    (best-effort reboot).
    """
    try:
        LOG.debug("Instance %s calling stop_mysql..." % self.id)
        self.guest.stop_mysql()
        LOG.debug("Rebooting instance %s" % self.id)
        self.server.reboot()

        # Poll nova until instance is active.
        reboot_time_out = CONF.reboot_time_out

        def update_server_info():
            self._refresh_compute_server_info()
            return self.server.status == 'ACTIVE'

        utils.poll_until(
            update_server_info,
            sleep_time=2,
            time_out=reboot_time_out)

        # Set the status to PAUSED. The guest agent will reset the status
        # when the reboot completes and MySQL is running.
        self._set_service_status_to_paused()
        LOG.debug("Successfully rebooted instance %s" % self.id)
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; 'as' works
        # on Python 2.6+ and is required for Python 3.
        LOG.error("Failed to reboot instance %s: %s" % (self.id, str(e)))
def reboot(self):
    """Reboot the compute instance underneath this database instance.

    Stops the database, reboots via Nova, polls until the server
    reports ACTIVE, then parks the service status on PAUSED for the
    guest agent to reset once the database is running.  Failures are
    logged and swallowed (best-effort reboot).
    """
    try:
        LOG.debug("Instance %s calling stop_db..." % self.id)
        self.guest.stop_db()
        LOG.debug("Rebooting instance %s" % self.id)
        self.server.reboot()

        # Poll nova until instance is active.
        reboot_time_out = CONF.reboot_time_out

        def update_server_info():
            self._refresh_compute_server_info()
            return self.server.status == 'ACTIVE'

        utils.poll_until(update_server_info, sleep_time=2,
                         time_out=reboot_time_out)
        # Set the status to PAUSED. The guest agent will reset the status
        # when the reboot completes and MySQL is running.
        self._set_service_status_to_paused()
        LOG.debug("Successfully rebooted instance %s" % self.id)
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; 'as' works
        # on Python 2.6+ and is required for Python 3.
        LOG.error("Failed to reboot instance %s: %s" % (self.id, str(e)))
def resize_volume(self, new_size):
    """Resize the instance's attached volume to *new_size* GB.

    Polls until the volume reports 'in-use' again, records the new size
    in the DB, asks Nova to rescan the attached volume, and tells the
    guest to grow its filesystem.  Errors are logged, not raised; the
    task status is always cleared in the finally block.
    """
    LOG.debug("%s: Resizing volume for instance: %s to %r GB" %
              (greenthread.getcurrent(), self.server.id, new_size))
    self.volume_client.volumes.resize(self.volume_id, int(new_size))
    try:
        utils.poll_until(
            lambda: self.volume_client.volumes.get(self.volume_id),
            lambda volume: volume.status == 'in-use',
            sleep_time=2,
            time_out=int(config.Config.get('volume_time_out')))
        volume = self.volume_client.volumes.get(self.volume_id)
        self.update_db(volume_size=volume.size)
        self.nova_client.volumes.rescan_server_volume(
            self.server, self.volume_id)
        self.guest.resize_fs(self.get_volume_mountpoint())
    except PollTimeOut:
        LOG.error("Timeout trying to rescan or resize the attached volume "
                  "filesystem for volume: %s" % self.volume_id)
    except Exception as e:
        # FIX: the caught exception was bound but never logged, unlike
        # the sibling resize_volume implementations; log it so the real
        # cause is not lost.
        LOG.error(e)
        LOG.error("Error encountered trying to rescan or resize the "
                  "attached volume filesystem for volume: %s" %
                  self.volume_id)
    finally:
        # Always release the task lock, even after a failure.
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
def resize_flavor(self, new_flavor_id, old_memory_size, new_memory_size):
    """Resize the instance's compute flavor via Nova.

    Stops MySQL, issues the Nova resize, waits for the server to leave
    RESIZE state, verifies the flavor and VERIFY_RESIZE status, confirms
    the resize, and records the new flavor in the DB.  MySQL is restarted
    in the inner finally block with a memory size matching whichever
    flavor the instance actually ended up on, and the task status is
    always cleared in the outer finally.
    """
    def resize_status_msg():
        # Debug snapshot of the resize state, used in error messages.
        return "instance_id=%s, status=%s, flavor_id=%s, "\
               "dest. flavor id=%s)" % (self.db_info.id,
                                        self.server.status,
                                        str(self.flavor['id']),
                                        str(new_flavor_id))
    try:
        LOG.debug("Instance %s calling stop_mysql..." % self.db_info.id)
        self.guest.stop_mysql()
        try:
            LOG.debug("Instance %s calling Compute resize..."
                      % self.db_info.id)
            self.server.resize(new_flavor_id)
            # Do initial check and confirm the status is appropriate.
            self._refresh_compute_server_info()
            if (self.server.status != "RESIZE" and
                    self.server.status != "VERIFY_RESIZE"):
                msg = "Unexpected status after call to resize! : %s"
                raise ReddwarfError(msg % resize_status_msg())

            # Wait for the flavor to change.
            def update_server_info():
                self._refresh_compute_server_info()
                return self.server.status != 'RESIZE'
            utils.poll_until(update_server_info,
                             sleep_time=2,
                             time_out=60 * 2)

            # Do check to make sure the status and flavor id are correct.
            if (str(self.server.flavor['id']) != str(new_flavor_id) or
                    self.server.status != "VERIFY_RESIZE"):
                msg = "Assertion failed! flavor_id=%s and not %s"
                actual_flavor = self.server.flavor['id']
                expected_flavor = new_flavor_id
                raise ReddwarfError(msg % (actual_flavor, expected_flavor))

            # Confirm the resize with Nova.
            LOG.debug("Instance %s calling Compute confirm resize..."
                      % self.db_info.id)
            self.server.confirm_resize()
            # Record the new flavor_id in our database.
            LOG.debug("Updating instance %s to flavor_id %s."
                      % (self.id, new_flavor_id))
            self.update_db(flavor_id=new_flavor_id)
        except PollTimeOut:
            # Fix: fall back to the old memory size, matching the generic
            # error path below.  On a timeout the resize was never
            # confirmed and the DB flavor was not updated, so the instance
            # is still on the old flavor -- MySQL must not be configured
            # for RAM it does not have.
            new_memory_size = old_memory_size
            LOG.error("Timeout trying to resize the flavor for instance "
                      " %s" % self.db_info.id)
        except Exception as ex:
            new_memory_size = old_memory_size
            LOG.error("Error during resize compute! Aborting action.")
            LOG.error(ex)
        finally:
            # Tell the guest to restart MySQL with the new RAM size.
            # This is in the finally because we have to call this, or
            # else MySQL could stay turned off on an otherwise usable
            # instance.
            LOG.debug("Instance %s starting mysql..." % self.db_info.id)
            self.guest.start_mysql_with_conf_changes(new_memory_size)
    finally:
        # Always clear the task status so the instance is not stuck busy.
        self.update_db(task_status=inst_models.InstanceTasks.NONE)
@classmethod def assign(cls, credential, region, floating_ip, server_id): """Assigns a floating ip to a server""" client = cls.get_client(credential, region) def floating_ip_is_attached(): try: client.servers.add_floating_ip(server_id, floating_ip['ip']) return True except nova_exceptions.ClientException, e: # raise rd_exceptions.ReddwarfError(str(e)) LOG.error(e) return False try: utils.poll_until(floating_ip_is_attached, sleep_time=2, time_out=int(config.Config.get('floating_ip_attach_timeout', 20))) except rd_exceptions.PollTimeOut as pto: LOG.error("Timeout trying to assign floating ip %s to instance %s" %(floating_ip['ip'], server_id)) raise rd_exceptions.FloatingIpAttachmentFailure(str(pto)) class Volume(RemoteModelBase): _data_fields = ['id', 'attachments', 'size', 'status'] def __init__(self, volume=None, credential=None, region=None, id=None): if id is None and volume is None: msg = "id is not defined" raise rd_exceptions.InvalidModelError(msg) elif volume is None: try:
resp, content = req.request(API_URL + "instances/" + instance_id, "GET", "", AUTH_HEADER) self.assertEqual(200, resp.status, ("Expecting 200 as response status of show instance but received %s" % resp.status)) LOG.debug("Content: %s" % content) content = json.loads(content) status = content['instance']['status'] if status=='running': return True return False except Exception as e: LOG.debug(e) return False try: # Wait up to 15 minutes for instance to go running utils.poll_until(instance_is_running, sleep_time=10, time_out=int(960)) except rd_exceptions.PollTimeOut as pto: LOG.error("Timeout waiting for instance to switch to running") self.fail("Instance did not switch to running after App Server teardown and recreate") def _load_boot_params(self, tenant_id, flavor_id): # Attempt to find Boot parameters for a specific tenant try: service_image = models.ServiceImage.find_by(service_name="database", tenant_id=tenant_id, deleted=False) except rd_exceptions.ModelNotFoundError, e: LOG.info("Service Image for tenant %s not found, using image for 'default_tenant'" % tenant_id) service_image = models.ServiceImage.find_by(service_name="database", tenant_id='default_tenant', deleted=False) image_id = service_image['image_id']
def _resize_flavor(self, new_flavor_id=None, old_memory_size=None,
                   new_memory_size=None):
    """Resize the instance to a new flavor, or migrate it in place.

    When new_flavor_id is given, the Nova server is resized to that
    flavor; when it is None, the server is migrated without a flavor
    change.  MySQL is stopped first and restarted in the inner finally
    block so the instance is never left with MySQL down.  On any error
    new_flavor_id is cleared and new_memory_size reset, so the restart
    path falls back to a plain guest.restart() with the old config.
    The task status is always cleared in the outer finally.
    """
    def resize_status_msg():
        # Debug snapshot of the resize state, used in error messages.
        return "instance_id=%s, status=%s, flavor_id=%s, "\
               "dest. flavor id=%s)" % (self.db_info.id,
                                        self.server.status,
                                        str(self.flavor['id']),
                                        str(new_flavor_id))
    try:
        LOG.debug("Instance %s calling stop_mysql..." % self.db_info.id)
        self.guest.stop_mysql()
        try:
            LOG.debug("Instance %s calling Compute resize..."
                      % self.db_info.id)
            if new_flavor_id:
                self.server.resize(new_flavor_id)
            else:
                # No target flavor: migrate in place instead of resizing.
                LOG.debug("Migrating instance %s without flavor change ..."
                          % self.db_info.id)
                self.server.migrate()

            # Do initial check and confirm the status is appropriate.
            self._refresh_compute_server_info()
            if (self.server.status != "RESIZE" and
                    self.server.status != "VERIFY_RESIZE"):
                msg = "Unexpected status after call to resize! : %s"
                raise ReddwarfError(msg % resize_status_msg())

            # Wait for the flavor to change.
            def update_server_info():
                self._refresh_compute_server_info()
                return self.server.status != 'RESIZE'
            utils.poll_until(
                update_server_info,
                sleep_time=2,
                time_out=60 * 2)

            # Do check to make sure the status and flavor id are correct.
            if new_flavor_id:
                if str(self.server.flavor['id']) != str(new_flavor_id):
                    msg = "Assertion failed! flavor_id=%s and not %s" \
                        % (self.server.flavor['id'], new_flavor_id)
                    raise ReddwarfError(msg)
            if (self.server.status != "VERIFY_RESIZE"):
                msg = "Assertion failed! status=%s and not %s" \
                    % (self.server.status, 'VERIFY_RESIZE')
                raise ReddwarfError(msg)

            # Confirm the resize with Nova.
            LOG.debug("Instance %s calling Compute confirm resize..."
                      % self.db_info.id)
            self.server.confirm_resize()

            if new_flavor_id:
                # Record the new flavor_id in our database.
                LOG.debug("Updating instance %s to flavor_id %s."
                          % (self.id, new_flavor_id))
                self.update_db(flavor_id=new_flavor_id)
        except Exception as ex:
            # Revert to old config values; clearing new_flavor_id makes
            # the finally block take the plain restart() path below.
            new_memory_size = old_memory_size
            new_flavor_id = None
            LOG.error("Error resizing instance %s."
                      % self.db_info.id)
            LOG.error(ex)
        finally:
            # Tell the guest to restart MySQL with the new RAM size.
            # This is in the finally because we have to call this, or
            # else MySQL could stay turned off on an otherwise usable
            # instance.
            LOG.debug("Instance %s starting mysql..." % self.db_info.id)
            if new_flavor_id:
                self.guest.start_mysql_with_conf_changes(new_memory_size)
            else:
                self.guest.restart()
    finally:
        # Always clear the task status so the instance is not stuck busy.
        self.update_db(task_status=inst_models.InstanceTasks.NONE)