def _create_resources():
    # Resolve the instance ID from the href reference.
    instance_id = utils.get_id_from_href(instance)
    # Confirm the instance exists and is in a state that allows actions.
    from trove.instance.models import Instance
    target = Instance.load(context, instance_id)
    target.validate_can_perform_action()
    cls.verify_swift_auth_token(context)
    try:
        db_info = DBBackup.create(name=name,
                                  description=description,
                                  tenant_id=context.tenant,
                                  state=BackupState.NEW,
                                  instance_id=instance_id,
                                  deleted=False)
    except exception.InvalidModelError as ex:
        LOG.exception("Unable to create Backup record:")
        raise exception.BackupCreationError(str(ex))
    # Payload forwarded to the backup task API.
    backup_info = {'id': db_info.id,
                   'name': name,
                   'description': description,
                   'instance_id': instance_id,
                   'backup_type': db_info.backup_type,
                   'checksum': db_info.checksum,
                   }
    api.API(context).create_backup(backup_info, instance_id)
    return db_info
def resize_flavor(self, new_flavor_id):
    """Resize this instance to a new Nova flavor (async).

    Raises FlavorNotFound if the flavor does not exist,
    LocalStorageNotSupported / LocalStorageNotSpecified when the flavor's
    ephemeral storage conflicts with the configured storage mode, and
    CannotResizeToSameSize when the target is effectively the same size.
    """
    self.validate_can_perform_action()
    LOG.debug("resizing instance %s flavor to %s"
              % (self.id, new_flavor_id))
    # Validate that the flavor can be found and that it isn't the same size
    # as the current one.
    client = create_nova_client(self.context)
    try:
        new_flavor = client.flavors.get(new_flavor_id)
    except nova_exceptions.NotFound:
        raise exception.FlavorNotFound(uuid=new_flavor_id)
    old_flavor = client.flavors.get(self.flavor_id)
    new_flavor_size = new_flavor.ram
    old_flavor_size = old_flavor.ram
    if CONF.trove_volume_support:
        # Volume-backed deployments reject flavors with ephemeral disk.
        if new_flavor.ephemeral != 0:
            raise exception.LocalStorageNotSupported()
        if new_flavor_size == old_flavor_size:
            raise exception.CannotResizeToSameSize()
    elif CONF.device_path is not None:
        # ephemeral support enabled
        if new_flavor.ephemeral == 0:
            raise exception.LocalStorageNotSpecified(flavor=new_flavor_id)
        # BUG FIX: the original compared new_flavor.ephemeral with itself
        # (always True); the intent is to compare against the old flavor's
        # ephemeral size so a resize to an identical flavor is rejected.
        if (new_flavor_size == old_flavor_size and
                new_flavor.ephemeral == old_flavor.ephemeral):
            raise exception.CannotResizeToSameSize()
    # Set the task to RESIZING and begin the async call before returning.
    self.update_db(task_status=InstanceTasks.RESIZING)
    LOG.debug("Instance %s set to RESIZING." % self.id)
    task_api.API(self.context).resize_flavor(self.id, old_flavor,
                                             new_flavor)
def reboot(self):
    """Reboot this instance through the task manager (async)."""
    self.validate_can_perform_action()
    LOG.info(_LI("Rebooting instance %s."), self.id)
    # Non-admin users may not reboot individual cluster members.
    in_cluster = self.db_info.cluster_id is not None
    if in_cluster and not self.context.is_admin:
        raise exception.ClusterInstanceOperationNotSupported()
    self.update_db(task_status=InstanceTasks.REBOOTING)
    task_api.API(self.context).reboot(self.id)
def _delete_resources():
    # A backup that is still in progress cannot be removed.
    target = cls.get_by_id(context, backup_id)
    if target.is_running:
        raise exception.UnprocessableEntity(
            _("Backup %s cannot be deleted because it is running.")
            % backup_id)
    cls.verify_swift_auth_token(context)
    api.API(context).delete_backup(backup_id)
def _create_resources():
    # Record the new instance in the DB in BUILDING state before any
    # cloud resources are provisioned.
    db_info = DBInstance.create(name=name, flavor_id=flavor_id,
                                tenant_id=context.tenant,
                                volume_size=volume_size,
                                service_type=service_type,
                                task_status=InstanceTasks.BUILDING)
    LOG.debug(_("Tenant %(tenant)s created new "
                "Trove instance %(db)s...") %
              {'tenant': context.tenant, 'db': db_info.id})
    # Guest service status starts as NEW until the agent reports in.
    service_status = InstanceServiceStatus.create(
        instance_id=db_info.id,
        status=rd_instance.ServiceStatuses.NEW)
    if CONF.trove_dns_support:
        # Ask the DNS driver for a hostname and persist it on the record.
        dns_client = create_dns_client(context)
        hostname = dns_client.determine_hostname(db_info.id)
        db_info.hostname = hostname
        db_info.save()
    root_password = None
    if CONF.root_on_create and not backup_id:
        # Pre-generate a root password so it can be returned to the
        # caller; skipped for restores -- presumably because the backup
        # carries its own credentials (TODO confirm).
        root_password = uuidutils.generate_uuid()
    # Hand the heavy provisioning work off to the task manager.
    task_api.API(context).create_instance(db_info.id, name, flavor,
                                          image_id, databases, users,
                                          service_type, volume_size,
                                          backup_id, availability_zone,
                                          root_password)
    return SimpleInstance(context, db_info, service_status, root_password)
def migrate(self, host=None):
    """Migrate this instance, optionally to a specific host (async)."""
    self.validate_can_perform_action()
    log_values = {'instance_id': self.id, 'host': host}
    LOG.info(_("Migrating instance id = %(instance_id)s "
               "to host = %(host)s.") % log_values)
    self.update_db(task_status=InstanceTasks.MIGRATING)
    task_api.API(self.context).migrate(self.id, host)
def detach_replica(self):
    """Break replication by detaching this replica from its source."""
    self.validate_can_perform_action()
    LOG.info(_LI("Detaching instance %s from its replication source."),
             self.id)
    if self.slave_of_id:
        task_api.API(self.context).detach_replica(self.id)
    else:
        # Only instances that actually replicate from a source qualify.
        raise exception.BadRequest(
            _("Instance %s is not a replica.") % self.id)
def update_overrides(self, overrides):
    """Push configuration overrides to this instance (async)."""
    LOG.debug("Updating or removing overrides for instance %s" % self.id)
    # Non-dynamic settings only take effect after a restart.
    restart_needed = do_configs_require_restart(
        overrides, datastore_manager=self.ds_version.manager)
    LOG.debug("config overrides has non-dynamic settings, "
              "requires a restart: %s" % restart_needed)
    if restart_needed:
        self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
    task_api.API(self.context).update_overrides(self.id, overrides)
def update_overrides(self, config):
    """Apply the overrides carried by *config* to this instance (async)."""
    LOG.debug("Updating or removing overrides for instance %s." % self.id)
    overrides = config.get_configuration_overrides()
    # Non-dynamic settings only take effect after a restart.
    restart_required = config.does_configuration_need_restart()
    LOG.debug("Config overrides has non-dynamic settings, "
              "requires a restart: %s." % restart_required)
    if restart_required:
        self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
    task_api.API(self.context).update_overrides(self.id, overrides)
def _delete_resources():
    # An instance still being built cannot be torn down yet.
    if self.is_building:
        raise exception.UnprocessableEntity("Instance %s is not ready."
                                            % self.id)
    compute_id = self.db_info.compute_instance_id
    LOG.debug(_(" ... deleting compute id = %s") % compute_id)
    LOG.debug(_(" ... setting status to DELETING."))
    self.update_db(task_status=InstanceTasks.DELETING)
    task_api.API(self.context).delete_instance(self.id)
def _create_resources():
    # Cluster members carry extra identifiers from the cluster config;
    # standalone instances leave them all None.
    if cluster_config:
        cluster_id = cluster_config.get("id", None)
        shard_id = cluster_config.get("shard_id", None)
        instance_type = cluster_config.get("instance_type", None)
    else:
        cluster_id = shard_id = instance_type = None
    # Record the instance in BUILDING state before provisioning starts.
    db_info = DBInstance.create(name=name, flavor_id=flavor_id,
                                tenant_id=context.tenant,
                                volume_size=volume_size,
                                datastore_version_id=
                                datastore_version.id,
                                task_status=InstanceTasks.BUILDING,
                                configuration_id=configuration_id,
                                slave_of_id=slave_of_id,
                                cluster_id=cluster_id,
                                shard_id=shard_id,
                                type=instance_type)
    LOG.debug("Tenant %(tenant)s created new Trove instance %(db)s." %
              {'tenant': context.tenant, 'db': db_info.id})
    # if a configuration group is associated with an instance,
    # generate an overrides dict to pass into the instance creation
    # method
    config = Configuration(context, configuration_id)
    overrides = config.get_configuration_overrides()
    # Guest service status starts as NEW until the agent reports in.
    service_status = InstanceServiceStatus.create(
        instance_id=db_info.id,
        status=tr_instance.ServiceStatuses.NEW)
    if CONF.trove_dns_support:
        # Ask the DNS driver for a hostname and persist it on the record.
        dns_client = create_dns_client(context)
        hostname = dns_client.determine_hostname(db_info.id)
        db_info.hostname = hostname
        db_info.save()
    root_password = None
    if cls.get_root_on_create(
            datastore_version.manager) and not backup_id:
        # Pre-generate root credentials; skipped for restores --
        # presumably the backup carries its own users (TODO confirm).
        root_password = utils.generate_random_password()
    # Hand the heavy provisioning work off to the task manager.
    task_api.API(context).create_instance(db_info.id, name, flavor,
                                          image_id, databases, users,
                                          datastore_version.manager,
                                          datastore_version.packages,
                                          volume_size, backup_id,
                                          availability_zone,
                                          root_password, nics,
                                          overrides, slave_of_id,
                                          cluster_config)
    return SimpleInstance(context, db_info, service_status, root_password)
def restart(self):
    """Restart the MySQL service on this instance (async)."""
    self.validate_can_perform_action()
    LOG.info("Restarting MySQL on instance %s..." % self.id)
    # Record the status locally since Nova might not change it quickly
    # enough.
    # TODO(tim.simpson): if this service dies before the status is reset
    # to NONE the instance is stuck; a last-updated timestamp with a
    # tolerance window would let us expire the stale status.
    self.update_db(task_status=InstanceTasks.REBOOTING)
    task_api.API(self.context).restart(self.id)
def tearDown(self):
    """Undo setUp: drop DB fixtures and restore every patched attribute."""
    # Delete each fixture exactly once (the original deleted self.db_info
    # and self.backup twice).
    self.db_info.delete()
    self.backup.delete()
    self.datastore.delete()
    self.datastore_version.delete()
    # Restore the originals saved in setUp.
    models.create_nova_client = self.orig_client
    task_api.API(self.context).create_instance = self.orig_api
    models.run_with_quotas = self.run_with_quotas
    # BUG FIX: restore the saved original (self.check), not self.context;
    # the original left a Mock context object installed as the
    # DBBackup.check_swift_object_exist method, leaking into other tests.
    backup_models.DBBackup.check_swift_object_exist = self.check
    super(CreateInstanceTest, self).tearDown()
def _create_resources():
    # parse the ID from the Ref
    instance_id = utils.get_id_from_href(instance)
    # verify that the instance exists and can perform actions
    from trove.instance.models import Instance
    instance_model = Instance.load(context, instance_id)
    instance_model.validate_can_perform_action()
    # NOTE(review): both the instance-level and the class-level
    # validation run here -- confirm the second call is not redundant.
    cls.validate_can_perform_action(instance_model, 'backup_create')
    cls.verify_swift_auth_token(context)
    # Backups of cluster members are not supported.
    if instance_model.cluster_id is not None:
        raise exception.ClusterInstanceOperationNotSupported()
    ds = instance_model.datastore
    ds_version = instance_model.datastore_version
    parent = None
    if parent_id:
        # Look up the parent info or fail early if not found or if
        # the user does not have access to the parent.
        _parent = cls.get_by_id(context, parent_id)
        parent = {
            'location': _parent.location,
            'checksum': _parent.checksum,
        }
    try:
        db_info = DBBackup.create(name=name,
                                  description=description,
                                  tenant_id=context.tenant,
                                  state=BackupState.NEW,
                                  instance_id=instance_id,
                                  parent_id=parent_id,
                                  datastore_version_id=ds_version.id,
                                  deleted=False)
    except exception.InvalidModelError as ex:
        LOG.exception(_("Unable to create backup record for "
                        "instance: %s"), instance_id)
        raise exception.BackupCreationError(str(ex))
    # Payload forwarded to the backup task API.
    backup_info = {'id': db_info.id,
                   'name': name,
                   'description': description,
                   'instance_id': instance_id,
                   'backup_type': db_info.backup_type,
                   'checksum': db_info.checksum,
                   'parent': parent,
                   'datastore': ds.name,
                   'datastore_version': ds_version.name,
                   }
    api.API(context).create_backup(backup_info, instance_id)
    return db_info
def unassign_configuration(self):
    """Detach the configuration group from this instance, if any."""
    LOG.debug(_("Unassigning the configuration from the instance %s")
              % self.id)
    if self.configuration and self.configuration.id:
        # BUG FIX: only dereference self.configuration.id after confirming
        # a configuration is attached; the original logged it
        # unconditionally before the guard, raising AttributeError
        # whenever no configuration was set.
        LOG.debug(_("Unassigning the configuration id %s")
                  % self.configuration.id)
        flavor = self.get_flavor()
        config_id = self.configuration.id
        task_api.API(self.context).unassign_configuration(self.id,
                                                          flavor,
                                                          config_id)
    else:
        LOG.debug("no configuration found on instance skipping.")
def _resize_resources():
    self.validate_can_perform_action()
    LOG.info(_("Resizing volume of instance %s.") % self.id)
    # Volume resize is not supported for cluster members.
    if self.db_info.cluster_id is not None:
        raise exception.ClusterInstanceOperationNotSupported()
    current_size = self.volume_size
    if int(new_size) <= current_size:
        raise exception.BadRequest(_("The new volume 'size' must be "
                                     "larger than the current volume "
                                     "size of '%s'.") % current_size)
    # Flag the task as RESIZING before handing off to the taskmanager.
    self.update_db(task_status=InstanceTasks.RESIZING)
    task_api.API(self.context).resize_volume(new_size, self.id)
def restart(self):
    """Restart the datastore service on this instance (async)."""
    self.validate_can_perform_action()
    LOG.info(_LI("Restarting datastore on instance %s."), self.id)
    # Non-admin users may not restart individual cluster members.
    in_cluster = self.db_info.cluster_id is not None
    if in_cluster and not self.context.is_admin:
        raise exception.ClusterInstanceOperationNotSupported()
    # Record the status locally since Nova might not change it quickly
    # enough.
    # TODO(tim.simpson): if this service dies before the status is reset
    # to NONE the instance is stuck; a last-updated timestamp with a
    # tolerance window would let us expire the stale status.
    self.update_db(task_status=InstanceTasks.REBOOTING)
    task_api.API(self.context).restart(self.id)
def _resize_resources():
    self.validate_can_perform_action()
    LOG.info("Resizing volume of instance %s..." % self.id)
    # An instance without a volume has nothing to resize.
    if not self.volume_size:
        raise exception.BadRequest(_("Instance %s has no volume.")
                                   % self.id)
    current_size = self.volume_size
    if int(new_size) <= current_size:
        raise exception.BadRequest(_("The new volume 'size' must be "
                                     "larger than the current volume "
                                     "size of '%s'") % current_size)
    # Flag the task as RESIZING before handing off to the taskmanager.
    self.update_db(task_status=InstanceTasks.RESIZING)
    task_api.API(self.context).resize_volume(new_size, self.id)
def delete(self):
    """Delete every member instance, then the cluster itself (async)."""
    allowed_states = [ClusterTasks.NONE, ClusterTasks.DELETING]
    self.validate_cluster_available(allowed_states)
    members = inst_models.DBInstance.find_all(cluster_id=self.id,
                                              deleted=False).all()
    self.update_db(task_status=ClusterTasks.DELETING)
    for member in members:
        inst_models.load_any_instance(self.context, member.id).delete()
    task_api.API(self.context).delete_cluster(self.id)
def save(context, configuration, configuration_items, instances):
    """Persist a configuration and push its overrides to each instance."""
    DBConfiguration.save(configuration)
    for item in configuration_items:
        # Saving un-deletes any previously removed parameter.
        item["deleted_at"] = None
        ConfigurationParameter.save(item)
    items = Configuration.load_items(context, configuration.id)
    for instance in instances:
        LOG.debug("applying to instance: %s" % instance.id)
        overrides = {entry.configuration_key: entry.configuration_value
                     for entry in items}
        task_api.API(context).update_overrides(instance.id, overrides)
def _delete_resources():
    # Refuse to delete while the instance is still building.
    if self.is_building:
        raise exception.UnprocessableEntity("Instance %s is not ready."
                                            % self.id)
    LOG.debug("Deleting instance with compute id = %s." %
              self.db_info.compute_instance_id)
    # Imported here rather than at module level -- presumably to avoid a
    # circular import (TODO confirm).
    from trove.cluster.models import is_cluster_deleting
    cluster_id = self.db_info.cluster_id
    if cluster_id is not None:
        # A member may only be deleted as part of a cluster teardown.
        if not is_cluster_deleting(self.context, cluster_id):
            raise exception.ClusterInstanceOperationNotSupported()
    self.update_db(task_status=InstanceTasks.DELETING,
                   configuration_id=None)
    task_api.API(self.context).delete_instance(self.id)
def unassign_configuration(self):
    """Detach the attached configuration group, if one exists."""
    LOG.debug("Unassigning the configuration from the instance %s.",
              self.id)
    if not (self.configuration and self.configuration.id):
        LOG.debug("No configuration found on instance. Skipping.")
        return
    config_id = self.configuration.id
    LOG.debug("Unassigning the configuration id %s.", config_id)
    flavor = self.get_flavor()
    LOG.debug("Configuration being unassigned; "
              "Marking restart required.")
    self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
    task_api.API(self.context).unassign_configuration(
        self.id, flavor, config_id)
def _create_resources():
    security_groups = None
    if backup_id is not None:
        # Restoring from a backup: it must be finished and its object
        # must still exist in object storage.
        backup_info = Backup.get_by_id(context, backup_id)
        if backup_info.is_running:
            raise exception.BackupNotCompleteError(backup_id=backup_id)
        location = backup_info.location
        LOG.info(_("Checking if backup exist in '%s'") % location)
        if not Backup.check_object_exist(context, location):
            raise exception.BackupFileNotFound(location=location)
    # Record the new instance in BUILDING state before provisioning.
    db_info = DBInstance.create(name=name, flavor_id=flavor_id,
                                tenant_id=context.tenant,
                                volume_size=volume_size,
                                service_type=service_type,
                                task_status=InstanceTasks.BUILDING)
    LOG.debug(
        _("Tenant %s created new Trove instance %s...") %
        (context.tenant, db_info.id))
    # Guest service status starts as NEW until the agent reports in.
    service_status = InstanceServiceStatus.create(
        instance_id=db_info.id,
        status=ServiceStatuses.NEW)
    if CONF.trove_dns_support:
        # Ask the DNS driver for a hostname and persist it on the record.
        dns_client = create_dns_client(context)
        hostname = dns_client.determine_hostname(db_info.id)
        db_info.hostname = hostname
        db_info.save()
    if CONF.trove_security_groups_support:
        # Create a per-instance security group and pass its name along.
        security_group = SecurityGroup.create_for_instance(
            db_info.id, context)
        security_groups = [security_group["name"]]
    # Hand the heavy provisioning work off to the task manager.
    task_api.API(context).create_instance(db_info.id, name, flavor,
                                          image_id, databases, users,
                                          service_type, volume_size,
                                          security_groups, backup_id)
    return SimpleInstance(context, db_info, service_status)
def delete(self):
    """Delete the cluster and all of its member instances (async)."""
    allowed = (ClusterTasks.NONE, ClusterTasks.DELETING)
    if self.db_info.task_status not in allowed:
        # Another cluster task is in flight; refuse the delete.
        current_task = self.db_info.task_status.name
        msg = _("This action cannot be performed on the cluster while "
                "the current cluster task is '%s'.") % current_task
        LOG.error(msg)
        raise exception.UnprocessableEntity(msg)
    members = inst_models.DBInstance.find_all(cluster_id=self.id,
                                              deleted=False).all()
    self.update_db(task_status=ClusterTasks.DELETING)
    for member in members:
        inst_models.load_any_instance(self.context, member.id).delete()
    task_api.API(self.context).delete_cluster(self.id)
def delete(self):
    """Delete member instances, the server group, then the cluster."""
    self.validate_cluster_available([ClusterTasks.NONE,
                                    ClusterTasks.DELETING])
    members = inst_models.DBInstance.find_all(cluster_id=self.id,
                                              deleted=False).all()
    self.update_db(task_status=ClusterTasks.DELETING)
    # The server group is force-deleted up front: loading it requires the
    # member instances to still exist, and instance removal takes long
    # enough that some might linger if we deleted the group afterwards.
    srv_grp.ServerGroup.delete(self.context, self.server_group,
                               force=True)
    for member in members:
        inst_models.load_any_instance(self.context, member.id).delete()
    task_api.API(self.context).delete_cluster(self.id)
def _create_resources():
    # Persist the backup record first; all metadata (location, checksum,
    # size) is supplied by the caller's enclosing scope.
    try:
        db_info = DBBackup.create(name=name,
                                  description=description,
                                  tenant_id=context.project_id,
                                  state=backup_state,
                                  instance_id=instance_id,
                                  parent_id=parent_id or last_backup_id,
                                  datastore_version_id=ds_version.id,
                                  deleted=False,
                                  location=location,
                                  checksum=checksum,
                                  backup_type=backup_type,
                                  size=size)
    except exception.InvalidModelError as ex:
        LOG.exception(
            "Unable to create backup record for "
            "instance: %s", instance_id)
        raise exception.BackupCreationError(str(ex))
    if not restore_from:
        # A fresh backup: hand the job off to the backup task API.
        backup_info = {'id': db_info.id,
                       'name': name,
                       'description': description,
                       'instance_id': instance_id,
                       'backup_type': db_info.backup_type,
                       'checksum': db_info.checksum,
                       'parent': parent,
                       'datastore': ds.name,
                       'datastore_version': ds_version.name,
                       'swift_container': swift_container
                       }
        api.API(context).create_backup(backup_info, instance_id)
    else:
        # Registering an existing backup (restore_from): no job to run,
        # just record the new id in the notification payload.
        context.notification.payload.update({'backup_id': db_info.id})
    return db_info
def setUp(self):
    """Build DB fixtures (datastore, instance, backup) and patch out
    external collaborators for instance-creation tests."""
    util.init_db()
    self.context = Mock()
    self.name = "name"
    self.flavor_id = 5
    self.image_id = "UUID"
    self.databases = []
    self.users = []
    # Minimal mysql datastore plus one active version to attach to.
    self.datastore = datastore_models.DBDatastore.create(
        id=str(uuid.uuid4()),
        name='mysql',
    )
    self.datastore_version = (
        datastore_models.DBDatastoreVersion.create(
            id=str(uuid.uuid4()),
            datastore_id=self.datastore.id,
            name="5.5",
            manager="mysql",
            image_id="image_id",
            packages="",
            active=True))
    self.volume_size = 1
    self.az = "az"
    self.nics = None
    self.configuration = None
    self.tenant_id = "UUID"
    self.datastore_version_id = str(uuid.uuid4())
    # An instance record in BUILDING state to run the tests against.
    self.db_info = DBInstance.create(
        name=self.name, flavor_id=self.flavor_id,
        tenant_id=self.tenant_id,
        volume_size=self.volume_size,
        datastore_version_id=self.datastore_version.id,
        task_status=InstanceTasks.BUILDING,
        configuration_id=self.configuration)
    self.backup_name = "name"
    self.descr = None
    self.backup_state = backup_models.BackupState.COMPLETED
    self.instance_id = self.db_info.id
    self.parent_id = None
    self.deleted = False
    # A completed backup tied to the instance above.
    self.backup = backup_models.DBBackup.create(
        name=self.backup_name,
        description=self.descr,
        tenant_id=self.tenant_id,
        state=self.backup_state,
        instance_id=self.instance_id,
        parent_id=self.parent_id,
        datastore_version_id=self.datastore_version.id,
        deleted=False)
    self.backup.size = 1.1
    self.backup.save()
    self.backup_id = self.backup.id
    # Patch external collaborators, keeping the originals so tearDown
    # can restore them.
    self.orig_client = models.create_nova_client
    models.create_nova_client = nova.fake_create_nova_client
    self.orig_api = task_api.API(self.context).create_instance
    task_api.API(self.context).create_instance = Mock()
    self.run_with_quotas = models.run_with_quotas
    models.run_with_quotas = Mock()
    self.check = backup_models.DBBackup.check_swift_object_exist
    backup_models.DBBackup.check_swift_object_exist = Mock(
        return_value=True)
    super(CreateInstanceTest, self).setUp()
def setUp(self, *args):
    """Create a task API bound to a fresh context, with RPC mocked out."""
    super(ApiTest, self).setUp()
    self.context = context.TroveContext()
    self.api = task_api.API(self.context)
    # Replace the real RPC client so no messages leave the test.
    self._mock_rpc_client()
def reapply(context, id, md5, include_clustered, batch_size, batch_delay,
            force):
    """Ask the task manager to re-apply a module to its instances."""
    client = task_api.API(context)
    client.reapply_module(id, md5, include_clustered, batch_size,
                          batch_delay, force)
def migrate(self, host=None):
    """Migrate this instance, optionally to a specific host (async)."""
    self.validate_can_perform_action()
    log_args = (self.id, host)
    LOG.info("Migrating instance id = %s, to host = %s" % log_args)
    self.update_db(task_status=InstanceTasks.MIGRATING)
    task_api.API(self.context).migrate(self.id, host)