def delete(self, req, tenant_id, id):
    """Delete a single backup and acknowledge with a 202."""
    LOG.info(_('Deleting backup for tenant %(tenant_id)s '
               'ID: %(backup_id)s') % {'tenant_id': tenant_id,
                                       'backup_id': id})
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    Backup.delete(ctxt, id)
    return wsgi.Result(None, 202)
def _action_reset_task_status(self, context, instance, body):
    """Reset the instance's task status to NONE and fail its backups.

    Backups left in a running state by the interrupted task would
    otherwise block future instance actions, so they are failed here.
    """
    LOG.debug("Setting Task-Status to NONE on instance %s." % instance.id)
    instance.reset_task_status()
    LOG.debug("Failing backups for instance %s." % instance.id)
    Backup.fail_for_instance(instance.id)
    # 202: accepted, work completes asynchronously.
    return wsgi.Result(None, 202)
def _create_replication_slave(self, context, instance_id, name, flavor,
                              image_id, databases, users,
                              datastore_manager, packages, volume_size,
                              availability_zone, root_password, nics,
                              overrides, slave_of_id, backup_id,
                              volume_type, modules):
    """Create one or more replicas of the master ``slave_of_id``.

    ``instance_id``/``root_password`` may be scalars or parallel lists;
    each entry becomes one replica built from a snapshot of the master.
    The snapshot backup (if one was created) is deleted in the
    ``finally`` block, whether creation succeeded or failed.
    """
    # Normalize scalar arguments into parallel lists.
    if type(instance_id) in [list]:
        ids = instance_id
        root_passwords = root_password
    else:
        ids = [instance_id]
        root_passwords = [root_password]
    replica_number = 0
    replica_backup_id = backup_id
    replica_backup_created = False
    replicas = []
    # Reuse the master's server group so replicas honor locality hints.
    master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
    server_group = master_instance_tasks.server_group
    scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
    LOG.debug("Using scheduler hints for locality: %s" % scheduler_hints)
    try:
        for replica_index in range(0, len(ids)):
            try:
                replica_number += 1
                LOG.debug("Creating replica %d of %d."
                          % (replica_number, len(ids)))
                instance_tasks = FreshInstanceTasks.load(
                    context, ids[replica_index])
                # The first iteration may create the snapshot backup;
                # later iterations reuse the same snapshot id.
                snapshot = instance_tasks.get_replication_master_snapshot(
                    context, slave_of_id, flavor, replica_backup_id,
                    replica_number=replica_number)
                replica_backup_id = snapshot['dataset']['snapshot_id']
                replica_backup_created = (replica_backup_id is not None)
                instance_tasks.create_instance(
                    flavor, image_id, databases, users, datastore_manager,
                    packages, volume_size, replica_backup_id,
                    availability_zone, root_passwords[replica_index],
                    nics, overrides, None, snapshot, volume_type,
                    modules, scheduler_hints)
                replicas.append(instance_tasks)
            except Exception:
                # if it's the first replica, then we shouldn't continue
                LOG.exception(_(
                    "Could not create replica %(num)d of %(count)d.")
                    % {'num': replica_number, 'count': len(ids)})
                if replica_number == 1:
                    raise
        for replica in replicas:
            replica.wait_for_instance(CONF.restore_usage_timeout, flavor)
    finally:
        # Clean up the temporary snapshot backup, if any was made.
        if replica_backup_created:
            Backup.delete(context, replica_backup_id)
def delete(self, req, tenant_id, id):
    """Delete a backup, emitting a delete notification around the call."""
    LOG.info(_('Deleting backup for tenant %(tenant_id)s '
               'ID: %(backup_id)s') % {'tenant_id': tenant_id,
                                       'backup_id': id})
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    ctxt.notification = notification.DBaaSBackupDelete(ctxt, request=req)
    with StartNotification(ctxt, backup_id=id):
        Backup.delete(ctxt, id)
    return wsgi.Result(None, 202)
def _create_resources():
    """Create the DB records and kick off instance provisioning.

    Closure over the enclosing create() arguments (name, flavor_id,
    backup_id, context, ...).  Validates any restore backup, creates
    the DBInstance and service-status rows, optionally wires DNS and
    security groups, then hands off to the task manager.
    """
    security_groups = None
    if backup_id is not None:
        # Restoring: the source backup must be finished and its object
        # must still exist in storage.
        backup_info = Backup.get_by_id(context, backup_id)
        if backup_info.is_running:
            raise exception.BackupNotCompleteError(backup_id=backup_id)
        location = backup_info.location
        LOG.info(_("Checking if backup exist in '%s'") % location)
        if not Backup.check_object_exist(context, location):
            raise exception.BackupFileNotFound(location=location)
    db_info = DBInstance.create(name=name, flavor_id=flavor_id,
                                tenant_id=context.tenant,
                                volume_size=volume_size,
                                service_type=service_type,
                                task_status=InstanceTasks.BUILDING)
    LOG.debug(_("Tenant %(tenant)s created new "
                "Trove instance %(db)s...")
              % {'tenant': context.tenant, 'db': db_info.id})
    service_status = InstanceServiceStatus.create(
        instance_id=db_info.id,
        status=rd_instance.ServiceStatuses.NEW)
    if CONF.trove_dns_support:
        dns_client = create_dns_client(context)
        hostname = dns_client.determine_hostname(db_info.id)
        db_info.hostname = hostname
        db_info.save()
    if CONF.trove_security_groups_support:
        security_group = SecurityGroup.create_for_instance(
            db_info.id, context)
        if CONF.trove_security_groups_rules_support:
            # Same port twice: single-port range (from == to).
            SecurityGroupRule.create_sec_group_rule(
                security_group,
                CONF.trove_security_group_rule_protocol,
                CONF.trove_security_group_rule_port,
                CONF.trove_security_group_rule_port,
                CONF.trove_security_group_rule_cidr,
                context
            )
        security_groups = [security_group["name"]]
    # Asynchronous provisioning; the caller polls status afterwards.
    task_api.API(context).create_instance(db_info.id, name, flavor,
                                          image_id, databases, users,
                                          service_type, volume_size,
                                          security_groups, backup_id,
                                          availability_zone)
    return SimpleInstance(context, db_info, service_status)
def _create_replication_slave(self, context, instance_id, name, flavor,
                              image_id, databases, users,
                              datastore_manager, packages, volume_size,
                              availability_zone, root_password, nics,
                              overrides, slave_of_id, backup_id):
    """Create one or more replication slaves of ``slave_of_id``.

    ``instance_id``/``root_password`` may be scalars or parallel lists;
    each entry becomes one replica built from a snapshot of the master.
    The snapshot backup is deleted in ``finally`` once all replicas are
    up (or creation has failed).

    Fixes:
      * the failure log used ``len(instance_id)`` — the length of the id
        *string* when a single id was passed; use ``len(ids)``.
      * guard ``replica_backup_created`` on the snapshot id being set,
        so ``Backup.delete(context, None)`` can never run in ``finally``
        (matches the sibling implementations of this method).
    """
    # Normalize scalar arguments into parallel lists.
    if isinstance(instance_id, list):
        ids = instance_id
        root_passwords = root_password
    else:
        ids = [instance_id]
        root_passwords = [root_password]
    replica_number = 0
    replica_backup_id = backup_id
    replica_backup_created = False
    replicas = []
    try:
        for replica_index in range(0, len(ids)):
            try:
                replica_number += 1
                LOG.debug("Creating replica %d of %d."
                          % (replica_number, len(ids)))
                instance_tasks = FreshInstanceTasks.load(
                    context, ids[replica_index])
                snapshot = instance_tasks.get_replication_master_snapshot(
                    context, slave_of_id, flavor, replica_backup_id,
                    replica_number=replica_number)
                replica_backup_id = snapshot['dataset']['snapshot_id']
                # Only schedule cleanup when a snapshot backup exists.
                replica_backup_created = (replica_backup_id is not None)
                instance_tasks.create_instance(
                    flavor, image_id, databases, users, datastore_manager,
                    packages, volume_size, replica_backup_id,
                    availability_zone, root_passwords[replica_index],
                    nics, overrides, None, snapshot)
                replicas.append(instance_tasks)
            except Exception:
                # if it's the first replica, then we shouldn't continue
                LOG.exception(_(
                    "Could not create replica %(num)d of %(count)d.")
                    % {'num': replica_number, 'count': len(ids)})
                if replica_number == 1:
                    raise
        for replica in replicas:
            replica.wait_for_instance(CONF.restore_usage_timeout, flavor)
            #rds-start
            replica.create_monitor()
            #rds-end
    finally:
        if replica_backup_created:
            Backup.delete(context, replica_backup_id)
def find(self, req, tenant_id, id):
    """List all children and grandchildren of the given backup.

    Fix: the two adjacent log-string literals concatenated without a
    separating space, producing '...%(tenant_id)sID: ...' in the log.
    """
    LOG.info(_('Listing all children and grandchildren for tenant '
               '%(tenant_id)s ID: %(backup_id)s')
             % {'tenant_id': tenant_id, 'backup_id': id})
    context = req.environ[wsgi.CONTEXT_KEY]
    child_list = []
    Backup.find_children(context, id, child_list)
    # NOTE(review): presumably find_children appends the root backup
    # itself last and this drops it so only descendants are returned —
    # confirm it always appends at least one element, otherwise this
    # raises IndexError on an empty list.
    del child_list[-1]
    view = views.BackupViews(child_list)
    paged = pagination.SimplePaginatedDataView(req.url, 'backups',
                                               view, None)
    return wsgi.Result(paged.data(), 200)
def delete(self, req, tenant_id, id):
    """Delete a backup after checking delete policy on its owner tenant."""
    LOG.info(_('Deleting backup for tenant %(tenant_id)s '
               'ID: %(backup_id)s') % {'tenant_id': tenant_id,
                                       'backup_id': id})
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    target = Backup.get_by_id(ctxt, id)
    policy.authorize_on_target(ctxt, 'backup:delete',
                               {'tenant': target.tenant_id})
    ctxt.notification = notification.DBaaSBackupDelete(ctxt, request=req)
    with StartNotification(ctxt, backup_id=id):
        Backup.delete(ctxt, id)
    return wsgi.Result(None, 202)
def _action_reset_status(self, context, req, instance, body):
    """Reset an instance's status and fail its backups.

    Authorizes 'force_delete' when the reset is part of a forced
    delete, otherwise 'reset_status'; wraps the work in a reset-status
    notification.
    """
    if 'force_delete' in body['reset_status']:
        self.authorize_instance_action(context, 'force_delete', instance)
    else:
        self.authorize_instance_action(
            context, 'reset_status', instance)
    context.notification = notification.DBaaSInstanceResetStatus(
        context, request=req)
    with StartNotification(context, instance_id=instance.id):
        instance.reset_status()
        # Running backups would block further actions on the instance.
        LOG.debug("Failing backups for instance %s." % instance.id)
        backup_model.fail_for_instance(instance.id)
    return wsgi.Result(None, 202)
def validate_can_perform_action(self): """ Raises exception if an instance action cannot currently be performed. """ # cases where action cannot be performed if self.db_info.server_status != "ACTIVE": status = self.db_info.server_status elif ( self.db_info.task_status != InstanceTasks.NONE and self.db_info.task_status != InstanceTasks.RESTART_REQUIRED ): status = self.db_info.task_status elif not self.datastore_status.status.action_is_allowed: status = self.status elif Backup.running(self.id): status = InstanceStatus.BACKUP else: # action can be performed return msg = _( "Instance %(instance_id)s is not currently available for an " "action to be performed (status was %(action_status)s)." ) % {"instance_id": self.id, "action_status": status} LOG.error(msg) raise exception.UnprocessableEntity(msg)
def show(self, req, tenant_id, id):
    """Return details of one backup."""
    LOG.debug("Showing a backup for tenant %s ID: '%s'" % (tenant_id, id))
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    found = Backup.get_by_id(ctxt, id)
    return wsgi.Result(views.BackupView(found).data(), 200)
def index(self, req, tenant_id):
    """Return all backups for a tenant, decorated with their db type.

    Query-string parameters are validated and passed through as search
    conditions; a 'chain' parameter switches to whole-chain lookup.
    """
    LOG.debug("Listing Backups for tenant '%s'" % tenant_id)
    context = req.environ[wsgi.CONTEXT_KEY]
    # Build search conditions from the validated query string.
    search_ops = {}
    search_ops.update(req.GET)
    search_ops = self._validate_search_ops(search_ops)
    op_chain = search_ops.get('chain', None)
    if op_chain:
        backups = KSC_Backup.get_chain_whole(context, op_chain)
    else:
        backups = Backup.list(context, conditions=search_ops)
    LOG.debug("index.backups: '%s'", backups)
    bks = []
    for backup in backups:
        try:
            # Resolve the datastore name from the backup's image id.
            ds, ds_version = ds_patch_models.find_datastore_by_image_id(
                backup['service_image_id'])
            backup.db_type = ds.name
        except Exception as ex:
            # Best-effort decoration: an unresolved image id leaves the
            # type empty rather than failing the whole listing.
            backup['db_type'] = ""
            LOG.warn("Failed get db type information of backup %s, %s"
                     % (backup['id'], ex))
        bks.append(backup)
    backups = bks
    return wsgi.Result(views.BackupViews(backups).data(), 200)
def backups(self, req, tenant_id, id):
    """List every backup recorded for one instance."""
    LOG.info(_("req : '%s'\n\n") % req)
    LOG.info(_("Indexing backups for instance '%s'") % id)
    result = backup_model.list_for_instance(id)
    return wsgi.Result(backup_views.BackupViews(result).data(), 200)
def show(self, req, tenant_id, id):
    """Fetch and render a single backup."""
    LOG.info(_("Showing a backup for tenant '%s'") % tenant_id)
    LOG.info(_("id : '%s'\n\n") % id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    found = Backup.get_by_id(ctxt, id)
    return wsgi.Result(views.BackupView(found).data(), 200)
def index(self, req, tenant_id):
    """Return every backup owned by the tenant."""
    LOG.debug("Listing Backups for tenant '%s'" % tenant_id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    all_backups = Backup.list(ctxt)
    return wsgi.Result(views.BackupViews(all_backups).data(), 200)
def create_backup(self, backup_id):
    """Fake backup creation: flip the backup to COMPLETED after a delay."""
    from trove.backup.models import Backup, BackupState
    backup = Backup.get_by_id(backup_id)

    def _complete():
        backup.state = BackupState.COMPLETED
        backup.save()

    self.event_spawn(1.0, _complete)
def status(self):
    """Derive the user-facing instance status.

    Combines, in priority order: taskmanager state, the Nova server
    status, any running backup, and finally the guest service status.
    """
    # Check for taskmanager errors.
    if self.db_info.task_status.is_error:
        return InstanceStatus.ERROR
    # Check for taskmanager status.
    action = self.db_info.task_status.action
    if "BUILDING" == action:
        if "ERROR" == self.db_info.server_status:
            return InstanceStatus.ERROR
        return InstanceStatus.BUILD
    if "REBOOTING" == action:
        return InstanceStatus.REBOOT
    if "RESIZING" == action:
        return InstanceStatus.RESIZE
    if "RESTART_REQUIRED" == action:
        return InstanceStatus.RESTART_REQUIRED
    if InstanceTasks.PROMOTING.action == action:
        return InstanceStatus.PROMOTE
    if InstanceTasks.EJECTING.action == action:
        return InstanceStatus.EJECT
    if InstanceTasks.LOGGING.action == action:
        return InstanceStatus.LOGGING
    # Check for server status.
    if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT",
                                      "RESIZE"]:
        return self.db_info.server_status
    # As far as Trove is concerned, Nova instances in VERIFY_RESIZE should
    # still appear as though they are in RESIZE.
    if self.db_info.server_status in ["VERIFY_RESIZE"]:
        return InstanceStatus.RESIZE
    # Check if there is a backup running for this instance
    if Backup.running(self.id):
        return InstanceStatus.BACKUP
    # Report as Shutdown while deleting, unless there's an error.
    if "DELETING" == action:
        if self.db_info.server_status in ["ACTIVE", "SHUTDOWN",
                                          "DELETED"]:
            return InstanceStatus.SHUTDOWN
        else:
            LOG.error(
                _LE("While shutting down instance (%(instance)s): "
                    "server had status (%(status)s)."),
                {"instance": self.id,
                 "status": self.db_info.server_status},
            )
            return InstanceStatus.ERROR
    # Check against the service status.
    # The service is only paused during a reboot.
    if tr_instance.ServiceStatuses.PAUSED == self.datastore_status.status:
        return InstanceStatus.REBOOT
    # If the service status is NEW, then we are building.
    if tr_instance.ServiceStatuses.NEW == self.datastore_status.status:
        return InstanceStatus.BUILD
    # For everything else we can look at the service status mapping.
    return self.datastore_status.status.api_status
def _relocate_master(self, master_id, slave_id, backup_id=None):
    """Re-point a slave at a master. Deprecated ("desperated" in the
    original note, 2014/9/29) — do not use.

    Determines the master's binlog file/position — live from the master
    guest, or from the backup's stored binlog info when ``backup_id``
    belongs to the same instance group — then tells the slave guest to
    relocate its replication source.

    :param master_id: id of the master instance.
    :param slave_id: id of the slave instance to re-point.
    :param backup_id: optional backup whose binlog coordinates to reuse.
    """
    master_group_item = InstanceGroupItem.get_by_instance_id(self.context,
                                                             master_id)

    def __show_master_status(inst_id):
        # Ask the guest for master status; return (binlog file, position).
        _instance = self.load(self.context, inst_id)
        _guest = _instance.get_guest()
        mStatus = _guest.ksc_show_master_status()
        log_path, log_pos = mStatus['file'], mStatus['position']
        return log_path, log_pos

    if backup_id is None:
        log_path, log_pos = __show_master_status(master_id)
    else:
        bk_info = Backup.get_by_id(self.context, backup_id)
        # A backup from a different group has unrelated binlog
        # coordinates; fall back to asking the master directly.
        if master_group_item.group_id == bk_info.group_id:
            log_path, log_pos = Backup.get_binlog_info(self.context,
                                                       backup_id)
        else:
            log_path, log_pos = __show_master_status(master_id)
    LOG.debug("relocate master instance %s get binlog_path:%s binlog_pos:%s"
              % (master_id, log_path, log_pos))
    group_item = master_group_item
    master_host = self._get_instance_ip(master_id)
    master_guest = FreshInstanceTasks.load(self.context, master_id).guest
    master_port = int(master_guest.ksc_list_variables(["port"])["port"])
    repl_user = CONF.rds_rpl_user
    # Replication password is derived deterministically from the group id.
    repl_password = self._gen_rpl_user_password(group_item.group_id)
    master_log_file = log_path
    master_log_pos = log_pos
    slave_instance = self.load(self.context, slave_id)
    slave_guest = slave_instance.get_guest()
    slave_guest.ksc_relocate_master(master_host=master_host,
                                    master_port=master_port,
                                    repl_user=repl_user,
                                    repl_password=repl_password,
                                    master_log_file=master_log_file,
                                    master_log_pos=master_log_pos)
def show(self, req, tenant_id, id):
    """Return one backup, decorated with its source instance name."""
    LOG.debug("Showing a backup for tenant %s ID: '%s'" % (tenant_id, id))
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    bkup = Backup.get_by_id(ctxt, id)
    inst = get_db_info(ctxt, bkup.instance_id, None, True)
    bkup.instance_name = inst.name
    return wsgi.Result(views.BackupView(bkup).data(), 200)
def backups(self, req, tenant_id, id):
    """Return a paginated list of backups for one instance."""
    LOG.info(_("req : '%s'\n\n") % req)
    LOG.info(_("Indexing backups for instance '%s'") % id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    items, marker = backup_model.list_for_instance(ctxt, id)
    paged = pagination.SimplePaginatedDataView(
        req.url, "backups", backup_views.BackupViews(items), marker)
    return wsgi.Result(paged.data(), 200)
def create(self, req, body, tenant_id):
    """Kick off a backup for the instance named in the request body."""
    LOG.debug("Creating a Backup for tenant '%s'" % tenant_id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    payload = body["backup"]
    new_backup = Backup.create(ctxt, payload["instance"],
                               payload["name"],
                               payload.get("description"))
    return wsgi.Result(views.BackupView(new_backup).data(), 202)
def show(self, req, tenant_id, id):
    """Return one backup after a policy check against its owner tenant."""
    LOG.debug("Showing a backup for tenant %s ID: '%s'" % (tenant_id, id))
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    found = Backup.get_by_id(ctxt, id)
    policy.authorize_on_target(ctxt, 'backup:show',
                               {'tenant': found.tenant_id})
    return wsgi.Result(views.BackupView(found).data(), 200)
def create_backup(self, backup_id):
    """Fake backup creation: mark COMPLETED with a dummy location later."""
    from trove.backup.models import Backup, BackupState
    bkup = Backup.get_by_id(context=None, backup_id=backup_id)

    def _finish():
        bkup.state = BackupState.COMPLETED
        bkup.location = 'http://localhost/path/to/backup'
        bkup.save()

    eventlet.spawn_after(1.0, _finish)
def create(self, req, body, tenant_id):
    """Create a backup; a parent_id makes it incremental."""
    LOG.debug("Creating a Backup for tenant '%s'" % tenant_id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    payload = body['backup']
    new_backup = Backup.create(ctxt, payload['instance'],
                               payload['name'],
                               payload.get('description'),
                               parent_id=payload.get('parent_id'))
    return wsgi.Result(views.BackupView(new_backup).data(), 202)
def index(self, req, tenant_id):
    """Return the tenant's backups as a paginated view."""
    LOG.debug("Listing Backups for tenant '%s'" % tenant_id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    items, marker = Backup.list(ctxt)
    paged = pagination.SimplePaginatedDataView(
        req.url, 'backups', views.BackupViews(items), marker)
    return wsgi.Result(paged.data(), 200)
def delete_backup(self):
    """Delete the first backup found on a SINGLE-type instance, then
    verify it reaches the DELETED state."""
    self.inst_id = utils.get_instance_id_bytenant(DBInstanceType.SINGLE)
    backups = Backup.list_for_instance(self.inst_id)
    target = next(iter(backups), None)
    if target is None:
        raise Exception("not found backup")
    utils.delete_backup_byclient(target.id)
    utils.check_backup_status(target.id, 'DELETED', deleted=True)
def _create_replication_slave(self, context, instance_id, name, flavor,
                              image_id, databases, users,
                              datastore_manager, packages, volume_size,
                              availability_zone, root_password, nics,
                              overrides, slave_of_id):
    """Create a single replication slave from a snapshot of the master.

    The snapshot's backup is deleted once the create call returns (or
    raises); the slave is then attached to the master.
    """
    instance_tasks = FreshInstanceTasks.load(context, instance_id)
    snapshot = instance_tasks.get_replication_master_snapshot(context,
                                                              slave_of_id)
    try:
        instance_tasks.create_instance(flavor, image_id, databases,
                                       users, datastore_manager,
                                       packages, volume_size,
                                       snapshot['dataset']['snapshot_id'],
                                       availability_zone, root_password,
                                       nics, overrides, None)
    finally:
        # NOTE(review): the snapshot backup is deleted even on success,
        # and before attach_replication_slave runs — confirm this
        # ordering is intended.
        Backup.delete(context, snapshot['dataset']['snapshot_id'])
    instance_tasks.attach_replication_slave(snapshot, flavor)
def index(self, req, tenant_id):
    """List the tenant's backups, optionally filtered by datastore."""
    LOG.debug("Listing backups for tenant %s", tenant_id)
    ds_filter = req.GET.get('datastore')
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    policy.authorize_on_tenant(ctxt, 'backup:index')
    items, marker = Backup.list(ctxt, ds_filter)
    paged = pagination.SimplePaginatedDataView(
        req.url, 'backups', views.BackupViews(items), marker)
    return wsgi.Result(paged.data(), 200)
def create_backup(self, backup_info):
    """Fake backup creation: complete with dummy metadata after 10s."""
    from trove.backup.models import Backup
    from trove.backup.state import BackupState
    bkup = Backup.get_by_id(context=None, backup_id=backup_info['id'])

    def _finish():
        bkup.state = BackupState.COMPLETED
        bkup.location = 'http://localhost/path/to/backup'
        bkup.checksum = 'fake-md5-sum'
        bkup.size = BACKUP_SIZE
        bkup.save()

    eventlet.spawn_after(10, _finish)
def index(self, req, tenant_id):
    """List the tenant's backups, optionally filtered by datastore.

    Fix: pass the tenant id as a lazy logging argument instead of eager
    %-interpolation, consistent with the sibling controllers.
    """
    LOG.debug("Listing backups for tenant %s", tenant_id)
    datastore = req.GET.get('datastore')
    context = req.environ[wsgi.CONTEXT_KEY]
    policy.authorize_on_tenant(context, 'backup:index')
    backups, marker = Backup.list(context, datastore)
    view = views.BackupViews(backups)
    paged = pagination.SimplePaginatedDataView(req.url, 'backups', view,
                                               marker)
    return wsgi.Result(paged.data(), 200)
def create_backup(self, backup_info):
    """Fake backup creation: complete with dummy metadata after 7.5s."""
    from trove.backup.models import Backup
    from trove.backup.state import BackupState
    bkup = Backup.get_by_id(context=None, backup_id=backup_info['id'])

    def _finish():
        bkup.state = BackupState.COMPLETED
        bkup.location = 'http://localhost/path/to/backup'
        bkup.checksum = 'fake-md5-sum'
        bkup.size = BACKUP_SIZE
        bkup.save()

    eventlet.spawn_after(7.5, _finish)
def status(self):
    """Map task, server, backup and service state to an InstanceStatus."""
    ### Check for taskmanager errors.
    if self.db_info.task_status.is_error:
        return InstanceStatus.ERROR
    ### Check for taskmanager status.
    ACTION = self.db_info.task_status.action
    if 'BUILDING' == ACTION:
        if 'ERROR' == self.db_info.server_status:
            return InstanceStatus.ERROR
        return InstanceStatus.BUILD
    if 'REBOOTING' == ACTION:
        return InstanceStatus.REBOOT
    if 'RESIZING' == ACTION:
        return InstanceStatus.RESIZE
    ### Check for server status.
    if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT",
                                      "RESIZE"]:
        return self.db_info.server_status
    # As far as Trove is concerned, Nova instances in VERIFY_RESIZE should
    # still appear as though they are in RESIZE.
    if self.db_info.server_status in ["VERIFY_RESIZE"]:
        return InstanceStatus.RESIZE
    ### Check if there is a backup running for this instance
    if Backup.running(self.id):
        return InstanceStatus.BACKUP
    ### Report as Shutdown while deleting, unless there's an error.
    if 'DELETING' == ACTION:
        if self.db_info.server_status in ["ACTIVE", "SHUTDOWN",
                                          "DELETED"]:
            return InstanceStatus.SHUTDOWN
        else:
            LOG.error(_("While shutting down instance (%(instance)s): "
                        "server had status (%(status)s).")
                      % {'instance': self.id,
                         'status': self.db_info.server_status})
            return InstanceStatus.ERROR
    ### Check against the service status.
    # The service is only paused during a reboot.
    if rd_instance.ServiceStatuses.PAUSED == self.service_status.status:
        return InstanceStatus.REBOOT
    # If the service status is NEW, then we are building.
    if rd_instance.ServiceStatuses.NEW == self.service_status.status:
        return InstanceStatus.BUILD
    # For everything else we can look at the service status mapping.
    return self.service_status.status.api_status
def backups(self, req, tenant_id, id):
    """Paginated backup listing for one instance, with authorization."""
    LOG.info("Listing backups for instance '%s'", id)
    LOG.debug("req : '%s'\n\n", req)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    inst = models.Instance.load(ctxt, id)
    self.authorize_instance_action(ctxt, 'backups', inst)
    items, marker = backup_model.list_for_instance(ctxt, id)
    paged = pagination.SimplePaginatedDataView(
        req.url, 'backups', backup_views.BackupViews(items), marker)
    return wsgi.Result(paged.data(), 200)
def guest_log_action(self, req, body, tenant_id, id):
    """Processes a guest log.

    Validates Swift availability (logs are published there), loads the
    instance, then forwards the enable/disable/publish/discard flags to
    the guest agent.
    """
    LOG.info("Processing log for tenant %s", tenant_id)
    context = req.environ[wsgi.CONTEXT_KEY]
    try:
        backup_model.verify_swift_auth_token(context)
    except exception.SwiftNotFound:
        raise exception.LogsNotAvailable()
    instance = models.Instance.load(context, id)
    if not instance:
        raise exception.NotFound(uuid=id)
    log_name = body['name']
    enable = body.get('enable', None)
    disable = body.get('disable', None)
    publish = body.get('publish', None)
    discard = body.get('discard', None)
    # Enabling and disabling in one request is contradictory.
    if enable and disable:
        raise exception.BadRequest(_("Cannot enable and disable log."))
    client = create_guest_client(context, id)
    guest_log = client.guest_log_action(log_name, enable, disable,
                                        publish, discard)
    return wsgi.Result({'log': guest_log}, 200)
def create(self, req, body, tenant_id):
    """Create a backup, wrapping the call in create notifications."""
    LOG.info(_("Creating a backup for tenant %s"), tenant_id)
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    payload = body['backup']
    instance = payload['instance']
    name = payload['name']
    desc = payload.get('description')
    parent = payload.get('parent_id')
    ctxt.notification = notification.DBaaSBackupCreate(ctxt, request=req)
    with StartNotification(ctxt, name=name, instance_id=instance,
                           description=desc, parent_id=parent):
        new_backup = Backup.create(ctxt, instance, name, desc,
                                   parent_id=parent)
    return wsgi.Result(views.BackupView(new_backup).data(), 202)
def status(self):
    """Map task, server, backup and service state to an InstanceStatus."""
    ### Check for taskmanager errors.
    if self.db_info.task_status.is_error:
        return InstanceStatus.ERROR
    ### Check for taskmanager status.
    ACTION = self.db_info.task_status.action
    if 'BUILDING' == ACTION:
        if 'ERROR' == self.db_info.server_status:
            return InstanceStatus.ERROR
        return InstanceStatus.BUILD
    if 'REBOOTING' == ACTION:
        return InstanceStatus.REBOOT
    if 'RESIZING' == ACTION:
        return InstanceStatus.RESIZE
    ### Check for server status.
    if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT",
                                      "RESIZE"]:
        return self.db_info.server_status
    ### Check if there is a backup running for this instance
    if Backup.running(self.id):
        return InstanceStatus.BACKUP
    ### Report as Shutdown while deleting, unless there's an error.
    if 'DELETING' == ACTION:
        if self.db_info.server_status in ["ACTIVE", "SHUTDOWN",
                                          "DELETED"]:
            return InstanceStatus.SHUTDOWN
        else:
            LOG.error(_("While shutting down instance (%(instance)s): "
                        "server had status (%(status)s).")
                      % {'instance': self.id,
                         'status': self.db_info.server_status})
            return InstanceStatus.ERROR
    ### Check against the service status.
    # The service is only paused during a reboot.
    if rd_instance.ServiceStatuses.PAUSED == self.service_status.status:
        return InstanceStatus.REBOOT
    # If the service status is NEW, then we are building.
    if rd_instance.ServiceStatuses.NEW == self.service_status.status:
        return InstanceStatus.BUILD
    # For everything else we can look at the service status mapping.
    return self.service_status.status.api_status
def validate_can_perform_action(self):
    """
    Raises exception if an instance action cannot currently be performed.
    """
    if self.db_info.server_status != 'ACTIVE':
        # Nova server must be ACTIVE.
        status = self.db_info.server_status
    elif self.db_info.task_status != InstanceTasks.NONE:
        # Another task is already in flight.
        status = self.db_info.task_status
    elif not self.service_status.status.action_is_allowed:
        status = self.status
    elif Backup.running(self.id):
        # A backup in progress blocks other actions.
        status = InstanceStatus.BACKUP
    else:
        # Action can be performed.
        return
    msg = ("Instance is not currently available for an action to be "
           "performed (status was %s)." % status)
    LOG.error(msg)
    raise exception.UnprocessableEntity(msg)
def create(self, req, body, tenant_id):
    """Create a backup, or register one from an external location.

    When ``restore_from`` is absent a source instance is required, and
    a missing ``swift_container`` falls back to the instance's backup
    strategy; the create call is wrapped in notifications.
    """
    LOG.info("Creating a backup for tenant %s", tenant_id)
    context = req.environ[wsgi.CONTEXT_KEY]
    policy.authorize_on_tenant(context, 'backup:create')
    data = body['backup']
    instance = data.get('instance')
    name = data['name']
    desc = data.get('description')
    parent = data.get('parent_id')
    incremental = data.get('incremental')
    swift_container = data.get('swift_container')
    restore_from = data.get('restore_from')
    context.notification = notification.DBaaSBackupCreate(context,
                                                          request=req)
    if not restore_from:
        # A normal backup needs a source instance.
        if not instance:
            raise exception.BackupCreationError('instance is missing.')
        if not swift_container:
            # Fall back to the container from the instance's configured
            # backup strategy, if one exists.
            instance_id = utils.get_id_from_href(instance)
            backup_strategy = BackupStrategy.get(context, instance_id)
            if backup_strategy:
                swift_container = backup_strategy.swift_container
    with StartNotification(context, name=name, instance_id=instance,
                           description=desc, parent_id=parent):
        backup = Backup.create(context, instance, name, desc,
                               parent_id=parent, incremental=incremental,
                               swift_container=swift_container,
                               restore_from=restore_from)
    return wsgi.Result(views.BackupView(backup).data(), 202)
def index(self, req, tenant_id):
    """List backups with optional datastore/instance/all-projects filters."""
    LOG.debug("Listing backups for tenant %s", tenant_id)
    ds_filter = req.GET.get('datastore')
    inst_filter = req.GET.get('instance_id')
    all_projects = strutils.bool_from_string(req.GET.get('all_projects'))
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    rule = ('backup:index:all_projects' if all_projects
            else 'backup:index')
    policy.authorize_on_tenant(ctxt, rule)
    items, marker = Backup.list(ctxt, datastore=ds_filter,
                                instance_id=inst_filter,
                                all_projects=all_projects)
    paged = pagination.SimplePaginatedDataView(
        req.url, 'backups', views.BackupViews(items), marker)
    return wsgi.Result(paged.data(), 200)
def validate_can_perform_action(self): """ Raises exception if an instance action cannot currently be performed. """ # cases where action cannot be performed if self.db_info.server_status != 'ACTIVE': status = self.db_info.server_status elif (self.db_info.task_status != InstanceTasks.NONE and self.db_info.task_status != InstanceTasks.RESTART_REQUIRED): status = self.db_info.task_status elif not self.datastore_status.status.action_is_allowed: status = self.status elif Backup.running(self.id): status = InstanceStatus.BACKUP else: # action can be performed return msg = (_("Instance %(instance_id)s is not currently available for an " "action to be performed (status was %(action_status)s).") % {'instance_id': self.id, 'action_status': status}) LOG.error(msg) raise exception.UnprocessableEntity(msg)
def delete(self, req, tenant_id, id):
    """Delete the given backup and acknowledge with a 202."""
    LOG.debug("Delete Backup for tenant: %s, ID: %s" % (tenant_id, id))
    ctxt = req.environ[wsgi.CONTEXT_KEY]
    Backup.delete(ctxt, id)
    return wsgi.Result(None, 202)
def create(cls, context, name, flavor_id, image_id, databases, users,
           service_type, volume_size, backup_id, availability_zone=None):
    """Validate inputs and create a new instance under quota control.

    Checks the flavor, volume/ephemeral support, and (for restores)
    that the source backup is complete and still present in Swift, then
    creates the DB records and dispatches provisioning inside
    ``run_with_quotas``.
    """
    client = create_nova_client(context)
    try:
        flavor = client.flavors.get(flavor_id)
    except nova_exceptions.NotFound:
        raise exception.FlavorNotFound(uuid=flavor_id)
    # Quota deltas consumed if _create_resources succeeds.
    deltas = {'instances': 1}
    if CONF.trove_volume_support:
        validate_volume_size(volume_size)
        deltas['volumes'] = volume_size
    else:
        if volume_size is not None:
            raise exception.VolumeNotSupported()
        # Without volumes, data lives on the flavor's ephemeral disk.
        ephemeral_support = CONF.device_path
        if ephemeral_support and flavor.ephemeral == 0:
            raise exception.LocalStorageNotSpecified(flavor=flavor_id)
    if backup_id is not None:
        backup_info = Backup.get_by_id(context, backup_id)
        if backup_info.is_running:
            raise exception.BackupNotCompleteError(backup_id=backup_id)
        if not backup_info.check_swift_object_exist(
                context,
                verify_checksum=CONF.verify_swift_checksum_on_restore):
            raise exception.BackupFileNotFound(
                location=backup_info.location)

    def _create_resources():
        # Runs inside run_with_quotas; closes over the validated args.
        db_info = DBInstance.create(name=name, flavor_id=flavor_id,
                                    tenant_id=context.tenant,
                                    volume_size=volume_size,
                                    service_type=service_type,
                                    task_status=InstanceTasks.BUILDING)
        LOG.debug(_("Tenant %(tenant)s created new "
                    "Trove instance %(db)s...")
                  % {'tenant': context.tenant, 'db': db_info.id})
        service_status = InstanceServiceStatus.create(
            instance_id=db_info.id,
            status=rd_instance.ServiceStatuses.NEW)
        if CONF.trove_dns_support:
            dns_client = create_dns_client(context)
            hostname = dns_client.determine_hostname(db_info.id)
            db_info.hostname = hostname
            db_info.save()
        # A restore keeps the source's root password, so only generate
        # one for fresh instances when root_on_create is enabled.
        root_password = None
        if CONF.root_on_create and not backup_id:
            root_password = uuidutils.generate_uuid()
        task_api.API(context).create_instance(db_info.id, name, flavor,
                                              image_id, databases, users,
                                              service_type, volume_size,
                                              backup_id,
                                              availability_zone,
                                              root_password)
        return SimpleInstance(context, db_info, service_status,
                              root_password)

    return run_with_quotas(context.tenant, deltas, _create_resources)
def _create_replication_slave(self, context, instance_id, name, flavor,
                              image_id, databases, users,
                              datastore_manager, packages, volume_size,
                              availability_zone, root_password, nics,
                              overrides, slave_of_id, backup_id,
                              volume_type, modules):
    """Create one or more replicas of the master ``slave_of_id``.

    ``instance_id``/``root_password`` may be scalars or parallel lists;
    each entry becomes one replica built from a snapshot of the master.
    The snapshot backup (if one was created) is deleted in the
    ``finally`` block, whether creation succeeded or failed.
    """
    # Normalize scalar arguments into parallel lists.
    if type(instance_id) in [list]:
        ids = instance_id
        root_passwords = root_password
    else:
        ids = [instance_id]
        root_passwords = [root_password]
    replica_number = 0
    replica_backup_id = backup_id
    replica_backup_created = False
    replicas = []
    # Reuse the master's server group so replicas honor locality hints.
    master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
    server_group = master_instance_tasks.server_group
    scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
    LOG.debug("Using scheduler hints for locality: %s", scheduler_hints)
    try:
        for replica_index in range(0, len(ids)):
            try:
                replica_number += 1
                LOG.debug("Creating replica %(num)d of %(count)d.", {
                    'num': replica_number, 'count': len(ids)
                })
                instance_tasks = FreshInstanceTasks.load(
                    context, ids[replica_index])
                # The first iteration may create the snapshot backup;
                # later iterations reuse the same snapshot id.
                snapshot = instance_tasks.get_replication_master_snapshot(
                    context, slave_of_id, flavor, replica_backup_id,
                    replica_number=replica_number)
                replica_backup_id = snapshot['dataset']['snapshot_id']
                replica_backup_created = (replica_backup_id is not None)
                instance_tasks.create_instance(
                    flavor, image_id, databases, users, datastore_manager,
                    packages, volume_size, replica_backup_id,
                    availability_zone, root_passwords[replica_index],
                    nics, overrides, None, snapshot, volume_type,
                    modules, scheduler_hints)
                replicas.append(instance_tasks)
            except Exception:
                # if it's the first replica, then we shouldn't continue
                LOG.exception(
                    _("Could not create replica %(num)d of %(count)d."), {
                        'num': replica_number, 'count': len(ids)
                    })
                if replica_number == 1:
                    raise
        for replica in replicas:
            replica.wait_for_instance(CONF.restore_usage_timeout, flavor)
    finally:
        # Clean up the temporary snapshot backup, if any was made.
        if replica_backup_created:
            Backup.delete(context, replica_backup_id)
def _create_replication_slave(self, context, instance_id, name, flavor,
                              image_id, databases, users,
                              datastore_manager, packages, volume_size,
                              availability_zone, root_password, nics,
                              overrides, slave_of_id, backup_id,
                              volume_type, modules):
    """Create one or more replicas of the master instance ``slave_of_id``.

    ``instance_id`` (and ``root_password``) may be a single value or a
    list — one entry per replica.  Each replica is seeded from a master
    snapshot.  After all replicas are ACTIVE, datastores that require it
    get a post-processing pass that exchanges replication details between
    master and slaves and completes the topology setup.

    A snapshot backup created here (rather than passed in as
    ``backup_id``) is deleted in the ``finally`` block.

    :raises: re-raises the creation failure if the *first* replica cannot
        be built; failures on later replicas are logged and skipped.
    """
    # Fix: isinstance() instead of the ``type(x) in [list]`` anti-pattern.
    if isinstance(instance_id, list):
        ids = instance_id
        root_passwords = root_password
    else:
        ids = [instance_id]
        root_passwords = [root_password]
    replica_number = 0
    replica_backup_id = backup_id
    replica_backup_created = False
    replicas = []

    # Place replicas according to the master's server group (locality).
    master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
    server_group = master_instance_tasks.server_group
    scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
    # Fix: lazy log-argument formatting instead of eager ``%``.
    LOG.debug("Using scheduler hints for locality: %s", scheduler_hints)

    try:
        for replica_index in range(0, len(ids)):
            try:
                replica_number += 1
                LOG.debug("Creating replica %d of %d.",
                          replica_number, len(ids))
                instance_tasks = FreshInstanceTasks.load(
                    context, ids[replica_index])
                snapshot = instance_tasks.get_replication_master_snapshot(
                    context, slave_of_id, flavor, replica_backup_id,
                    replica_number=replica_number)
                # Reuse the snapshot's backup for subsequent replicas;
                # remember whether we own it so it can be cleaned up.
                replica_backup_id = snapshot['dataset']['snapshot_id']
                replica_backup_created = (replica_backup_id is not None)
                instance_tasks.create_instance(
                    flavor, image_id, databases, users, datastore_manager,
                    packages, volume_size, replica_backup_id,
                    availability_zone, root_passwords[replica_index],
                    nics, overrides, None, snapshot, volume_type,
                    modules, scheduler_hints)
                replicas.append(instance_tasks)
            except Exception:
                # if it's the first replica, then we shouldn't continue
                LOG.exception(
                    _("Could not create replica %(num)d of %(count)d."),
                    {'num': replica_number, 'count': len(ids)})
                if replica_number == 1:
                    raise

        for replica in replicas:
            replica.wait_for_instance(CONF.restore_usage_timeout, flavor)

        # Some datastores requires completing configuration of replication
        # nodes with information that is only available after all the
        # instances has been started.
        if (master_instance_tasks.post_processing_required_for_replication(
        )):
            slave_instances = [
                BuiltInstanceTasks.load(context, slave.id)
                for slave in master_instance_tasks.slaves
            ]

            # Collect info from each slave post instance launch
            slave_detail = [
                slave_instance.get_replication_detail()
                for slave_instance in slave_instances
            ]

            # Pass info of all replication nodes to the master for
            # replication setup completion
            master_detail = master_instance_tasks.get_replication_detail()
            master_instance_tasks.complete_master_setup(slave_detail)

            # Pass info of all replication nodes to each slave for
            # replication setup completion
            for slave_instance in slave_instances:
                slave_instance.complete_slave_setup(master_detail,
                                                    slave_detail)

            # Push pending data/transactions from master to slaves
            master_instance_tasks.sync_data_to_slaves()

            # Set the status of all slave nodes to ACTIVE
            for slave_instance in slave_instances:
                slave_guest = remote.create_guest_client(
                    slave_instance.context, slave_instance.db_info.id,
                    slave_instance.datastore_version.manager)
                slave_guest.cluster_complete()
    finally:
        # Remove the master snapshot backup we created for seeding.
        if replica_backup_created:
            Backup.delete(context, replica_backup_id)
def create(cls, context, name, flavor_id, image_id, databases, users,
           datastore, datastore_version, volume_size, backup_id,
           availability_zone=None, nics=None, configuration_id=None):
    """Validate the request then provision a new Trove instance under quota.

    Datastore-aware variant: records the datastore version, applies any
    configuration-group overrides, and defaults ``nics`` from
    ``CONF.default_neutron_networks`` when none are supplied.  The actual
    resource creation runs inside ``run_with_quotas`` so instance/volume
    quotas are reserved atomically.

    :raises exception.FlavorNotFound: flavor_id is unknown to Nova.
    :raises exception.VolumeNotSupported: a volume size was given but
        volume support is disabled.
    :raises exception.LocalStorageNotSpecified: ephemeral storage is
        expected but the flavor has none.
    :raises exception.BackupNotCompleteError / BackupFileNotFound: the
        restore source is still running or missing from object storage.
    """
    client = create_nova_client(context)
    try:
        flavor = client.flavors.get(flavor_id)
    except nova_exceptions.NotFound:
        raise exception.FlavorNotFound(uuid=flavor_id)

    # Quota deltas to reserve: always one instance, plus the volume size
    # when volumes are supported.
    deltas = {'instances': 1}
    if CONF.trove_volume_support:
        validate_volume_size(volume_size)
        deltas['volumes'] = volume_size
    else:
        if volume_size is not None:
            raise exception.VolumeNotSupported()
        # NOTE(review): device_path truthiness is used as the "ephemeral
        # storage supported" flag here — confirm against CONF definition.
        ephemeral_support = CONF.device_path
        if ephemeral_support and flavor.ephemeral == 0:
            raise exception.LocalStorageNotSpecified(flavor=flavor_id)

    if backup_id is not None:
        # Restoring from a backup: it must be complete and its object
        # must still exist (optionally checksum-verified) in Swift.
        backup_info = Backup.get_by_id(context, backup_id)
        if backup_info.is_running:
            raise exception.BackupNotCompleteError(backup_id=backup_id)
        if not backup_info.check_swift_object_exist(
                context,
                verify_checksum=CONF.verify_swift_checksum_on_restore):
            raise exception.BackupFileNotFound(
                location=backup_info.location)

    # No networks given: fall back to the configured default Neutron nets.
    if not nics and CONF.default_neutron_networks:
        nics = []
        for net_id in CONF.default_neutron_networks:
            nics.append({"net-id": net_id})

    def _create_resources():
        # Runs under the quota reservation: create DB records, optional
        # DNS hostname, then hand off to the task manager.
        db_info = DBInstance.create(name=name, flavor_id=flavor_id,
                                    tenant_id=context.tenant,
                                    volume_size=volume_size,
                                    datastore_version_id=
                                    datastore_version.id,
                                    task_status=InstanceTasks.BUILDING,
                                    configuration_id=configuration_id)
        LOG.debug(_("Tenant %(tenant)s created new "
                    "Trove instance %(db)s...")
                  % {'tenant': context.tenant, 'db': db_info.id})

        # if a configuration group is associated with an instance,
        # generate an overrides dict to pass into the instance creation
        # method
        overrides = Configuration.get_configuration_overrides(
            context, configuration_id)
        service_status = InstanceServiceStatus.create(
            instance_id=db_info.id,
            status=tr_instance.ServiceStatuses.NEW)
        if CONF.trove_dns_support:
            dns_client = create_dns_client(context)
            hostname = dns_client.determine_hostname(db_info.id)
            db_info.hostname = hostname
            db_info.save()

        # Generate a root password only for fresh instances (a restore
        # keeps whatever credentials the backup contains).
        root_password = None
        if cls.get_root_on_create(
                datastore_version.manager) and not backup_id:
            root_password = utils.generate_random_password()
        # Asynchronous provisioning is delegated to the task manager.
        task_api.API(context).create_instance(db_info.id, name, flavor,
                                              image_id, databases, users,
                                              datastore_version.manager,
                                              datastore_version.packages,
                                              volume_size, backup_id,
                                              availability_zone,
                                              root_password, nics,
                                              overrides)
        return SimpleInstance(context, db_info, service_status,
                              root_password)

    return run_with_quotas(context.tenant, deltas, _create_resources)
def create(cls, context, name, flavor_id, image_id, databases, users,
           datastore, datastore_version, volume_size, backup_id,
           availability_zone=None, nics=None, configuration_id=None,
           slave_of_id=None, cluster_config=None):
    """Validate the request then provision a new Trove instance under quota.

    Full variant: datastore-specific config (volume support, device path,
    replication strategy), backup-size vs target-disk validation,
    replica-of checks, default Neutron networks, and optional cluster
    membership.  The actual resource creation runs inside
    ``run_with_quotas`` so instance/volume quotas are reserved atomically.

    :param slave_of_id: when set, this instance becomes a replica of the
        given source instance (which must not itself be a replica).
    :param cluster_config: optional dict with ``id``, ``shard_id`` and
        ``instance_type`` keys for cluster member instances.
    :raises exception.FlavorNotFound / VolumeNotSupported /
        LocalStorageNotSpecified / BackupNotCompleteError /
        BackupTooLarge / BackupFileNotFound /
        BackupDatastoreMismatchError / ReplicationNotSupported /
        Forbidden / NotFound: on the corresponding validation failure.
    """
    datastore_cfg = CONF.get(datastore_version.manager)
    client = create_nova_client(context)
    try:
        flavor = client.flavors.get(flavor_id)
    except nova_exceptions.NotFound:
        raise exception.FlavorNotFound(uuid=flavor_id)

    deltas = {'instances': 1}
    volume_support = datastore_cfg.volume_support
    if volume_support:
        validate_volume_size(volume_size)
        deltas['volumes'] = volume_size
        # Instance volume should have enough space for the backup
        # Backup, and volume sizes are in GBs
        target_size = volume_size
    else:
        target_size = flavor.disk  # local_storage
        if volume_size is not None:
            raise exception.VolumeNotSupported()
        if datastore_cfg.device_path:
            if flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)
            target_size = flavor.ephemeral  # ephemeral_Storage

    if backup_id is not None:
        # Restoring from a backup: must be complete, must fit on the
        # target disk, must exist in Swift, and must come from the same
        # datastore.
        backup_info = Backup.get_by_id(context, backup_id)
        if backup_info.is_running:
            raise exception.BackupNotCompleteError(backup_id=backup_id)
        if backup_info.size > target_size:
            raise exception.BackupTooLarge(backup_size=backup_info.size,
                                           disk_size=target_size)
        if not backup_info.check_swift_object_exist(
                context,
                verify_checksum=CONF.verify_swift_checksum_on_restore):
            raise exception.BackupFileNotFound(
                location=backup_info.location)
        if (backup_info.datastore_version_id
                and backup_info.datastore.name != datastore.name):
            raise exception.BackupDatastoreMismatchError(
                datastore1=backup_info.datastore.name,
                datastore2=datastore.name)

    if slave_of_id:
        # Replica creation: datastore must support replication and the
        # source must exist and not itself be a replica.
        replication_support = datastore_cfg.replication_strategy
        if not replication_support:
            raise exception.ReplicationNotSupported(
                datastore=datastore.name)
        try:
            # looking for replica source
            replica_source = DBInstance.find_by(context,
                                                id=slave_of_id,
                                                deleted=False)
            if replica_source.slave_of_id:
                raise exception.Forbidden(
                    _("Cannot create a replica of a replica %(id)s.")
                    % {'id': slave_of_id})
        except exception.ModelNotFoundError:
            LOG.exception(
                _("Cannot create a replica of %(id)s "
                  "as that instance could not be found.")
                % {'id': slave_of_id})
            raise exception.NotFound(uuid=slave_of_id)

    # Prepend the configured default Neutron networks to any user nics.
    if not nics:
        nics = []
    if CONF.default_neutron_networks:
        nics = [{
            "net-id": net_id
        } for net_id in CONF.default_neutron_networks] + nics

    def _create_resources():
        # Runs under the quota reservation: create DB records, optional
        # DNS hostname, then hand off to the task manager.
        if cluster_config:
            cluster_id = cluster_config.get("id", None)
            shard_id = cluster_config.get("shard_id", None)
            instance_type = cluster_config.get("instance_type", None)
        else:
            cluster_id = shard_id = instance_type = None
        db_info = DBInstance.create(
            name=name, flavor_id=flavor_id,
            tenant_id=context.tenant,
            volume_size=volume_size,
            datastore_version_id=datastore_version.id,
            task_status=InstanceTasks.BUILDING,
            configuration_id=configuration_id,
            slave_of_id=slave_of_id, cluster_id=cluster_id,
            shard_id=shard_id, type=instance_type)
        LOG.debug("Tenant %(tenant)s created new Trove instance %(db)s.", {
            'tenant': context.tenant,
            'db': db_info.id
        })

        # if a configuration group is associated with an instance,
        # generate an overrides dict to pass into the instance creation
        # method
        config = Configuration(context, configuration_id)
        overrides = config.get_configuration_overrides()
        service_status = InstanceServiceStatus.create(
            instance_id=db_info.id,
            status=tr_instance.ServiceStatuses.NEW)
        if CONF.trove_dns_support:
            dns_client = create_dns_client(context)
            hostname = dns_client.determine_hostname(db_info.id)
            db_info.hostname = hostname
            db_info.save()

        # Generate a root password only for fresh instances (a restore
        # keeps whatever credentials the backup contains).
        root_password = None
        if cls.get_root_on_create(
                datastore_version.manager) and not backup_id:
            root_password = utils.generate_random_password()
        # Asynchronous provisioning is delegated to the task manager.
        task_api.API(context).create_instance(
            db_info.id, name, flavor, image_id, databases, users,
            datastore_version.manager, datastore_version.packages,
            volume_size, backup_id, availability_zone, root_password,
            nics, overrides, slave_of_id, cluster_config)
        return SimpleInstance(context, db_info, service_status,
                              root_password)

    return run_with_quotas(context.tenant, deltas, _create_resources)
def create(self, req, body, tenant_id):
    """API handler: create a database instance (or replicas) for a tenant.

    Parses and validates the request body, enforces the relevant
    policies, resolves datastore/image information, and delegates to
    ``models.Instance.create``.  Returns a 200 Result with the instance
    detail view.

    :raises exception.BadRequest: invalid combinations (e.g. flavor /
        datastore / volume / locality specified when creating replicas,
        replica_count without replica_of, invalid locality value).
    :raises exception.NotFound: replica source instance does not exist.
    :raises exception.Forbidden: replica source is itself a replica.
    """
    LOG.info("Creating a database instance for tenant '%s'", tenant_id)
    LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
    LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
    context = req.environ[wsgi.CONTEXT_KEY]
    policy.authorize_on_tenant(context, 'instance:create')
    context.notification = notification.DBaaSInstanceCreate(context,
                                                            request=req)

    # Pull the request attributes out of the validated body.
    name = body['instance']['name']
    slave_of_id = body['instance'].get('replica_of')
    replica_count = body['instance'].get('replica_count')
    flavor_ref = body['instance'].get('flavorRef')
    datastore_args = body['instance'].get('datastore', {})
    volume_info = body['instance'].get('volume', {})
    availability_zone = body['instance'].get('availability_zone')
    nics = body['instance'].get('nics', [])
    locality = body['instance'].get('locality')
    region_name = body['instance'].get(
        'region_name', CONF.service_credentials.region_name)
    access = body['instance'].get('access', None)

    if slave_of_id:
        # Replicas inherit flavor/datastore/volume/locality from the
        # source instance, so none of these may be specified explicitly.
        if flavor_ref:
            msg = 'Cannot specify flavor when creating replicas.'
            raise exception.BadRequest(message=msg)
        if datastore_args:
            msg = 'Cannot specify datastore when creating replicas.'
            raise exception.BadRequest(message=msg)
        if volume_info:
            msg = 'Cannot specify volume when creating replicas.'
            raise exception.BadRequest(message=msg)
        if locality:
            msg = 'Cannot specify locality when creating replicas.'
            raise exception.BadRequest(message=msg)
        backup_model.verify_swift_auth_token(context)
    else:
        if replica_count and replica_count > 1:
            msg = (f"Replica count only valid when creating replicas. "
                   f"Cannot create {replica_count} instances.")
            raise exception.BadRequest(message=msg)
    flavor_id = utils.get_id_from_href(flavor_ref)

    if volume_info:
        volume_size = int(volume_info.get('size'))
        volume_type = volume_info.get('type')
    else:
        volume_size = None
        volume_type = None

    if slave_of_id:
        # Resolve flavor and datastore from the replica source.
        try:
            replica_source = models.DBInstance.find_by(context,
                                                       id=slave_of_id,
                                                       deleted=False)
            flavor_id = replica_source.flavor_id
        except exception.ModelNotFoundError:
            LOG.error(f"Cannot create a replica of {slave_of_id} as that "
                      f"instance could not be found.")
            raise exception.NotFound(uuid=slave_of_id)
        if replica_source.slave_of_id:
            raise exception.Forbidden(
                f"Cannot create a replica of a replica {slave_of_id}")
        datastore_version = ds_models.DatastoreVersion.load_by_uuid(
            replica_source.datastore_version_id)
        datastore = ds_models.Datastore.load(
            datastore_version.datastore_id)
    else:
        datastore, datastore_version = ds_models.get_datastore_version(
            **datastore_args)

    # If only image_tags is configured in the datastore version, get
    # the image ID using the tags.
    glance_client = clients.create_glance_client(context)
    image_id = common_glance.get_image_id(glance_client,
                                          datastore_version.image_id,
                                          datastore_version.image_tags)
    LOG.info(f'Using image {image_id} for creating instance')

    databases = populate_validated_databases(body['instance'].get(
        'databases', []))
    database_names = [database.get('_name', '') for database in databases]
    users = None
    try:
        users = populate_users(body['instance'].get('users', []),
                               database_names)
    except ValueError as ve:
        raise exception.BadRequest(message=str(ve))
    # Replicas copy users/databases from the source; specifying them
    # explicitly is an error.
    if slave_of_id and (databases or users):
        raise exception.ReplicaCreateWithUsersDatabasesError()

    configuration = self._configuration_parse(context, body)
    modules = body['instance'].get('modules')

    # The following operations have their own API calls.
    # We need to make sure the same policies are enforced when
    # creating an instance.
    # i.e. if attaching configuration group to an existing instance is not
    # allowed, it should not be possible to create a new instance with the
    # group attached either
    if configuration:
        policy.authorize_on_tenant(context, 'instance:update')
    if modules:
        policy.authorize_on_tenant(context, 'instance:module_apply')
    if users:
        policy.authorize_on_tenant(context,
                                   'instance:extension:user:create')
    if databases:
        policy.authorize_on_tenant(context,
                                   'instance:extension:database:create')

    if 'restorePoint' in body['instance']:
        backupRef = body['instance']['restorePoint']['backupRef']
        backup_id = utils.get_id_from_href(backupRef)
    else:
        backup_id = None

    # Only 1 nic is allowed as defined in API jsonschema.
    # Use list just for backward compatibility.
    if len(nics) > 0:
        nic = nics[0]
        LOG.info('Checking user provided instance network %s', nic)
        if slave_of_id and nic.get('ip_address'):
            msg = "Cannot specify IP address when creating replicas."
            raise exception.BadRequest(message=msg)
        self._check_nic(context, nic)

    if locality:
        # Only the two Nova server-group policies are accepted.
        locality_domain = ['affinity', 'anti-affinity']
        locality_domain_msg = ("Invalid locality '%s'. "
                               "Must be one of ['%s']" %
                               (locality,
                                "', '".join(locality_domain)))
        if locality not in locality_domain:
            raise exception.BadRequest(message=locality_domain_msg)

    # All validation passed: delegate the actual creation to the model.
    instance = models.Instance.create(context, name, flavor_id,
                                      image_id, databases, users,
                                      datastore, datastore_version,
                                      volume_size, backup_id,
                                      availability_zone, nics,
                                      configuration, slave_of_id,
                                      replica_count=replica_count,
                                      volume_type=volume_type,
                                      modules=modules,
                                      locality=locality,
                                      region_name=region_name,
                                      access=access)

    view = views.InstanceDetailView(instance, req=req)
    return wsgi.Result(view.data(), 200)
def _create_replication_slave(self, context, instance_id, name, flavor,
                              image_id, databases, users,
                              datastore_manager, packages, volume_size,
                              availability_zone, root_password, nics,
                              overrides, slave_of_id, backup_id,
                              volume_type, modules, access=None,
                              ds_version=None):
    """Create one or more replicas of the master instance ``slave_of_id``.

    ``instance_id`` (and ``root_password``) may be a single value or a
    list — one entry per replica.  A single master snapshot is taken
    first and shared by all replicas; the snapshot's backup is always
    deleted in the ``finally`` block once creation finishes (or fails).

    :param access: optional access spec forwarded to instance creation.
    :param ds_version: optional datastore version forwarded to instance
        creation.
    :raises: re-raises any snapshot or replica-creation failure after
        logging (and, for snapshot failures, after cleaning up any
        partially-created backup).
    """
    # Fix: use isinstance() for the list check instead of the
    # anti-pattern ``type(x) in [list]``.
    if isinstance(instance_id, list):
        ids = instance_id
        root_passwords = root_password
    else:
        ids = [instance_id]
        root_passwords = [root_password]
    replica_number = 0
    replica_backup_id = backup_id
    replicas = []

    # Place replicas according to the master's server group (locality).
    master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
    server_group = master_instance_tasks.server_group
    scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
    LOG.debug("Using scheduler hints %s for creating instance %s",
              scheduler_hints, instance_id)

    # Create backup for master
    snapshot = None
    try:
        instance_tasks = FreshInstanceTasks.load(context, ids[0])
        snapshot = instance_tasks.get_replication_master_snapshot(
            context, slave_of_id, flavor,
            parent_backup_id=replica_backup_id)
        LOG.info('Snapshot info for creating replica of %s: %s',
                 slave_of_id, snapshot)
    except Exception as err:
        LOG.error('Failed to get master snapshot info for creating '
                  'replica, error: %s', str(err))
        # Clean up a partially-created snapshot backup before re-raising.
        if snapshot and snapshot.get('dataset', {}).get('snapshot_id'):
            backup_id = snapshot['dataset']['snapshot_id']
            Backup.delete(context, backup_id)
        raise

    # Create replicas using the master backup
    replica_backup_id = snapshot['dataset']['snapshot_id']
    try:
        for replica_index in range(0, len(ids)):
            replica_number += 1
            LOG.info(f"Creating replica {replica_number} "
                     f"({ids[replica_index]}) of {len(ids)}.")
            instance_tasks = FreshInstanceTasks.load(
                context, ids[replica_index])
            instance_tasks.create_instance(
                flavor, image_id, databases, users, datastore_manager,
                packages, volume_size, replica_backup_id,
                availability_zone, root_passwords[replica_index], nics,
                overrides, None, snapshot, volume_type, modules,
                scheduler_hints, access=access, ds_version=ds_version)
            replicas.append(instance_tasks)

        for replica in replicas:
            replica.wait_for_instance(CONF.restore_usage_timeout, flavor)
            LOG.info('Replica %s created successfully', replica.id)
    except Exception as err:
        LOG.error('Failed to create replica from %s, error: %s',
                  slave_of_id, str(err))
        raise
    finally:
        # The shared master snapshot backup is only needed for seeding.
        Backup.delete(context, replica_backup_id)