def test_show_deleted_mgmt_instances(self):
    """Verify DBInstance.find_by honors the 'deleted' filter for admins.

    Active instances must be found normally, deleted instances must be
    hidden unless deleted=True is passed explicitly.
    """
    # Fetch active (deleted=0) and deleted (deleted=1) instances that
    # are not cluster members.
    args = {'deleted': 0, 'cluster_id': None}
    db_infos_active = DBInstance.find_all(**args)
    args = {'deleted': 1, 'cluster_id': None}
    db_infos_deleted = DBInstance.find_all(**args)
    args = {'cluster_id': None}
    # db_infos_all = DBInstance.find_all(**args)
    # TODO(SlickNik) Fix this assert to work reliably in the gate.
    # This fails intermittenly when the unit tests run in parallel.
    # self.assertTrue(db_infos_all.count() ==
    #                 db_infos_active.count() +
    #                 db_infos_deleted.count())
    # Run the lookups as an admin context.
    # NOTE(review): patch.object with return_value turns is_admin into a
    # Mock (truthy when read as an attribute) — presumably intended to
    # simulate an admin; confirm is_admin is read as an attribute here.
    with patch.object(self.context, 'is_admin', return_value=True):
        # Assumes the fixture already contains at least one active and
        # one deleted instance — TODO confirm.
        deleted_instance = db_infos_deleted.all()[0]
        active_instance = db_infos_active.all()[0]
        # An active instance is found without any deleted filter.
        instance = DBInstance.find_by(context=self.context,
                                      id=active_instance.id)
        self.assertEqual(active_instance.id, instance.id)
        # A deleted instance must NOT be found when deleted=False.
        self.assertRaises(
            exception.ModelNotFoundError, DBInstance.find_by,
            context=self.context, id=deleted_instance.id, deleted=False)
        # ... but it IS found when deleted=True is requested.
        instance = DBInstance.find_by(context=self.context,
                                      id=deleted_instance.id, deleted=True)
        self.assertEqual(deleted_instance.id, instance.id)
def _get_cluster_instance_id(self, tenant_id, cluster_id):
    """Return the cluster's master instance id plus all node ids.

    :returns: tuple of (master_instance_id, list_of_all_node_ids)
    """
    # Every node id in the cluster, master included.
    instance_ids = self._find_cluster_node_ids(tenant_id, cluster_id)
    # The master node carries the 'master' type record.
    master = DBInstance.find_by(tenant_id=tenant_id,
                                cluster_id=cluster_id,
                                type='master')
    return master.id, instance_ids
def prepare(self, memory_mb, databases, users, device_path=None,
            mount_point=None, backup_id=None, config_contents=None,
            root_password=None):
    """Fake the guest 'prepare' call.

    Creates the requested users and databases immediately, then
    schedules a delayed task that records the service status (FAILED
    for instances whose name ends in 'GUEST_ERROR', RUNNING otherwise)
    and emits an agent heartbeat.
    """
    from trove.instance.models import DBInstance
    from trove.instance.models import InstanceServiceStatus
    from trove.guestagent.models import AgentHeartBeat

    LOG.debug("users... %s" % users)
    LOG.debug("databases... %s" % databases)
    instance_name = DBInstance.find_by(id=self.id).name
    self.create_user(users)
    self.create_database(databases)

    def update_db():
        # The instance name encodes whether to simulate a guest error.
        status = InstanceServiceStatus.find_by(instance_id=self.id)
        status.status = (rd_instance.ServiceStatuses.FAILED
                         if instance_name.endswith("GUEST_ERROR")
                         else rd_instance.ServiceStatuses.RUNNING)
        status.save()
        AgentHeartBeat.create(instance_id=self.id)

    # Defer the status update to mimic asynchronous guest behavior.
    eventlet.spawn_after(1.0, update_db)
def update_statuses_on_time_out(self):
    """Mark service and instance as failed after a guest-agent timeout.

    No-op unless CONF.update_status_on_fail is enabled.
    """
    if not CONF.update_status_on_fail:
        return
    # Record the guest-agent timeout on the service status row.
    timeout_status = ServiceStatuses.FAILED_TIMEOUT_GUESTAGENT
    service = InstanceServiceStatus.find_by(instance_id=self.id)
    service.set_status(timeout_status)
    service.save()
    LOG.error(_("Service status: %(status)s") %
              {'status': timeout_status.api_status})
    LOG.error(_("Service error description: %(desc)s") %
              {'desc': timeout_status.description})
    # Record the build-timeout error on the trove instance task.
    timeout_task = InstanceTasks.BUILDING_ERROR_TIMEOUT_GA
    db_info = DBInstance.find_by(name=self.name)
    db_info.set_task_status(timeout_task)
    db_info.save()
    LOG.error(_("Trove instance status: %(action)s") %
              {'action': timeout_task.action})
    LOG.error(_("Trove instance status description: %(text)s") %
              {'text': timeout_task.db_text})
def _get_cluster_instance_id(self, tenant_id, cluster_id):
    """Return (master_instance_id, all_instance_ids) for a cluster."""
    common = {'tenant_id': tenant_id, 'cluster_id': cluster_id}
    # Ids of every instance that belongs to the cluster.
    instance_ids = [node.id
                    for node in DBInstance.find_all(**common).all()]
    # The single node typed 'master' identifies the master instance.
    master_id = DBInstance.find_by(type='master', **common).id
    return master_id, instance_ids
def get_manager(self, tenant_id, target_id):
    """Resolve the datastore manager for an instance or cluster id.

    :returns: (datastore_manager, is_cluster) where is_cluster tells
              whether target_id named a cluster rather than an instance.
    """
    lookup = {'id': target_id, 'tenant_id': tenant_id}
    try:
        db_info = DBInstance.find_by(**lookup)
        is_cluster = False
    except exception.ModelNotFoundError:
        # Not an instance id — fall back to treating it as a cluster.
        db_info = DBCluster.find_by(**lookup)
        is_cluster = True
    ds_version = datastore_models.DatastoreVersion.load_by_uuid(
        db_info.datastore_version_id)
    return (ds_version.manager, is_cluster)
def _get_datastore(self, tenant_id, instance_or_cluster_id):
    """Resolve the datastore manager for an instance or cluster.

    :returns: (datastore_manager, is_cluster); is_cluster is True when
              instance_or_cluster_id turned out to be a cluster id.
    """
    lookup = {'id': instance_or_cluster_id, 'tenant_id': tenant_id}
    try:
        db_info = DBInstance.find_by(**lookup)
        is_cluster = False
    except exception.ModelNotFoundError:
        # No instance with that id — try it as a cluster id instead.
        db_info = DBCluster.find_by(**lookup)
        is_cluster = True
    ds_version = datastore_models.DatastoreVersion.load_by_uuid(
        db_info.datastore_version_id)
    return (ds_version.manager, is_cluster)
def __init__(self, host_info):
    """Build a host view from a nova host_info record.

    Copies the host capacity figures and annotates each compute
    instance dict with its trove counterpart (id, tenant_id, status)
    when one exists; instances with no trove record get id=None.
    """
    self.name = host_info.name
    self.percent_used = host_info.percentUsed
    self.total_ram = host_info.totalRAM
    self.used_ram = host_info.usedRAM
    self.instances = host_info.instances
    for instance in self.instances:
        # The nova payload calls the server id 'uuid'; expose it as
        # 'server_id' instead.
        instance["server_id"] = instance["uuid"]
        del instance["uuid"]
        try:
            db_info = DBInstance.find_by(
                compute_instance_id=instance["server_id"])
            instance["id"] = db_info.id
            instance["tenant_id"] = db_info.tenant_id
            status = InstanceServiceStatus.find_by(
                instance_id=db_info.id)
            instance_info = SimpleInstance(None, db_info, status)
            instance["status"] = instance_info.status
        # BUG FIX: the exception variable was named 're', shadowing the
        # stdlib 're' module within this scope; renamed to 'err'.
        except exception.TroveError as err:
            LOG.error(err)
            LOG.error("Compute Instance ID found with no associated RD "
                      "instance: %s" % instance["server_id"])
            instance["id"] = None
def __init__(self, host_info):
    """Build a host view from a nova host_info record.

    Copies the host capacity figures and annotates each compute
    instance dict with its trove counterpart (id, tenant_id, status)
    when one exists; instances with no trove record get id=None.
    """
    self.name = host_info.name
    self.percent_used = host_info.percentUsed
    self.total_ram = host_info.totalRAM
    self.used_ram = host_info.usedRAM
    self.instances = host_info.instances
    for instance in self.instances:
        # Rename the nova 'uuid' key to 'server_id'.
        instance['server_id'] = instance['uuid']
        del instance['uuid']
        try:
            db_info = DBInstance.find_by(
                compute_instance_id=instance['server_id'])
            instance['id'] = db_info.id
            instance['tenant_id'] = db_info.tenant_id
            status = InstanceServiceStatus.find_by(
                instance_id=db_info.id)
            instance_info = SimpleInstance(None, db_info, status)
            instance['status'] = instance_info.status
        # BUG FIX: the exception variable was named 're', shadowing the
        # stdlib 're' module within this scope; renamed to 'err'.
        except exception.TroveError as err:
            LOG.error(err)
            LOG.error("Compute Instance ID found with no associated RD "
                      "instance: %s" % instance['server_id'])
            instance['id'] = None
def prepare(self, memory_mb, databases, users, device_path=None,
            mount_point=None, backup_id=None, config_location=None,
            config_contents=None):
    """Fake the guest 'prepare' call.

    Creates the requested users and databases right away, then
    schedules a delayed task that sets the service status (FAILED when
    the instance name ends in 'GUEST_ERROR', RUNNING otherwise) and
    records an agent heartbeat.
    """
    from trove.instance.models import DBInstance
    from trove.instance.models import InstanceServiceStatus
    from trove.instance.models import ServiceStatuses
    from trove.guestagent.models import AgentHeartBeat

    LOG.debug("users... %s" % users)
    LOG.debug("databases... %s" % databases)
    instance_name = DBInstance.find_by(id=self.id).name
    self.create_user(users)
    self.create_database(databases)

    def update_db():
        # The instance name encodes whether to simulate a guest error.
        status = InstanceServiceStatus.find_by(instance_id=self.id)
        status.status = (ServiceStatuses.FAILED
                         if instance_name.endswith('GUEST_ERROR')
                         else ServiceStatuses.RUNNING)
        status.save()
        AgentHeartBeat.create(instance_id=self.id)

    # Defer the status update to mimic asynchronous guest behavior.
    self.event_spawn(1.0, update_db)
def delete_binlogs(self, req, tenant_id, id, binlog_name):
    """Purge binlogs up to (and including) binlog_name.

    Deleting a binlog written after the latest available auto-backup is
    refused. Returns 202 on success, 403 when the named binlog postdates
    the last backup, 404 when no binlog matches binlog_name.
    """
    LOG.info(_("delete binlogs for instance %s, purge to %s")
             % (id, binlog_name))
    context = req.environ[wsgi.CONTEXT_KEY]
    group_id = DBInstance.find_by(context=context, id=id).group_id
    backup = backup_model.get_latest_available_autobackup(context,
                                                          group_id)
    # delete binlogs after lastest backup is not allowed
    for entry in models.Instance.list_binlogs(context, id):
        if entry['log_name'] != binlog_name:
            continue
        started = datetime.strptime(entry['start_time'],
                                    "%Y-%m-%d %H:%M:%S")
        if started >= backup.backup_timestamp:
            # Binlog is newer than the last backup — refuse.
            return wsgi.Result(None, 403)
        models.Instance.delete_binlogs(context, id, binlog_name)
        return wsgi.Result(None, 202)
    return wsgi.Result(None, 404)
def prepare(self, memory_mb, packages, databases, users, device_path=None,
            mount_point=None, backup_info=None, config_contents=None,
            root_password=None, overrides=None, cluster_config=None):
    """Fake the guest 'prepare' call.

    Creates the requested users and databases, remembers any config
    overrides, then schedules a delayed task that sets the service
    status (FAILED when the instance name ends in 'GUEST_ERROR',
    RUNNING otherwise) and records an agent heartbeat.
    """
    from trove.instance.models import DBInstance
    from trove.instance.models import InstanceServiceStatus
    from trove.guestagent.models import AgentHeartBeat

    LOG.debug("users... %s" % users)
    LOG.debug("databases... %s" % databases)
    instance_name = DBInstance.find_by(id=self.id).name
    self.create_user(users)
    self.create_database(databases)
    self.overrides = overrides or {}

    def update_db():
        # The instance name encodes whether to simulate a guest error.
        status = InstanceServiceStatus.find_by(instance_id=self.id)
        status.status = (rd_instance.ServiceStatuses.FAILED
                         if instance_name.endswith('GUEST_ERROR')
                         else rd_instance.ServiceStatuses.RUNNING)
        status.save()
        AgentHeartBeat.create(instance_id=self.id)

    # Defer the status update to mimic asynchronous guest behavior.
    eventlet.spawn_after(1.0, update_db)
def _is_slave(tenant_id, instance_id):
    """Return the master's id when the instance is a replica.

    Falsy (None) when the instance is not a slave of anything.
    """
    record = DBInstance.find_by(id=instance_id, tenant_id=tenant_id)
    return record.slave_of_id
def fake_deleted_instance_delete(cls, context, instance_id):
    """Really delete an instance previously marked FAKE_DELETED.

    Loads the FAKE_DELETED DBInstance record, deletes its compute
    server and guest queue, then (when configured) releases its VIP
    and security group, and finally stamps the record with deleted_at.
    Best-effort: each stage appends to `msg` and flips `success` on
    failure instead of aborting.

    :returns: tuple (success, msg) describing the overall outcome.
    """
    base_msg = " instance_id: %s " % instance_id
    success = True
    msg = " fake_deleted_instance_delete %s " % base_msg
    deleted_at = utils.utcnow()
    db_info = None
    try:
        # Only records already flagged FAKE_DELETED (and soft-deleted)
        # are eligible; move them to DELETING while we work.
        db_info = DBInstance.find_by(context=context, id=instance_id,
                                     task_id=InstanceTasks.FAKE_DELETED.code,
                                     deleted=True)
        db_info.update(task_status=InstanceTasks.DELETING)
        LOG.debug("fake_deleted_instance_delete, load instance ok, %s " % base_msg)
    except Exception:
        LOG.debug("fake_deleted_instance_delete failed, deleted instance not found, %s " % base_msg)
    if db_info is None:
        # Nothing to delete — report failure to the caller.
        success = False
        msg = " fake_deleted_instance_delete failed, load instance error %s " % base_msg
        return success, msg
    try:
        server = load_server(context, db_info.id, db_info.compute_instance_id)
        LOG.debug("fake_deleted_instance_delete, load server: %s ok, %s ",
                  db_info.compute_instance_id, base_msg)
        nova_client = create_nova_client(context)

        def server_is_finished():
            # Poll predicate: True once nova no longer knows the server.
            try:
                server_id = db_info.compute_instance_id
                _server = nova_client.servers.get(server_id)
                if _server.status not in ['SHUTDOWN', 'ACTIVE']:
                    # NOTE(review): reports the outer `server`'s id and
                    # status here; `_server` (the fresh lookup) looks
                    # like what was intended — confirm.
                    _msg = "Server %s got into %s status during delete " \
                           "of instance %s!" % (server.id, server.status,
                                                instance_id)
                    LOG.error(_msg)
                return False
            except nova_exceptions.NotFound:
                return True

        try:
            LOG.debug("Delete compute server %s" % server.id)
            server.delete()
            # Wait until nova reports the server gone (NotFound).
            poll_until(server_is_finished, sleep_time=1,
                       time_out=CONF.server_delete_time_out)
            # Drop the guest agent's message queue as well.
            guest = create_guest_client(context, db_info.id)
            guest.delete_queue()
            LOG.debug("fake_deleted_instance_delete, delete server: %s ok, %s ",
                      db_info.compute_instance_id, base_msg)
        except Exception as ex:
            LOG.error(utils.get_traceback_stack())
            success = False
            msg += " ,deleted server error, compute_instance_id: %s, ex:%s, %s " \
                   % (db_info.compute_instance_id, str(ex), base_msg)
    except Exception as ex:
        # load_server / nova client setup failed.
        LOG.error("COMPUTE ID = %s" % db_info.compute_instance_id)
        success = False
        msg += " ,load server error, compute_instance_id: %s, %s " % (db_info.compute_instance_id, base_msg)
    if CONF.trove_vip_support:
        # Release the instance's virtual IP, if the deployment uses one.
        try:
            db_info.update(task_status=InstanceTasks.RELEASE_VIP)
            instance_vip = DBInstanceVip.find_by(context, instance_id=instance_id,
                                                 deleted=False)
            vip_info = DBVips.find_by(context, id=instance_vip.vip_id,
                                      deleted=False)
            InstanceVip.release_vip(context, vip_info.vip)
            LOG.debug("fake_deleted_instance_delete, release_vip: %s ok, %s " % (vip_info.vip, base_msg))
        except Exception as ex:
            LOG.error(utils.get_traceback_stack())
            success = False
            msg += " ,release_vip error, ex:%s, %s " % (str(ex), base_msg)
    if CONF.trove_security_groups_support:
        # Tear down the security group attached to the instance's group.
        db_info.update(task_status=InstanceTasks.DELETEING_SECURITY_GROUP)
        try:
            SecurityGroup.delete_for_group(db_info.group_id, context)
            LOG.debug(
                "fake_deleted_instance_delete, delete SecurityGroup: %s ok, %s "
                % (db_info.group_id, base_msg))
        except Exception as ex:
            LOG.error(utils.get_traceback_stack())
            success = False
            msg += " ,delete SecurityGroup error, ex:%s, %s " % (str(ex), base_msg)
    # Stamp the record deleted and clear the task, even on partial
    # failure — the per-stage errors are reported via (success, msg).
    db_info.update(deleted_at=deleted_at, task_status=InstanceTasks.NONE)
    if success is True:
        msg = "fake_deleted_instance_delete finished, %s " % base_msg
    return success, msg
def set_server_running():
    """Mark the trove instance backing compute server `id` RUNNING."""
    db_instance = DBInstance.find_by(compute_instance_id=id)
    LOG.debug("Setting server %s to running" % db_instance.id)
    svc_status = InstanceServiceStatus.find_by(
        instance_id=db_instance.id)
    svc_status.status = rd_instance.ServiceStatuses.RUNNING
    svc_status.save()
def _reload_db_info(self):
    """Refresh self.db_info from the database (non-deleted rows only)."""
    current_id = self.db_info.id
    self.db_info = DBInstance.find_by(id=current_id, deleted=False)
def set_server_running():
    """Flip the service status of the instance for compute server `id`
    to RUNNING."""
    trove_inst = DBInstance.find_by(compute_instance_id=id)
    LOG.debug("Setting server %s to running" % trove_inst.id)
    state = InstanceServiceStatus.find_by(instance_id=trove_inst.id)
    state.status = rd_instance.ServiceStatuses.RUNNING
    state.save()
def _reload_db_info(self):
    """Re-fetch this instance's DBInstance record, excluding deleted
    rows."""
    self.db_info = DBInstance.find_by(deleted=False,
                                      id=self.db_info.id)
def check_cluster_instance_actions(self, instance_id):
    """Raise when the instance is a cluster member and per-instance
    cluster operations are blocked.

    :raises: exception.ClusterInstanceOperationNotSupported
    """
    db_instance = DBInstance.find_by(id=instance_id)
    blocked = (db_instance.cluster_id and
               self._block_cluster_instance_actions())
    if blocked:
        raise exception.ClusterInstanceOperationNotSupported()
def check_cluster_instance_actions(self, instance_id):
    """Guard against direct actions on cluster-member instances.

    :raises: exception.ClusterInstanceOperationNotSupported when the
             instance belongs to a cluster and such actions are blocked.
    """
    record = DBInstance.find_by(id=instance_id)
    if record.cluster_id:
        if self._block_cluster_instance_actions():
            raise exception.ClusterInstanceOperationNotSupported()
def create(self, req, body, tenant_id):
    """Create a backup for an instance or an instance group.

    The request body must name either 'group' or 'instance'. For
    groups, the backup runs against the STANDBY member when one
    exists, falling back to the SINGLE member. Expiry defaults come
    from the group's auto-backup configuration.

    :returns: wsgi.Result with the backup view and HTTP 202.
    :raises: exception.BadRequest when neither group nor instance is
             given.
    """
    LOG.debug("Creating a Backup for tenant '%s'" % tenant_id)
    context = req.environ[wsgi.CONTEXT_KEY]
    data = body['backup']
    instance = data.get('instance',None)
    group = data.get('group',None)
    name = data['name']
    type = data.get("type", "snapshot")
    #expire_at = data.get("expire_after", 7)
    desc = data.get('description')
    parent_id = data.get('parent_id')
    LOG.info("parent_id:%s", parent_id)
    if group is None and instance is None:
        raise exception.BadRequest("you must specify group or instance")
    instance_id = None
    if group is not None:
        # Prefer the STANDBY member so the master isn't loaded by the
        # backup; single-node groups have only a SINGLE member.
        # NOTE(review): bare except — any lookup error falls through to
        # the SINGLE lookup; consider narrowing.
        try:
            instance_id = InstanceGroupItem.get_by_gid_type(context, group, DBInstanceType.STANDBY).instance_id
        except:
            instance_id = InstanceGroupItem.get_by_gid_type(context, group, DBInstanceType.SINGLE).instance_id
    if instance_id is None and instance is not None:
        # Map the user-visible (virtual) instance id to the real one.
        instance_id = inst_utils.virtual_instid_2_origin_instid(instance)
    _instance = DBInstance.find_by(context,id=instance_id)
    _type = _instance.service_type
    #_image = ServiceImage.find_by(context,service_name=_type)
    #service_image_id = _image.id
    ds,ds_version = ds_models.get_datastore_version(_type)
    service_image_id = ds_version.image_id
    grp_item = InstanceGroupItem.get_by_instance_id(context, _instance.id)
    group_id = grp_item.group_id
    # get this group's autobackup config and set the expire_after default
    _autobackup = AutoBackup.get_by_gid(context, group_id)
    expire_after = data.get("expire_after", _autobackup.expire_after)
    duration = _autobackup.duration
    expire_at = AutoBackup.calculate_expire_at(expire_after, duration)
    LOG.info("group_id %s, expire_at :%s", group_id, time.ctime(expire_at))
    if grp_item.type == DBInstanceType.MASTER:
        # Resolved to a master — retarget to the group's STANDBY when
        # available; on failure the master is kept as the target.
        try:
            instance_id = InstanceGroupItem.get_by_gid_type(context, group_id, DBInstanceType.STANDBY).instance_id
        except Exception as e:
            LOG.error(e)
    backup = Backup.create(context, instance_id, name, description=desc,group_id=group_id,backup_type=type,expire_at=expire_at,service_image_id=service_image_id,parent_id=parent_id)
    try:
        #service = inst_models.ServiceImage.find_by(id=backup.service_image_id)
        #backup.db_type = service['service_name']
        # Best-effort: annotate the backup with its datastore name.
        ds,ds_version = ds_patch_models.find_datastore_by_image_id(backup.service_image_id)
        backup.db_type = ds.name
    except Exception as ex:
        backup.db_type = ""
        LOG.warn("Failed get db type information of backup %s, %s", backup.id, ex)
    # NOTE(review): `id` here is the Python builtin — no local `id` is
    # defined in this method; presumably backup.id (or group_id) was
    # intended. Confirm against _get_chain_ids.
    chain = self._get_chain_ids(context, id)
    LOG.info(_("chain : '%s'") % chain)
    return wsgi.Result(views.BackupView(backup).data(), 202)
def get_instance(id, deleted=False):
    """Fetch a DBInstance by id, filtered on its deleted flag."""
    return DBInstance.find_by(id=id, deleted=deleted)