Example #1
    def resize_ha(self, instance_id=None):
        self.inst_id = instance_id
        if not self.inst_id:
            self.inst_id = utils.get_instance_id_bytenant(DBInstanceType.MASTER)
        _inst = utils.get_instance(self.inst_id, deleted=0)
        self.group_id = _inst.group_id

        master = InstanceGroupItem.get_by_gid_type(utils.get_context(), self.group_id, DBInstanceType.MASTER)
        standby = InstanceGroupItem.get_by_gid_type(utils.get_context(), self.group_id, DBInstanceType.STANDBY)
        if master:
            old_master_inst_id = master.instance_id
        if standby:
            old_standby_inst_id = standby.instance_id
        
        old_master_inst = utils.get_builtin_instance(old_master_inst_id)
        old_flavor_id = old_master_inst.flavor_id
        old_virtual_instance_id = old_master_inst.virtual_instance_id
        self.inst_id = old_master_inst.id
        
        if old_flavor_id not in ['1', '2', '3', '4']:
            raise Exception("It is not support to do resizing based on flavor id: %s, supported flavor_ids should be in (1,2,3,4)" % (old_flavor_id))
        
        flavor = str(int(old_flavor_id) + 1)
        LOG.info("old flavor : %s, new flavor : %s" % (old_flavor_id, flavor))
        
        utils.resize_flavor_byclient(self.inst_id, flavor)
        inst_ids = utils.check_resize_status(self.group_id)

        if len(inst_ids) >= 2:
            utils.check_rpl_delay(inst_ids[0])
            utils.check_rpl_delay(inst_ids[1])

            import time

            time.sleep(60)

            new_master = utils.get_builtin_instance(old_virtual_instance_id)
            new_standby = InstanceGroupItem.get_by_gid_type(utils.get_context(), new_master.group_id, DBInstanceType.STANDBY)

            if new_master.virtual_instance_id == old_virtual_instance_id and \
                            new_master.id != old_master_inst.id:
                self.inst_id = new_master.id
                self.dbslave_id = new_standby.instance_id
                self.vip_id = utils.get_vip_id(self.inst_id)
            else:
                raise Exception("resize for ha failed,new_master.virtual_instance_id %s,"
                                " old_virtual_instance_id %s, new_master.id %s,"
                                " old_master_inst.id %s, new_standby_id %s,"
                                " old_standy_inst_id %s" % (
                                    new_master.virtual_instance_id, old_virtual_instance_id,
                                    new_master.id, old_master_inst.id,
                                    new_standby.instance_id, old_standy_inst_id))
            
            self.validate()
            
        else:
            raise Exception("the num of instance_id should be equal or greater than two after resize HA ")
Example #2
    def delete_async(self):
        LOG.debug("prepare delete instance %s" % self.id)
        deleted_at = utils.utcnow()
        # Delete guest queue.
        _item = InstanceGroupItem.get_by_instance_id(self.context,self.id)
        group_id = _item.group_id

        if _item.type == DBInstanceType.MASTER:
            standby = None
            try:
                standby = InstanceGroupItem.get_by_gid_type(self.context, group_id,DBInstanceType.STANDBY)
            except exception.ModelNotFoundError:
                pass
            if standby is not None:

                standby.delete()
                standby_inst_id = standby.instance_id
                standby_tasks = self.load(self.context,standby_inst_id)
                standby_tasks.update_db(deleted=True, deleted_at=deleted_at,
                                        task_status=InstanceTasks.NONE)
                standby_tasks.set_servicestatus_deleted()
                standby_tasks._delete_instance_config()

        item_list = InstanceGroupItem.list_by_gid(self.context,group_id)
        if len(item_list)==1:
            # Delete associated security group
            if CONF.trove_security_groups_support:
                try:
                    SecurityGroup.delete_for_group(self.group_id,
                                                   self.context)
                except Exception as e:
                    LOG.error(utils.get_traceback_stack())

            LOG.info("send notify to monitor when delete instance %s" % self.id)
            group_rpcapi.API(self.context).group_update(group_id,notify_when_delete=True)

            LOG.info("Delete group %s" % group_id)
            InstanceGroup.delete(self.context,group_id)

        if _item.type in [DBInstanceType.MASTER,DBInstanceType.SINGLE]:
            try:
                LOG.info("Delete autobackup_setting of group_id %s" % group_id)
                AutoBackup.delete(self.context,group_id)
            except:
                LOG.error(utils.get_traceback_stack())


        _item.delete()
        self.update_db(deleted=True, deleted_at=deleted_at,
                       task_status=InstanceTasks.NONE)
        self.set_servicestatus_deleted()
Example #3
 def group_update(self, group_id,notify_when_delete=False):
     LOG.debug("Making async send group_info %s to %s" % (group_id,self._get_routing_key()))
     
     item_json = dict()
     try:
         group = InstanceGroup.get_by_groupid(self.context, group_id)
     except exception.ModelNotFoundError:
         if notify_when_delete:
             group = InstanceGroup.get_by_groupid(self.context, group_id,deleted=True)
     tenant_id = group.tenant_id
     
     item_list = InstanceGroupItem.list_by_gid(self.context, group_id)
     for item in item_list:
         _id = item.instance_id
         if self._get_running_instance(_id):
             if item.type ==DBInstanceType.SINGLE or item.type == DBInstanceType.MASTER:
                 db_type = DBInstanceType.MASTER
             else:
                 db_type = item.type
             _data = {'id':item.instance_id,'role':item.type,'group_id':item.group_id}
             if item.type == DBInstanceType.READ_REPLI:
                 _list = item_json.get(item.type,[])
                 _list.append(_data)
                 item_json[db_type] = _list
             else:
                 item_json[db_type] = _data
     if item_json or notify_when_delete:
         ret_json = {"id":group_id}
         ret_json.update({'tenant_id':tenant_id})
         ret_json.update(item_json)
         self._cast("group_update",{"value":ret_json})
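For reference, the payload that group_update casts to the monitor above ends up shaped roughly as follows. This is only a sketch: the role keys are whatever the DBInstanceType constants evaluate to, SINGLE instances are reported under the MASTER key, READ_REPLI entries are collected into a list, and all ids here are placeholders.

    # Illustrative shape only; keys come from DBInstanceType, ids are placeholders.
    ret_json = {
        "id": "group-uuid",
        "tenant_id": "tenant-uuid",
        DBInstanceType.MASTER: {"id": "inst-1", "role": DBInstanceType.MASTER, "group_id": "group-uuid"},
        DBInstanceType.READ_REPLI: [
            {"id": "inst-2", "role": DBInstanceType.READ_REPLI, "group_id": "group-uuid"},
        ],
    }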
         
Example #4
 def backup_config(self, req, tenant_id, id):
     """
     List instance autobackup config
     """
     LOG.info("Get instance %s autobackup config."%id)
     context = req.environ[wsgi.CONTEXT_KEY]
     try:
         try:
             instance_group = InstanceGroupItem.get_by_instance_id(context, id)
         except exception.NotFound as e:
             group_id = id
         else:
             group_id = instance_group.group_id
         try:
             autobackupModels = autobackup_models.AutoBackup.get_by_gid(context, group_id)
         except exception.NotFound as e:
             return wsgi.Result({'backup_config': {} }, 202)
         autobackup_at = autobackupModels.autobackup_at
         duration = autobackupModels.duration
         expire_after = autobackupModels.expire_after 
     except Exception as e:
         raise exception.BadRequest(str(e))
     config = {}
     config['autobackup_at'] = autobackup_at
     config['duration'] = duration
     config['expire_after'] = expire_after
     config['group_id'] = group_id
     ret = {'backup_config': config }
     return wsgi.Result(ret, 202)
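On success, the handler above returns a 202 whose body is the dict built from the stored AutoBackup row. A sketch of that shape, with placeholder values:

    # Sketch of the returned body; all values are placeholders.
    {
        "backup_config": {
            "autobackup_at": 3,
            "duration": 2,
            "expire_after": 7,
            "group_id": "group-uuid"
        }
    }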
Example #5
    def _action_modify_backup_config(self, instance, body):
        config = body['modify_backup_config']
        context = instance.context
        try:
            autobackup_at = int(config['autobackup_at'])
            duration = int(config['duration'])
            expire_after = int(config['expire_after'])
        except Exception:
            msg = (_("Cann't modify autobackup config, need parameter: autobackup_at, duration, expire_after"))
            LOG.error(msg)
            raise exception.BadRequest(msg)
        LOG.info("Modify instance %s autobackup config." % instance.id)
        LOG.info("autobackup_at:%s  duration:%s expire_after:%s" % (autobackup_at, duration, expire_after))

        check_ok, check_msg = autobackup_models.AutoBackup.check_config(duration, expire_after, autobackup_at)
        if not check_ok:
            raise exception.BadRequest(check_msg)

        try:
            instance_group = InstanceGroupItem.get_by_instance_id(context, instance.id)
            group_id = instance_group.group_id
            autobackupModels = autobackup_models.AutoBackup.get_by_gid(context, group_id)
            autobackupModels.autobackup_at = autobackup_at
            autobackupModels.duration = duration
            autobackupModels.expire_after = expire_after
            autobackup_models.AutoBackup.update(context, autobackupModels)
        except Exception as e:
            raise exception.BadRequest(str(e))
        return wsgi.Result(None, 202) 
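The action handler above pulls three integer fields out of body['modify_backup_config']. A request body it would accept looks roughly like the sketch below (values are placeholders; the actual bounds are enforced by AutoBackup.check_config):

    # Hypothetical request body; values are placeholders.
    body = {
        "modify_backup_config": {
            "autobackup_at": 3,
            "duration": 2,
            "expire_after": 7
        }
    }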
Example #6
    def delete_async(self, fake):
        LOG.debug("prepare delete instance %s, fake: %s " % (self.id, fake))
        modified_group_id = self.group_id
        self._delete_resources(fake)
        # Delete guest queue.
        _item = InstanceGroupItem.get_by_instance_id(self.context, self.id)
        group_id = _item.group_id
        del_instance_type = _item.type

        # if item_list has only one element, we are deleting the last instance in the group
        item_list = InstanceGroupItem.list_by_gid(self.context, modified_group_id)
        if len(item_list) == 1:
            if CONF.trove_security_groups_support:
                if fake is True:
                    LOG.debug("fake is True, %s skip delete secgroup rules", self.group_id)
                else:
                    # Delete associated security group
                    self.update_db(task_status=InstanceTasks.DELETEING_SECURITY_GROUP)
                    try:
                        SecurityGroup.delete_for_group(modified_group_id, self.context)
                    except Exception as e:
                        LOG.error(utils.get_traceback_stack())

        self.set_servicestatus_deleted()

        # zs: configuration is needed for restore deleted instance, DO NOT DELETE!
        # self._delete_instance_config()

        LOG.info("Delete instance_group_item for instance %s" % self.id)
        _type = self.type
        InstanceGroupItem.delete(self.context, self.id)
        deleted_at = utils.utcnow()
        if fake is True and _type == DBInstanceType.MASTER:
            LOG.debug("fake is True, %s is MASTER, set task_status :%s ", self.id, InstanceTasks.FAKE_DELETED)
            self.update_db(deleted=True, deleted_at=deleted_at, task_status=InstanceTasks.FAKE_DELETED)
        else:
            self.update_db(deleted=True, deleted_at=deleted_at, task_status=InstanceTasks.NONE)

        LOG.info("send notify to monitor when delete instance %s" % self.id)
        group_rpcapi.API(self.context).group_update(group_id, notify_when_delete=True)

        if len(item_list) == 1:
            LOG.info("Delete group %s" % group_id)
            InstanceGroup.delete(self.context, group_id)

        self._send_usage_event(self.server, utils.utcnow())
Example #7
 def test_list(self):
     self._create(self.group_id, MASTER)
     self._create(self.group_id, STANDBY)
     
     when(InstanceGroupItem)._get_item_by_status(any()).thenReturn(True)
     
     db_list = InstanceGroupItem.list(self.context)
     self.assertEqual(2, len(db_list))
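The tests in this listing stub class methods with the mockito-style when(...).thenReturn(...) calls seen above. A minimal self-contained sketch of that pattern, assuming the mockito package is available and using a throwaway class purely for illustration:

    from mockito import when, any, unstub

    class Greeter(object):
        # Throwaway class, for illustration only.
        def greet(self, name):
            return "hello %s" % name

    # Patching on the class makes every instance return the stubbed value.
    when(Greeter).greet(any()).thenReturn("stubbed")
    assert Greeter().greet("world") == "stubbed"
    unstub()  # restore the original method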
Example #8
    def _get_ignore_hosts(self,group_id):
        standby = InstanceGroupItem.get_by_gid_type(self.context, group_id,DBInstanceType.STANDBY)
        master = InstanceGroupItem.get_by_gid_type(self.context, group_id,DBInstanceType.MASTER)
        ignore_hosts = []

        if standby:
            standby_phyhost = inst_models.Instance.get_pyhhostname(self.context, standby.instance_id)
            ignore_hosts.append(standby_phyhost)
        else:
            LOG.error("standby is not exist in group_id %s." % group_id)

        if master:
            master_phyhost = inst_models.Instance.get_pyhhostname(self.context,master.instance_id)
            ignore_hosts.append(master_phyhost)
        else:
            LOG.error("master is not exist in group_id %s." % group_id)

        return ignore_hosts
Example #9
 def test_list_by_tenant_id(self):
     group_id = self.group_id
     self._create(group_id, MASTER)
     self._create(group_id, STANDBY)
     
     when(InstanceGroupItem)._get_item_by_status(any()).thenReturn(True)
     
     _list = InstanceGroupItem.list_by_tenant_id(self.context,is_active=False)
     self.assertEqual(2,len(_list))
Example #10
 def test_create(self):
     self.created = True
     id = self._create(self.group_id,MASTER).instance_id
     db_record = InstanceGroupItem.get_by_instance_id(self.context, id)
     self.assertTrue(db_record)
     self.assertEqual(MASTER, db_record.type)
     
     id = self._create(self.group_id,STANDBY).instance_id
     db_record = InstanceGroupItem.get_by_instance_id(self.context,id)
     self.assertTrue(db_record)
     self.assertEqual(STANDBY, db_record.type)
     
     id = self._create(self.group_id,READ_REPLI).instance_id
     db_record = InstanceGroupItem.get_by_instance_id(self.context,id)
     self.assertTrue(db_record)
     self.assertEqual(READ_REPLI, db_record.type)
     
     id = self._create(self.group_id,SINGLE).instance_id
     db_record = InstanceGroupItem.get_by_instance_id(self.context,id)
     self.assertTrue(db_record)
     self.assertEqual(SINGLE, db_record.type)
Example #11
    def _update_instance_type(self, instance_id, instance_type):
        #update group item
        instance = self.load(self.context, instance_id)
        guest = instance.get_guest()
        group_item = InstanceGroupItem.get_by_instance_id(self.context, instance_id)

        def __updateDBInstance(createRplAccount = True, read_only = True):

            if createRplAccount:
                rplUser = MySQLUser()
                rplUser.name = CONF.rds_rpl_user
                rplUser.password = self._gen_rpl_user_password(group_item.group_id)
                guest.ksc_create_rpl_user(rplUser.serialize())
                guest.ksc_alter_heartbeat_event_enable()

            #guest.ksc_set_read_only(read_only)
            self._change_variable({"read_only":"ON" if read_only else "OFF"})

        __updateDBInstance(createRplAccount = (instance_type in [DBInstanceType.MASTER, DBInstanceType.SINGLE]),
                           read_only = (instance_type == DBInstanceType.READ_REPLI or \
                                        instance_type == DBInstanceType.STANDBY))

        InstanceGroupItem.update_type(self.context, item_id=group_item.id, type=instance_type)
Example #12
 def create_binlog(self, context, instance_id, group_id, name, location, size, binlog_start_time):
     success = True
     try: 
         item = binlog_models.DBBinlog.find_by(instance_id = instance_id, name = name, location = location, deleted = 0) 
         item.size = size
     except:
         igi = InstanceGroupItem.get_by_instance_id(context, instance_id)
         if igi.type not in [DBInstanceType.MASTER, DBInstanceType.SINGLE]:
             LOG.info("**************** instance %s type is not MASTER or SINGLE, ignore ***************" % instance_id)
             success = False
             return  success
         
         binlog_models.DBBinlog.create(context, instance_id, group_id, name, location, size, binlog_start_time)
     else:
         item.save()
     
     return success
Example #13
    def _relocate_master(self, master_id, slave_id,backup_id=None):
        '''
        Fore. 2014/9/29 deprecated method. Don't use it.
        :param master_id:
        :param slave_id:
        :param backup_id:
        '''

        master_group_item = InstanceGroupItem.get_by_instance_id(self.context, master_id)
        def __show_master_status(inst_id):
            _instance = self.load(self.context, inst_id)
            _guest = _instance.get_guest()
            mStatus = _guest.ksc_show_master_status()
            log_path, log_pos = mStatus['file'], mStatus['position']
            return log_path, log_pos

        if backup_id is None:
            log_path, log_pos = __show_master_status(master_id)
        else:
            bk_info = Backup.get_by_id(self.context, backup_id)
            if master_group_item.group_id == bk_info.group_id:
                log_path, log_pos = Backup.get_binlog_info(self.context, backup_id)
            else:
                log_path, log_pos = __show_master_status(master_id)

        LOG.debug("relocate master instance %s get binlog_path:%s binlog_pos:%s" \
                  %(master_id, log_path, log_pos))

        group_item = master_group_item
        master_host = self._get_instance_ip(master_id)

        master_guest = FreshInstanceTasks.load(self.context, master_id).guest
        master_port = int(master_guest.ksc_list_variables(["port"])["port"])

        repl_user = CONF.rds_rpl_user
        repl_password = self._gen_rpl_user_password(group_item.group_id)

        master_log_file = log_path
        master_log_pos = log_pos

        slave_instance = self.load(self.context, slave_id)
        slave_guest = slave_instance.get_guest()
        slave_guest.ksc_relocate_master(master_host=master_host, master_port = master_port, repl_user=repl_user,
                                        repl_password=repl_password, master_log_file=master_log_file,
                                        master_log_pos=master_log_pos)
Example #14
 def test_list_by_gid(self):
     group_id = self.group_id
     self._create(group_id, MASTER)
     self._create(group_id, STANDBY)
     
     _instance = fake()
     task_status = fake()
     task_status.action = "NONE"
     _instance.task_status = task_status
     
     service_status = fake()
     service_status.status = models.ServiceStatuses.RUNNING
     
     when(models.DBInstance).find_by(id = any()).thenReturn(_instance)
     when(models.InstanceServiceStatus).find_by(instance_id = any()).thenReturn(service_status)
     
     
     db_list = InstanceGroupItem.list_by_gid(self.context, group_id)
     for group in db_list:
         print group.data()
     self.assertEqual(2, len(db_list))
Example #15
 def validate(self):
     utils.check_server_status(self.inst_id, expected_task=utils.tasks.InstanceTasks.NONE,
                               type=DBInstanceType.MASTER,
                               expected_svr_status=utils.ServiceStatuses.RUNNING,
                               deleted=False, timeout=600)
     
     utils.check_server_status(self.dbslave_id, expected_task=utils.tasks.InstanceTasks.NONE,
                               type=DBInstanceType.STANDBY,
                               expected_svr_status=utils.ServiceStatuses.RUNNING,
                               deleted=False, timeout=600)
     
     rr_items = InstanceGroupItem.get_by_gid_type(utils.get_context(), self.group_id, DBInstanceType.READ_REPLI, deleted = False)
     slave_ids = []
     for rr in rr_items:
         slave_ids.append(rr.instance_id)
     slave_ids.append(self.dbslave_id)
     
     utils.check_mysql_adminuser(self.inst_id)
     utils.check_mysql_adminuser(self.dbslave_id)
     
     for _id in [self.inst_id, self.dbslave_id]:
          utils.check_mysql_is_running(_id)
     
     utils.check_vip(self.inst_id, vip_id=self.vip_id)
     self.backup_id = utils.check_backup(self.group_id)
     utils.check_backup_status(self.backup_id) 
     utils.check_backup_path(self.backup_id)
     
     for slave_id in slave_ids:
         utils.check_rpl_delay(slave_id)
         
     master_inst = utils.get_builtin_instance(self.inst_id)
     slave_inst = utils.get_builtin_instance(self.dbslave_id)
     master_ip = utils.check_allocate_ip(master_inst.server)
     slave_ip = utils.check_allocate_ip(slave_inst.server)
     LOG.info("master_ip:%s  slave_ip:%s" % (master_ip, slave_ip))
     utils.check_rpl_consist(self.inst_id, slave_ids, master_ip, [slave_ip])
     utils.check_rpl_topo_ha(self.group_id)
Example #16
    def backups(self, req, tenant_id, id):
        """Return all backups for the specified instance."""
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("Indexing backups for instance or group '%s'")%id)

        if not uuid.is_uuid_like(id):
            msg = (_("Wrong id of instance or group."))
            LOG.error(msg)
            raise exception.BadRequest(msg)

        group_id = None
        context = req.environ[wsgi.CONTEXT_KEY]
        try:
            db_info = models.get_db_info(context, id)
            group_id = db_info.group_id
        except exception.NotFound:
            LOG.debug("Not instance id %s"%id)
            pass

        if group_id is None:
            try:
                InstanceGroup.get_by_groupid(context, id)
                group_id = id
            except exception.NotFound:
                msg = (_("Without find instance or group of the id"))
                LOG.error(msg)
                raise exception.BadRequest(msg)    
        else:
            try:
                item = InstanceGroupItem.get_by_instance_id(context, id)
                if item.type==DBInstanceType.READ_REPLI or item.type==DBInstanceType.STANDBY:
                    msg = (_("Without backups for the instance.")) 
                    LOG.error(msg)
                    raise exception.BadRequest(msg)
            except exception.NotFound:
                msg = (_("Without find instance group item information."))
                LOG.error(msg)
                raise exception.BadRequest(msg)   
 
        backups = backup_model.list_autobackup(context, group_id)
        backups += backup_model.list_snapshot(context, group_id)
        bks = []
        #for backup in backups:
        #    instance = models.FreshInstance.load(context, backup.instance_id)
        #    backup.db_type = instance.service_type
        #    bks.append(backup)
        #TODO(ksc-need-discuss) backup.db_type isn't used in the backup view; do we really need it?
        tmp = dict()
        for backup in backups:
            ## autobackup's service_image_id is NULL
            if backup.service_image_id:
                if backup.service_image_id not in tmp:
                    #service_image = models.ServiceImage.find_by(id=backup.service_image_id)
                    #backup.db_type = service_image.service_name
                    #tmp[backup.service_image_id] = service_image.service_name
                    try:
                        #image_id may not exist in the datastore
                        datastore, datastore_version = ds_path_models.find_datastore_by_image_id(backup.service_image_id)
                        tmp[backup.service_image_id] = datastore.name
                        backup.db_type = datastore.name
                    except Exception:
                        backup.db_type = ""
                else:
                    backup.db_type = tmp[backup.service_image_id]
            else:
                backup.db_type = ""
            bks.append(backup)
        backups = bks
        LOG.debug('Got backups %s' % backups)
        return wsgi.Result(backup_views.BackupViews(backups).data(), 200)
Example #17
    def create(cls, context, instance_id, name, description=None, group_id=None, backup_type=None,
               expire_at=None, init=False, service_image_id=None, parent_id=None):
        if parent_id is not None:
            parent_id = str(parent_id)
            LOG.info("parent_id:%s, parent_id.len:%s", parent_id, len(parent_id.strip()))
            if len(parent_id.strip()) == 0:
                parent_id = None
        _parent_id = parent_id

        from trove.instance.models import Instance
        instance_id = utils.get_id_from_href(instance_id)
        instance_model = Instance.load(context, instance_id)
        if init:
            if instance_model.db_info.server_status != 'ACTIVE':
                msg = ("Instance is not currently available for an action to be "
                       "performed (server_status was %s).", instance_model.db_info.server_status)
                LOG.error(msg)
                raise exception.UnprocessableEntity(msg)
        else:
            instance_model.validate_can_perform_action()

        if instance_model.type == DBInstanceType.MASTER:
            try:
                standby_id = InstanceGroupItem.get_by_gid_type(context, instance_model.group_id, DBInstanceType.STANDBY).instance_id
                instance_model = Instance.load(context, standby_id)
                instance_model.validate_can_perform_action()
                instance_id = standby_id
            except Exception as e:
                LOG.error(e)
                raise e

        if group_id is None:
            raise exception.TroveError("group_id can't None")
        if backup_type is None or backup_type not in [Type.SNAPSHOT, Type.AUTOBACKUP]:
            raise exception.TroveError("instType can't None, only accept value: snapshot or autobackup ")

        if backup_type == Type.SNAPSHOT:
            expire_time = 0
            _parent_id = None  # force full
        elif backup_type == Type.AUTOBACKUP:
            expire_time = int(expire_at)
            if parent_id and parent_id == '0':
                _parent_id = None  # force full
            elif parent_id and parent_id != '0':
                try:
                    backup_parent = cls.get_by_id(context, parent_id)
                    LOG.debug("backup_parent:%s", backup_parent)
                except:
                    raise exception.NotFound("not found backup with parent_id: %s" % parent_id)
                if not backup_parent:
                    raise exception.NotFound("not found backup with parent_id: %s" % parent_id)
            elif parent_id is None:
                LOG.debug("parent_id is None:%s", parent_id)
                last_backup_chain = cls.get_last_backup_chain(group_id)
                backup_incremental_chain_size = CONF.backup_incremental_chain_size
                LOG.info("last_backup_chain: %s, backup_incremental_chain_size: %s",
                         last_backup_chain, backup_incremental_chain_size)
                if len(last_backup_chain) == 0 \
                        or len(last_backup_chain) >= int(backup_incremental_chain_size):
                    _parent_id = None  # create full
                else:
                    compare_instance = None
                    try:
                        compare_instance = InstanceGroupItem.get_by_gid_type(context, group_id, DBInstanceType.STANDBY)
                    except exception.NotFound:  # no standby
                        try:
                            compare_instance = InstanceGroupItem.get_by_gid_type(context, group_id,
                                                                                 DBInstanceType.SINGLE)
                        except exception.NotFound:  # no single
                            pass
                    if compare_instance:
                        compare_id = compare_instance.instance_id
                        switched = False
                        for b in last_backup_chain:  # has standby
                            if b["instance_id"] != compare_id:
                                switched = True  # create full
                                LOG.debug("last_backup_chain: %s, switched: %s, backup_instance_id: %s, b.instance_id: %s"
                                         , last_backup_chain, switched, compare_id, b["instance_id"])
                                break
                        if not switched:
                            parent = last_backup_chain.pop()  # create incremental
                            _parent_id = parent["id"]
                    else:  # neither standby nor single found
                        _parent_id = None  # create full
        LOG.debug("create backup use parent_id: %s", _parent_id)

        def _create_resources():
            try:
                db_info = models.DBBackup.create(name=name,
                                                 description=description,
                                                 tenant_id=context.tenant,
                                                 state=models.BackupState.NEW,
                                                 instance_id=instance_id,
                                                 deleted=False,
                                                 group_id=group_id,
                                                 type=backup_type,
                                                 expire_at=expire_time,
                                                 service_image_id=service_image_id,
                                                 parent_id=_parent_id)
            except exception.InvalidModelError as ex:
                LOG.exception("Unable to create Backup record:")
                msg = "Unable to create Backup record, group_id %s, instance_id %s, parent_id %s " % (group_id, _instance_id, _parent_id)
                AlarmRpc(context).alarm(context.tenant, level=AlarmRpc.LEVEL_ERROR,
                                        _type=AlarmRpc.TYPE_TASKMANAGER, message=msg+str(ex))             
                raise exception.BackupCreationError(str(ex))

            api.API(context).create_backup(db_info.id, instance_id)
            return db_info

        return run_with_quotas(context.tenant,
                               {'backups': 1},
                               _create_resources)
Example #18
 def _create(self,group_id,type):
     id = self.instance_id
     InstanceGroupItem.create(self.context,self.tenant_id,id,group_id,type)
     return id
Example #19
    def create(self, req, body, tenant_id):
        LOG.debug("Creating a Backup for tenant '%s'" % tenant_id)
        context = req.environ[wsgi.CONTEXT_KEY]
        data = body['backup']
        instance = data.get('instance',None)
        group = data.get('group',None)
        name = data['name']
        type = data.get("type", "snapshot")
        #expire_at = data.get("expire_after", 7)
        desc = data.get('description')
        parent_id = data.get('parent_id')
        LOG.info("parent_id:%s", parent_id)

        if group is None and instance is None:
            raise exception.BadRequest("you must specify group or instance")

        instance_id = None

        if group is not None:
            try:
                instance_id = InstanceGroupItem.get_by_gid_type(context, group, DBInstanceType.STANDBY).instance_id
            except:
                instance_id = InstanceGroupItem.get_by_gid_type(context, group, DBInstanceType.SINGLE).instance_id

        if instance_id is None and instance is not None:
            instance_id = inst_utils.virtual_instid_2_origin_instid(instance)

        _instance = DBInstance.find_by(context,id=instance_id)
        _type = _instance.service_type
        #_image = ServiceImage.find_by(context,service_name=_type)
        #service_image_id = _image.id
        ds,ds_version = ds_models.get_datastore_version(_type)
        service_image_id = ds_version.image_id

        grp_item = InstanceGroupItem.get_by_instance_id(context, _instance.id)
        group_id = grp_item.group_id

        # get this group's autobackup config and set the expire_after default
        _autobackup = AutoBackup.get_by_gid(context, group_id)
        expire_after = data.get("expire_after", _autobackup.expire_after)
        duration = _autobackup.duration
        expire_at = AutoBackup.calculate_expire_at(expire_after, duration)
        LOG.info("group_id %s, expire_at :%s", group_id, time.ctime(expire_at))

        if grp_item.type == DBInstanceType.MASTER:
            try:
                instance_id = InstanceGroupItem.get_by_gid_type(context, group_id, DBInstanceType.STANDBY).instance_id
            except Exception as e:
                LOG.error(e)

        backup = Backup.create(context, instance_id, name, description=desc,group_id=group_id,backup_type=type,expire_at=expire_at,service_image_id=service_image_id,parent_id=parent_id)
        try:
            #service = inst_models.ServiceImage.find_by(id=backup.service_image_id)
            #backup.db_type = service['service_name']
            ds,ds_version = ds_patch_models.find_datastore_by_image_id(backup.service_image_id)
            backup.db_type = ds.name
        except Exception as ex:
            backup.db_type = ""
            LOG.warn("Failed get db type information of backup %s, %s", backup.id, ex)
        chain = self._get_chain_ids(context, id)
        LOG.info(_("chain : '%s'") % chain)
        return wsgi.Result(views.BackupView(backup).data(), 202)
Example #20
    def create_instance(self, flavor, image_id, databases, users,
                        service_type, volume_size, security_groups,
                        backup_id, instance_type, ignore_hosts=None,
                        master_id=None, extend=None):

        if instance_type==DBInstanceType.STANDBY or instance_type==DBInstanceType.READ_REPLI:
            if master_id is None:
                raise Exception("when instance_type is STANDBY or RR, The master_id can't none")

        availability_zone=None
        overrides = {}
        if extend != None:
            availability_zone=extend.get('availability_zone', None)
            ds_version_id = extend.get('datastore_version_id', None)
            overrides = extend.get('overrides', {})

        self.update_db(task_status=InstanceTasks.BUILDING_SERVER)
        try:
            server, volume_info = self._create_server_volume_individually(
                flavor,
                image_id,
                security_groups,
                service_type,
                ignore_hosts, availability_zone)
        except Exception as e:
            self.set_servicestatus(ServiceStatuses.UNKNOWN)
            raise e

        try:
            configuration_id =self.db_info.configuration_id
            LOG.debug("Prepare task  instance id = %s,  configuration id =%s" % (self.id, configuration_id))
            overrides = KSC_Configuration.get_configuration_overrides(self.context, configuration_id)
        except Exception:
            pass

        self.update_db(task_status=InstanceTasks.GUEST_PREPARE)
        LOG.info("======= > groupid %s" % self.group_id)
        group = InstanceGroup.get_by_groupid(self.context, self.group_id)
        sys_variables = {"port" : group.db_port, "read_only" : "OFF" if instance_type in (DBInstanceType.MASTER, DBInstanceType.SINGLE) else "ON"}
        overrides.update(sys_variables)
        config = self._render_config(flavor)
        overrides_config = self._render_override_config(flavor, overrides)

        self._guest_prepare(server, flavor['ram'], volume_info,
                            databases, users, backup_id,
                            config.config_contents,
                            overrides_contents = overrides_config.config_contents)

        try:
            utils.poll_until(self._service_is_active,
                             sleep_time=USAGE_SLEEP_TIME,
                             time_out=USAGE_TIMEOUT)
        except Exception as e:
            self.set_servicestatus(ServiceStatuses.UNKNOWN)
            raise e

        group_item = InstanceGroupItem.get_by_instance_id(self.context, self.id)
        group_id = group_item.group_id

        self.update_db(task_status=InstanceTasks.CONFIG_MYSQL)
        if instance_type==DBInstanceType.STANDBY or instance_type==DBInstanceType.READ_REPLI:
            if instance_type==DBInstanceType.STANDBY:
                self._update_instance_type(master_id, DBInstanceType.MASTER)

            self._update_instance_type(self.id, instance_type)
            self._relocate_master(master_id,self.id,backup_id)

            # when upgrading single to HA
            if instance_type == DBInstanceType.STANDBY:
                master_group_item = InstanceGroupItem.get_by_instance_id(self.context,master_id)
                InstanceGroupItem.update_type(self.context, item_id=master_group_item.id, type=DBInstanceType.MASTER)

        if instance_type==DBInstanceType.SINGLE or instance_type==DBInstanceType.MASTER:
            self._create_master_user(instance_id=self.id, user=extend.get('admin_user'), \
                                     password=extend.get('admin_password'))

            self._update_instance_type(self.id, instance_type)


        if instance_type==DBInstanceType.SINGLE or instance_type==DBInstanceType.MASTER:
            try:
                self.update_db(task_status=InstanceTasks.BACKUPING)
                expiretime = AutoBackup.get_autobackup_expiretime(self.context, group_id)
                backup_name = self._backup_name(instance_id=self.id)


                # Fore. 2014/07/02 Get Service Image ID from datastore version 
                #                 _type = self.service_type
                #                 _image = ServiceImage.find_by(self.context,service_name=_type)
                #ds, ds_version = ds_models.get_datastore_version(type = None, version = ds_version_id)
                #service_image_id = image_id

                desc = 'Init backup for new instance'
                Backup.create(context=self.context, instance=self.id,
                              name=backup_name, description=desc, group_id=group_id,
                              backup_type=BackupType.AUTOBACKUP, expire_at=expiretime,init=True,service_image_id=image_id)
            except Exception as e:
                msg = "Error creating backup for instance %s %s" % (self.id,utils.get_traceback_stack())
                LOG.error(msg)

        self.update_db(task_status=InstanceTasks.SETIOTUNE)
        LOG.debug("Set block iotune for instance %s ." % self.id)

        self._set_blkiotune(flavor)

        if instance_type!=DBInstanceType.STANDBY:
            if CONF.trove_vip_support:
                self.update_db(task_status=InstanceTasks.ALLOCATE_VIP)
                try:
                    rip = self._get_instance_ip(self.id)
                    vip = InstanceVip.allocate(self.context,instance_id=self.id,rip = rip)
                    LOG.debug("Allocated vip %s for instance %s"%(vip, self.id))
                except Exception as e:
                    self.set_servicestatus(ServiceStatuses.UNKNOWN)
                    raise e

        self.update_db(deleted=False,task_status=InstanceTasks.NONE)

        LOG.info("create instance_id:%s,notify monitor,autobackup" % self.id)
        group_rpcapi.API(self.context).group_update(group_id)
        self.send_usage_event('create', instance_size=flavor['ram'])
Example #21
 def _create(self,group_id,type):
     id = self.instance_id
     InstanceServiceStatus.create(status=ServiceStatuses.RUNNING,instance_id=id)
     return InstanceGroupItem.create(self.context,self.tenant_id,id,group_id,type)
Example #22
 def test_update_type(self):
     item = self._create(self.group_id, MASTER)
     InstanceGroupItem.update_type(self.context, item.id,STANDBY)
     new_item = InstanceGroupItem.get_by_instance_id(self.context,item.instance_id)
     self.assertEqual(STANDBY, new_item.type)
Example #23
 def test_create_instanceid_repeat(self):
     instance_id = self.instance_id
     InstanceGroupItem.create(self.context,self.tenant_id,instance_id,self.group_id,MASTER)
     self.assertRaises(InstanceIdRepeat,InstanceGroupItem.create,self.context,self.tenant_id,instance_id,self.group_id,MASTER)
Example #24
 def test_delete(self):
     instance_id = self._create(self.group_id, STANDBY).instance_id
     InstanceGroupItem.delete(self.context, instance_id)
     
     self.assertRaises(exception.NotFound,InstanceGroupItem.get_by_instance_id,self.context, instance_id,False)
Example #25
    def _failover_test(self, group_id, trigger_inst_id, 
                          do_workload = False, do_prepare = False,
                          mysqld_killed = False,
                          host_rebooted = False,
                          remove_tmp_initsql = False, 
                          mysql_data_lost = False,
                          check_vip = False, 
                          check_rpl_consist = True, 
                          check_binlog_range = False):
                
        LOG.info("Doing Failover Test, group_id:%s, instance_id:%s, do_workload:%s, do_prepare:%s." % 
                 (group_id, trigger_inst_id, do_workload, do_prepare))
        before_group_items = InstanceGroupItem.list_by_gid(test_utils.get_context(), group_id, deleted = False)
        before_items = set(map(lambda x: x.type + "_" + x.instance_id, before_group_items))
        
        before_instance = test_utils.get_builtin_instance( trigger_inst_id)
        before_rip = test_utils.check_allocate_ip(before_instance.server)
        before_origin_instid = before_instance.id
        
        
        rt_before = rt_after = None
        if check_binlog_range:
            rt_before = test_utils.get_restorable_time(trigger_inst_id)
        
        if do_workload and before_instance.type == DBInstanceType.MASTER:
            FAILOVERInstance.__run_workload(do_prepare = do_prepare)

        if remove_tmp_initsql:
            FAILOVERInstance.__trigger_vm_remove_tmp_sql_file(trigger_inst_id)
            
        if mysqld_killed:
            FAILOVERInstance.__trigger_mysqld_crash(trigger_inst_id)
            test_utils.check_server_status(trigger_inst_id, expected_task=tasks.InstanceTasks.NONE, 
                                           type=before_instance.type, expected_svr_status=test_utils.ServiceStatuses.SHUTDOWN, 
                                           deleted=False, timeout=120)

        if host_rebooted:
            FAILOVERInstance.__trigger_host_reboot(trigger_inst_id)
            # when the host machine is rebooted, no guestagent updates the service's status.
#             test_utils.check_server_status(trigger_inst_id, expected_task=tasks.InstanceTasks.NONE, 
#                                            type=before_instance.type, expected_svr_status=test_utils.ServiceStatuses.SHUTDOWN, 
#                                            deleted=False, timeout=120)
        
        if mysql_data_lost:
            FAILOVERInstance.__trigger_mysql_data_lost(trigger_inst_id)

        rpc.call(test_utils.get_context(), "taskmanager", 
                        {"method": "failover", "args": {'instance_id':before_origin_instid}}, timeout = 3600)

    
        ## check vip <--> rip mapping.
        ## vip should be changed in 10 seconds.
        if before_instance.type == DBInstanceType.MASTER or before_instance.type == DBInstanceType.READ_REPLI:
            after_instance = test_utils.get_builtin_instance( trigger_inst_id)
            after_nova_inst = after_instance.server
            after_rip = test_utils.check_allocate_ip(after_nova_inst)
            assert  after_instance.vip == before_instance.vip and before_rip != after_rip
        
        if before_instance.type == DBInstanceType.MASTER:
            test_utils.check_server_status(before_instance.id, 
                                           expected_task = tasks.InstanceTasks.NONE, 
                                           type=DBInstanceType.MASTER, 
                                           expected_svr_status = test_utils.ServiceStatuses.RUNNING, 
                                           deleted=False, timeout=120)
        
        ## check replication topo
        after_group_items = InstanceGroupItem.list_by_gid(test_utils.get_context(), group_id, deleted = False)
        after_items = set(map(lambda x: x.type + "_" + x.instance_id, after_group_items))
        LOG.info("before " + str(before_items))
        LOG.info("after " + str(after_items))
        
        if check_rpl_consist:
            diff_items = (before_items - after_items)
#             assert len(diff_items) == 0
            assert len(before_group_items) == len(after_group_items), "size of mysql cluster should be the same."
            
            for group_item in after_group_items:
                
                if group_item.type == DBInstanceType.STANDBY and group_item.instance_id == before_instance.id:
                    item = InstanceGroupItem.get_by_instance_id(test_utils.get_context(), group_item.instance_id, deleted = False)
                    assert item != None
                    continue
                
                test_utils.check_server_status(group_item.instance_id, 
                                               expected_task = tasks.InstanceTasks.NONE, 
                                               type = group_item.type, 
                                               expected_svr_status = test_utils.ServiceStatuses.RUNNING, 
                                               deleted = False, timeout = 120)
                
        if check_binlog_range:
            rt_after = test_utils.get_restorable_time(trigger_inst_id)
            assert  rt_after.end > rt_before.end, (rt_after.end, rt_before.end)
            time.sleep(60)
            rt_after2 = test_utils.get_restorable_time(trigger_inst_id)
            assert  rt_after2.end > rt_after.end, (rt_after2.end, rt_after.end)
Example #26
    def _delete_resources(self, fake):
        group_item = InstanceGroupItem.get_by_instance_id(self.context, self.id)
        group_id = group_item.group_id
        inst_type = group_item.type
        instance_id = self.db_info.id

        if self.server and self.db_info.server_status == "ACTIVE":
            # set instance to read only mode
            LOG.info("Set readonly for instance %s" % self.id)
            self._set_instance_readonly(instance_id=self.id)
        else:
            LOG.info("vm_status is not ACTIVE for %s" % self.id)

        if inst_type == DBInstanceType.MASTER:
            rrinsts = []
            try:
                standby = InstanceGroupItem.get_by_gid_type(self.context, group_id, DBInstanceType.STANDBY)
                rrinsts = InstanceGroupItem.get_by_gid_type(self.context, group_id, DBInstanceType.READ_REPLI)
                standby_inst_id = standby.instance_id
                LOG.info("MASTER %s,it hava STANDBY %s,RRS %s",
                         (self.id, standby_inst_id, [_inst.instance_id for _inst in rrinsts]))
                InstanceGroupItem.delete(self.context, standby_inst_id)
            except Exception as e:
                LOG.error(utils.get_traceback_stack())

            # wait for the replication group db to sync
            if len(rrinsts) > 0:
                self.guest.ksc_set_read_only(True)

            for _inst in rrinsts:
                try:
                    rr_instance = self.load(self.context, _inst.instance_id)
                    rr_instance.waite_rpl_synchronize(time_out=CONF.delete_waite_rplg_sync)
                except Exception as e:
                    LOG.error(utils.get_traceback_stack())

            # delete standby instance
            try:
                try:
                    standby_instance = self.load(self.context, standby_inst_id)
                except exception.UnprocessableEntity:
                    standby_instance = FreshInstanceTasks.load(self.context, standby_inst_id)
                standby_instance.update_db(deleted=True, deleted_at=utils.utcnow(), task_status=InstanceTasks.NONE)
                standby_instance.set_servicestatus_deleted()
                standby_instance._delete_instance_config()

                if standby_instance.server:
                    LOG.info("Delete STANDBY compute server %s" % standby_instance.server.id)
                    standby_instance.get_guest().delete_queue()
                    standby_instance.server.delete()

                    poll_until(standby_instance.server_is_finished, sleep_time=1, time_out=CONF.server_delete_time_out)
                else:
                    LOG.info("standby instance vm_status is not ACTIVE for %s" % standby_inst_id)
            except Exception as e:
                LOG.error(utils.get_traceback_stack())

            if fake is True and self.type == DBInstanceType.MASTER:
                try:
                    LOG.debug("fake is True, %s is MASTER, stop mysqld", self.id)
                    self.guest.ksc_stop_db(do_not_start_on_reboot=True)
                except Exception as e:
                    msg = "fake_delete, instance: %s, stop mysqld error, exception: %s " % (self.id, str(e))
                    LOG.error("%s, %s", msg, utils.get_traceback_stack())
                    AlarmRpc(self.context).alarm(self.tenant_id, level=AlarmRpc.LEVEL_ERROR,
                                                 _type=AlarmRpc.TYPE_TASKMANAGER, message=msg)

        if self.server:
            if fake is True and self.type == DBInstanceType.MASTER:
                LOG.debug("fake is True, %s is MASTER, skip delete server", self.id)
            else:
                try:
                    LOG.info("Delete compute server %s" % self.server.id)
                    guest = self.get_guest()
                    guest.delete_queue()
                    self.server.delete()
                    poll_until(self.server_is_finished, sleep_time=1, time_out=CONF.server_delete_time_out)
                except Exception as e:
                    LOG.error(utils.get_traceback_stack())

        # delete group_item/autobackup_setting/group
        if self.type in [DBInstanceType.MASTER, DBInstanceType.SINGLE]:
            try:
                LOG.info("Delete autobackup_setting of group_id %s" % group_id)
                AutoBackup.delete(self.context, group_id)
            except:
                LOG.error(utils.get_traceback_stack())

        # remove vip.
        if CONF.trove_vip_support and \
                        self.type in [DBInstanceType.MASTER, DBInstanceType.SINGLE, DBInstanceType.READ_REPLI]:
            if fake is True and self.type == DBInstanceType.MASTER:
                LOG.debug("fake is True, %s is MASTER, skip release vip", self.id)
            else:
                try:
                    self.update_db(task_status=InstanceTasks.RELEASE_VIP)
                    LOG.info("release vip for instance %s" % instance_id)
                    if inst_type in [DBInstanceType.MASTER, DBInstanceType.SINGLE]:
                        cur_vip = vipService.InstanceVip.get_by_instance_id(self.context, instance_id)
                        vipService.InstanceVip.release_vip(self.context, cur_vip)
                    elif inst_type in [DBInstanceType.READ_REPLI]:
                        vipService.InstanceVip.deallocate(self.context, instance_id, deleted=False, purge=True)
                except Exception as e:
                    LOG.error(utils.get_traceback_stack())
Example #27
 def test_NULLlist_by_gid(self):
     group_id = "not_found"
     db_list = InstanceGroupItem.list_by_gid(self.context, group_id)
     self.assertEqual(0,len(db_list))