def setUp(self):
    """Create one snapshot-type backup and one already-expired backup for
    the same instance so the query tests can filter on type and expiry."""
    super(BackupQueryTest, self).setUp()
    self.context = _prep_conf(utils.utcnow())
    current_time = str(utils.utcnow())
    self.instance_id = 'INSTANCE-' + current_time
    self.snapshot = models.DBBackup.create(tenant_id=self.context.tenant,
                                           name=BACKUP_NAME,
                                           state=BACKUP_STATE,
                                           instance_id=self.instance_id,
                                           deleted=False, size=2.0,
                                           location=BACKUP_LOCATION,
                                           group_id=GROUP_ID,
                                           type=TYPE)
    import time
    # NOTE(review): presumably sleeps so the two records get distinct
    # timestamps -- confirm; it slows every test run by two seconds.
    time.sleep(2)
    # Two days in the past, i.e. this backup is already expired.
    expire_at = int(time.time()) - (2 * 24 * 60 * 60)
    self.backup = models.DBBackup.create(tenant_id=self.context.tenant,
                                         name=BACKUP_NAME,
                                         state=BACKUP_STATE,
                                         instance_id=self.instance_id,
                                         deleted=False, size=2.0,
                                         location=BACKUP_LOCATION,
                                         group_id=GROUP_ID,
                                         type=BACKUP_TYPE,
                                         expire_at=expire_at)
    self.deleted = False
def __init__(self, tenant_id, resource, hard_limit,
             id=None, created=None, update=None):
    """Initialise a quota-style record.

    BUG FIX: the original used ``id=utils.generate_uuid()``,
    ``created=utils.utcnow()`` and ``update=utils.utcnow()`` as defaults.
    Default values are evaluated once at function-definition time, so every
    instance built without explicit values shared the same id and
    timestamps. Generate them per call instead; passing explicit values
    behaves exactly as before.
    """
    self.tenant_id = tenant_id
    self.resource = resource
    self.hard_limit = hard_limit
    self.id = id if id is not None else utils.generate_uuid()
    self.created = created if created is not None else utils.utcnow()
    self.update = update if update is not None else utils.utcnow()
def delete(self):
    """Delete this record: soft-delete (set 'deleted'/'deleted_at') when
    preserve_on_delete is set, otherwise remove the row outright.
    """
    self['updated'] = utils.utcnow()
    # Consistency: use named placeholders like the other delete()
    # implementation in this file; the rendered log text is identical.
    LOG.debug(_("Deleting %(name)s: %(dict)s")
              % {'name': self.__class__.__name__,
                 'dict': self.__dict__})
    if self.preserve_on_delete:
        self['deleted_at'] = utils.utcnow()
        self['deleted'] = True
        return self.db_api.save(self)
    else:
        return self.db_api.delete(self)
def delete(self):
    """Remove this record, honouring the soft-delete flag.

    When preserve_on_delete is set, only mark the row deleted and stamp
    'deleted_at'; otherwise delete the row from the database.
    """
    self['updated'] = utils.utcnow()
    details = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug(_("Deleting %(name)s: %(dict)s") % details)
    if not self.preserve_on_delete:
        return self.db_api.delete(self)
    self['deleted_at'] = utils.utcnow()
    self['deleted'] = True
    return self.db_api.save(self)
def setUp(self):
    """Create a single backup row, keyed to a fresh timestamp-based
    instance id, for the ORM tests."""
    super(BackupORMTest, self).setUp()
    self.context = _prep_conf(utils.utcnow())
    current_time = str(utils.utcnow())
    self.instance_id = 'INSTANCE-' + current_time
    # NOTE(review): other setUp variants in this file pass type=; this one
    # passes instType= -- confirm the model accepts that keyword.
    self.backup = models.DBBackup.create(tenant_id=self.context.tenant,
                                         name=BACKUP_NAME,
                                         state=BACKUP_STATE,
                                         instance_id=self.instance_id,
                                         deleted=False, size=2.0,
                                         location=BACKUP_LOCATION + "/" + BACKUP_FILENAME,
                                         group_id=GROUP_ID,
                                         instType=TYPE)
def _datastore_version_metadata_add(cls, datastore_name, datastore_version_name, datastore_version_id, key, value, exception_class): """ Create a record of the specified key and value in the metadata table. """ # if an association does not exist, create a new one. # if a deleted association exists, undelete it. # if an un-deleted association exists, raise an exception. try: db_record = DBDatastoreVersionMetadata.find_by( datastore_version_id=datastore_version_id, key=key, value=value) if db_record.deleted == 1: db_record.deleted = 0 db_record.updated_at = utils.utcnow() db_record.save() return else: raise exception_class( datastore=datastore_name, datastore_version=datastore_version_name, id=value) except exception.NotFound: pass # the record in the database only contains the datastore_verion_id DBDatastoreVersionMetadata.create( datastore_version_id=datastore_version_id, key=key, value=value)
def save(self):
    """Validate this model, stamp 'updated_at', and persist it."""
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    self['updated_at'] = utils.utcnow()
    model_name = self.__class__.__name__
    LOG.debug(_("Saving %s: %s") % (model_name, self.__dict__))
    db_api = get_db_api()
    return db_api.save(self)
def save(self):
    """Validate, stamp 'updated', and persist through self.db_api."""
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    self['updated'] = utils.utcnow()
    debug_info = {'name': self.__class__.__name__, 'dict': self.__dict__}
    LOG.debug(_("Saving %(name)s: %(dict)s") % debug_info)
    return self.db_api.save(self)
def setUp(self):
    """Seed four backups with out-of-order 'updated' timestamps so the
    ordering tests can verify sort behaviour."""
    super(OrderingTests, self).setUp()
    util.init_db()
    now = utils.utcnow()
    self.context, self.instance_id = _prep_conf(now)
    info = {
        'tenant_id': self.context.tenant,
        'state': BACKUP_STATE,
        'instance_id': self.instance_id,
        'size': 2.0,
        'deleted': False
    }
    four = now - datetime.timedelta(days=4)
    one = now - datetime.timedelta(days=1)
    three = now - datetime.timedelta(days=3)
    two = now - datetime.timedelta(days=2)
    # Create backups out of order, save/create set the 'updated' field,
    # so we need to use the db_api directly.
    models.DBBackup().db_api.save(
        models.DBBackup(name='four', updated=four,
                        id=utils.generate_uuid(), **info))
    models.DBBackup().db_api.save(
        models.DBBackup(name='one', updated=one,
                        id=utils.generate_uuid(), **info))
    models.DBBackup().db_api.save(
        models.DBBackup(name='three', updated=three,
                        id=utils.generate_uuid(), **info))
    models.DBBackup().db_api.save(
        models.DBBackup(name='two', updated=two,
                        id=utils.generate_uuid(), **info))
def _datastore_version_metadata_add(cls, datastore_version_id, key, value, exception_class): """Create an entry in the Datastore Version Metadata table.""" # Do we have a mapping in the db? # yes: and its deleted then modify the association # yes: and its not deleted then error on create # no: then just create the new association try: db_record = DBDatastoreVersionMetadata.find_by( datastore_version_id=datastore_version_id, key=key, value=value) if db_record.deleted == 1: db_record.deleted = 0 db_record.updated_at = utils.utcnow() db_record.save() return else: raise exception_class( datastore_version_id=datastore_version_id, flavor_id=value) except exception.NotFound: pass DBDatastoreVersionMetadata.create( datastore_version_id=datastore_version_id, key=key, value=value)
def delete_async(self, fake):
    """Delete this instance's resources asynchronously.

    When `fake` is True and the instance is a MASTER, the record is kept
    in a FAKE_DELETED state (security group and server deletion are
    skipped elsewhere) so it can be restored later.
    """
    LOG.debug("prepare delete instance %s, fake: %s " % (self.id, fake))
    modified_group_id = self.group_id
    self._delete_resources(fake)
    # Delete guest queue.
    _item = InstanceGroupItem.get_by_instance_id(self.context, self.id)
    group_id = _item.group_id
    # NOTE(review): del_instance_type is never used below -- confirm it
    # can be removed.
    del_instance_type = _item.type
    # if size of item_list equal 1,then we will delete last instance in group
    item_list = InstanceGroupItem.list_by_gid(self.context, modified_group_id)
    if len(item_list) == 1:
        if CONF.trove_security_groups_support:
            if fake is True:
                LOG.debug("fake is True, %s skip delete secgroup rules",
                          self.group_id)
            else:
                # Delete associated security group
                self.update_db(
                    task_status=InstanceTasks.DELETEING_SECURITY_GROUP)
                try:
                    SecurityGroup.delete_for_group(modified_group_id,
                                                   self.context)
                except Exception as e:
                    # Best-effort: log and continue tearing down.
                    LOG.error(utils.get_traceback_stack())
    self.set_servicestatus_deleted()
    # zs: configuration is needed for restore deleted instance, DO NOT DELETE!
    # self._delete_instance_config()
    LOG.info("Delete instance_group_item for instance %s" % self.id)
    _type = self.type
    InstanceGroupItem.delete(self.context, self.id)
    deleted_at = utils.utcnow()
    if fake is True and _type == DBInstanceType.MASTER:
        # Fake-delete keeps the row restorable under FAKE_DELETED.
        LOG.debug("fake is True, %s is MASTER, set task_status :%s ",
                  self.id, InstanceTasks.FAKE_DELETED)
        self.update_db(deleted=True, deleted_at=deleted_at,
                       task_status=InstanceTasks.FAKE_DELETED)
    else:
        self.update_db(deleted=True, deleted_at=deleted_at,
                       task_status=InstanceTasks.NONE)
    LOG.info("send notify to monitor when delete instance %s" % self.id)
    group_rpcapi.API(self.context).group_update(group_id,
                                                notify_when_delete=True)
    if len(item_list) == 1:
        # Last member gone: remove the group record as well.
        LOG.info("Delete group %s" % group_id)
        InstanceGroup.delete(self.context, group_id)
    self._send_usage_event(self.server, utils.utcnow())
def save(self):
    """Validate, stamp 'updated', then hand off to the db api."""
    if not self.is_valid():
        raise exception.InvalidModelError(errors=self.errors)
    self['updated'] = utils.utcnow()
    LOG.debug("Saving %(name)s: %(dict)s"
              % {'name': self.__class__.__name__,
                 'dict': self.__dict__})
    return self.db_api.save(self)
def create(cls, **values):
    """Create and persist a new model instance.

    Fills in 'id', 'deleted' (when the model has that column) and
    'created' defaults, validates, then saves.

    FIX: the original called ``cls(**values).save()`` first and only then
    checked ``is_valid()``, so invalid rows were persisted before the
    InvalidModelError was raised. Validate before saving, matching the
    other create() implementation in this file.
    """
    if 'id' not in values:
        values['id'] = utils.generate_uuid()
    if hasattr(cls, 'deleted') and 'deleted' not in values:
        values['deleted'] = False
    values['created'] = utils.utcnow()
    instance = cls(**values)
    if not instance.is_valid():
        raise exception.InvalidModelError(errors=instance.errors)
    return instance.save()
def setUp(self):
    """Create a minimal backup row (no size/group) for the ORM tests."""
    super(BackupORMTest, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(utils.utcnow())
    self.backup = models.DBBackup.create(tenant_id=self.context.tenant,
                                         name=BACKUP_NAME,
                                         state=BACKUP_STATE,
                                         instance_id=self.instance_id,
                                         deleted=False,
                                         location=BACKUP_LOCATION)
    # Flag flipped by tests that delete the backup -- presumably consulted
    # by tearDown; confirm.
    self.deleted = False
def create(cls, **values):
    """Build, validate, and persist a new model instance.

    Supplies defaults for 'id', 'created' and (when present on the model)
    'deleted'; caller-provided values override them.
    """
    defaults = {
        'id': utils.generate_uuid(),
        'created': utils.utcnow(),
    }
    if hasattr(cls, 'deleted'):
        defaults['deleted'] = False
    defaults.update(values)
    model = cls(**defaults)
    if not model.is_valid():
        raise exception.InvalidModelError(errors=model.errors)
    return model.save()
def setUp(self):
    """Create a single backup row used by the ORM tests."""
    super(BackupORMTest, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(utils.utcnow())
    backup_fields = {
        'tenant_id': self.context.tenant,
        'name': BACKUP_NAME,
        'state': BACKUP_STATE,
        'instance_id': self.instance_id,
        'deleted': False,
        'size': 2.0,
        'location': BACKUP_LOCATION,
    }
    self.backup = models.DBBackup.create(**backup_fields)
    self.deleted = False
def delete_async(self):
    """Asynchronously delete this instance and its group resources.

    For a MASTER, also deletes its STANDBY sibling. When this is the last
    item in the group, cleans up the security group, notifies the monitor,
    and removes the group and its autobackup settings.

    FIX: the bare ``except:`` guarding AutoBackup.delete also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    LOG.debug("prepare delete instance %s" % self.id)
    deleted_at = utils.utcnow()
    # Delete guest queue.
    _item = InstanceGroupItem.get_by_instance_id(self.context, self.id)
    group_id = _item.group_id
    if _item.type == DBInstanceType.MASTER:
        standby = None
        try:
            standby = InstanceGroupItem.get_by_gid_type(
                self.context, group_id, DBInstanceType.STANDBY)
        except exception.ModelNotFoundError:
            # No standby attached to this master -- nothing extra to do.
            pass
        if standby is not None:
            standby.delete()
            standby_inst_id = standby.instance_id
            standby_tasks = self.load(self.context, standby_inst_id)
            standby_tasks.update_db(deleted=True, deleted_at=deleted_at,
                                    task_status=InstanceTasks.NONE)
            standby_tasks.set_servicestatus_deleted()
            standby_tasks._delete_instance_config()
    item_list = InstanceGroupItem.list_by_gid(self.context, group_id)
    if len(item_list) == 1:
        # Delete associated security group
        if CONF.trove_security_groups_support:
            try:
                SecurityGroup.delete_for_group(self.group_id, self.context)
            except Exception as e:
                LOG.error(utils.get_traceback_stack())
        LOG.info("send notify to monitor when delete instance %s" % self.id)
        group_rpcapi.API(self.context).group_update(group_id,
                                                    notify_when_delete=True)
        LOG.info("Delete group %s" % group_id)
        InstanceGroup.delete(self.context, group_id)
        if _item.type in [DBInstanceType.MASTER, DBInstanceType.SINGLE]:
            try:
                LOG.info("Delete autobackup_setting of group_id %s" % group_id)
                AutoBackup.delete(self.context, group_id)
            except Exception:
                # Best-effort cleanup: log and continue.
                LOG.error(utils.get_traceback_stack())
    _item.delete()
    self.update_db(deleted=True, deleted_at=deleted_at,
                   task_status=InstanceTasks.NONE)
    self.set_servicestatus_deleted()
def default_config_items_update(self, id, value_file, config_name=None,
                                description=None):
    """Replace the items of the default configuration template `id` with
    values parsed from `value_file` (a my.cnf-style file), optionally
    renaming the group or updating its description.

    Values are type-coerced according to the mysql validation rules and
    validated before the old items are removed and the new ones saved.

    BUG FIX: the original handler read ``except exception as e`` -- that
    names the trove exception *module*, not an exception class, so errors
    were never caught here. Also closes the value file deterministically.
    """
    try:
        self.db_api.configure_db(CONF)
        context = TroveContext(tenant=CONF.default_template_tenant_id)
        context.is_admin = True
        group = config_models.KSC_Configuration.load(context, id)
        # if name/description are provided in the request body, update the
        # model with these values as well.
        if config_name is not None:
            group.name = config_name
        if description is not None:
            group.description = description
        fmt_values = {}
        if value_file is not None and os.path.isfile(value_file):
            # Close the handle deterministically (was open(...).read()).
            with open(value_file) as f:
                config = f.read()
            cfg_parser = configurations.MySQLConfParser(config)
            values = dict(cfg_parser.parse())
            rules = configurations.get_validation_rules(
                datastore_manager='mysql')
            for k, v in values.iteritems():
                # _get_item ensures a validation rule exists for the key
                # and raises otherwise.
                rule = config_service.ConfigurationsController._get_item(
                    k, rules['configuration-parameters'])
                value_type = rule.get('type')
                if "integer" == value_type:
                    fmt_values[k] = int(v)
                elif "boolean" == value_type:
                    # NOTE(review): bool(v) on any non-empty string is True
                    # ("false" -> True); preserved as-is -- confirm inputs
                    # are already 0/1.
                    fmt_values[k] = bool(v)
                else:
                    fmt_values[k] = v
        if len(fmt_values) > 0:
            config_service.ConfigurationsController._validate_configuration(
                fmt_values, datastore_manager='mysql')
        config_models.KSC_Configuration.remove_all_items(context, id,
                                                         utils.utcnow())
        items = config_models.KSC_Configuration.get_items_by_overrides(
            context, id, fmt_values)
        config_models.KSC_Configuration.save(context, group, items, [])
    except Exception as e:
        print(e)
def setUp(self):
    """Seed 50 backup rows so pagination can be exercised."""
    super(PaginationTests, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(utils.utcnow())
    # Create a bunch of backups
    bkup_info = {
        'tenant_id': self.context.tenant,
        'state': BACKUP_STATE,
        'instance_id': self.instance_id,
        'size': 2.0,
        'deleted': False
    }
    for backup in xrange(50):
        # Reuse the shared dict, only the name changes per row.
        bkup_info.update({'name': 'Backup-%s' % backup})
        models.DBBackup.create(**bkup_info)
def setUp(self):
    """Seed 50 backup rows for the pagination tests."""
    super(PaginationTests, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(utils.utcnow())
    # Fields shared by every row; only the name varies.
    common = {
        "tenant_id": self.context.tenant,
        "state": BACKUP_STATE,
        "instance_id": self.instance_id,
        "size": 2.0,
        "deleted": False,
    }
    for idx in xrange(50):
        models.DBBackup.create(name="Backup-%s" % idx, **common)
def delete(context, group):
    """Soft-delete a configuration group and remove all of its items.

    Non-admins may not delete groups owned by the default-template tenant.
    An instance-level config still referenced by live instances is left
    untouched (the function silently returns).
    """
    if not context.is_admin and group.tenant_id == CONF.default_template_tenant_id:
        # NOTE(review): raises the Forbidden class itself rather than an
        # instance -- legal in Python, but confirm this is intentional.
        raise exception.Forbidden
    if group.config_type == ConfigurationType.INSTANCE_CONFIG:
        instances = instances_models.DBInstance.find_all(
            configuration_id=group.id,
            deleted=False).all()
        if instances:
            # Still attached to live instances: refuse silently.
            return
    deleted_at = utils.utcnow()
    KSC_Configuration.remove_all_items(context, group.id, deleted_at)
    group.deleted = True
    group.deleted_at = deleted_at
    group.save()
def setUp(self):
    """Prepare a mysql datastore/version plus tenant contexts for the
    configuration tests."""
    util.init_db()
    ds_models.update_datastore("mysql")
    self.datastore = ds_models.Datastore.load("mysql")
    ds_models.update_datastore_version(self.datastore.id, "mysql-55",
                                       "mysql", "None", "None", True)
    self.ds_version = ds_models.DatastoreVersion.load(self.datastore,
                                                      "mysql-55")
    test_time = utils.utcnow()
    # Trailing space in the names is preserved from the original data.
    self.conf_name = "test_name_%s " % test_time
    self.conf_desc = "test_desc_%s " % test_time
    self.configuration = None
    self.defaultConfiguration = None
    # NOTE(review): "tanant" is a typo for "tenant", but the attribute may
    # be referenced elsewhere in the tests -- left as-is.
    self.tanant_id = "fake_fore_1231242345"
    self.context = TroveContext(tenant=self.tanant_id)
    self.default_tanant_id = CONF.default_template_tenant_id
def update(self, **values):
    """Set each matching attribute, stamp 'updated', and persist."""
    for attr, val in values.items():
        # Only assign keys that already exist as attributes.
        if hasattr(self, attr):
            setattr(self, attr, val)
    self['updated'] = utils.utcnow()
    return self.db_api.save(self)
def fake_deleted_instance_delete(cls, context, instance_id):
    """Really delete an instance previously put in the FAKE_DELETED state.

    Best-effort teardown of the compute server, vip and security group;
    failures are accumulated into `msg` and reported via the
    (success, msg) return pair rather than raised.
    """
    base_msg = " instance_id: %s " % instance_id
    success = True
    msg = " fake_deleted_instance_delete %s " % base_msg
    deleted_at = utils.utcnow()
    db_info = None
    try:
        db_info = DBInstance.find_by(context=context, id=instance_id,
                                     task_id=InstanceTasks.FAKE_DELETED.code,
                                     deleted=True)
        db_info.update(task_status=InstanceTasks.DELETING)
        LOG.debug("fake_deleted_instance_delete, load instance ok, %s "
                  % base_msg)
    except Exception:
        LOG.debug("fake_deleted_instance_delete failed, deleted instance "
                  "not found, %s " % base_msg)
    if db_info is None:
        # Nothing to clean up: the fake-deleted row could not be loaded.
        success = False
        msg = " fake_deleted_instance_delete failed, load instance error %s " % base_msg
        return success, msg
    try:
        server = load_server(context, db_info.id, db_info.compute_instance_id)
        LOG.debug("fake_deleted_instance_delete, load server: %s ok, %s ",
                  db_info.compute_instance_id, base_msg)
        nova_client = create_nova_client(context)

        def server_is_finished():
            # Poll predicate: True once nova reports the server gone.
            try:
                server_id = db_info.compute_instance_id
                _server = nova_client.servers.get(server_id)
                if _server.status not in ['SHUTDOWN', 'ACTIVE']:
                    _msg = "Server %s got into %s status during delete " \
                           "of instance %s!" % (server.id, _server.status,
                                                instance_id)
                    LOG.error(_msg)
                return False
            except nova_exceptions.NotFound:
                return True
        try:
            LOG.debug("Delete compute server %s" % server.id)
            server.delete()
            poll_until(server_is_finished, sleep_time=1,
                       time_out=CONF.server_delete_time_out)
            guest = create_guest_client(context, db_info.id)
            guest.delete_queue()
            LOG.debug("fake_deleted_instance_delete, delete server: %s ok, %s ",
                      db_info.compute_instance_id, base_msg)
        except Exception as ex:
            LOG.error(utils.get_traceback_stack())
            success = False
            msg += " ,deleted server error, compute_instance_id: %s, ex:%s, %s " \
                   % (db_info.compute_instance_id, str(ex), base_msg)
    except Exception as ex:
        LOG.error("COMPUTE ID = %s" % db_info.compute_instance_id)
        success = False
        msg += " ,load server error, compute_instance_id: %s, %s " \
               % (db_info.compute_instance_id, base_msg)
    if CONF.trove_vip_support:
        try:
            db_info.update(task_status=InstanceTasks.RELEASE_VIP)
            instance_vip = DBInstanceVip.find_by(context,
                                                 instance_id=instance_id,
                                                 deleted=False)
            vip_info = DBVips.find_by(context, id=instance_vip.vip_id,
                                      deleted=False)
            InstanceVip.release_vip(context, vip_info.vip)
            LOG.debug("fake_deleted_instance_delete, release_vip: %s ok, %s "
                      % (vip_info.vip, base_msg))
        except Exception as ex:
            LOG.error(utils.get_traceback_stack())
            success = False
            msg += " ,release_vip error, ex:%s, %s " % (str(ex), base_msg)
    if CONF.trove_security_groups_support:
        db_info.update(task_status=InstanceTasks.DELETEING_SECURITY_GROUP)
        try:
            SecurityGroup.delete_for_group(db_info.group_id, context)
            LOG.debug(
                "fake_deleted_instance_delete, delete SecurityGroup: %s ok, %s "
                % (db_info.group_id, base_msg))
        except Exception as ex:
            LOG.error(utils.get_traceback_stack())
            success = False
            msg += " ,delete SecurityGroup error, ex:%s, %s " % (str(ex), base_msg)
    db_info.update(deleted_at=deleted_at, task_status=InstanceTasks.NONE)
    if success is True:
        msg = "fake_deleted_instance_delete finished, %s " % base_msg
    return success, msg
def instance_id(self):
    """Build a unique instance id from the current timestamp."""
    return 'instance-' + str(utils.utcnow())
def save(self):
    """Persist this model, refreshing its 'updated_at' timestamp.

    Note: unlike the validating save() variants in this file, this one
    performs no is_valid() check before writing.
    """
    self['updated_at'] = utils.utcnow()
    return get_db_api().save(self)
def __init__(self, instance_id, user):
    """Bind the instance id, owning user, and a creation timestamp."""
    # Identifier of the underlying instance.
    self.id = instance_id
    # Owner of this record.
    self.user = user
    # Creation timestamp from the project's utcnow helper.
    self.created = utils.utcnow()
def setUp(self):
    """Initialise the db and a request context; no backup is created yet."""
    super(BackupCreateTest, self).setUp()
    util.init_db()
    self.context, self.instance_id = _prep_conf(utils.utcnow())
    # Flipped to True by tests that create a backup -- presumably checked
    # in tearDown; confirm.
    self.created = False
def group_id(self):
    """Build a unique group id from the current timestamp."""
    return 'group-' + str(utils.utcnow())
def _delete_resources(self, fake):
    """Tear down everything owned by this instance: standby sibling,
    compute server, autobackup settings and vip.

    When `fake` is True and the instance is a MASTER, the server and vip
    are kept (only mysqld is stopped) so the instance can be restored.
    """
    group_item = InstanceGroupItem.get_by_instance_id(self.context, self.id)
    group_id = group_item.group_id
    inst_type = group_item.type
    instance_id = self.db_info.id
    if self.server and self.db_info.server_status == "ACTIVE":
        # set instance to read only model
        LOG.info("Set readonly for instance %s" % self.id)
        self._set_instance_readonly(instance_id=self.id)
    else:
        LOG.info("vm_status is not ACTIVE for %s" % self.id)
    if inst_type == DBInstanceType.MASTER:
        rrinsts = []
        try:
            standby = InstanceGroupItem.get_by_gid_type(
                self.context, group_id, DBInstanceType.STANDBY)
            rrinsts = InstanceGroupItem.get_by_gid_type(
                self.context, group_id, DBInstanceType.READ_REPLI)
            standby_inst_id = standby.instance_id
            # NOTE(review): a single tuple is passed for three %s
            # placeholders -- lazy formatting will fail at log time;
            # should be LOG.info(fmt, a, b, c).
            LOG.info("MASTER %s,it hava STANDBY %s,RRS %s",
                     (self.id, standby_inst_id,
                      [_inst.instance_id for _inst in rrinsts]))
            InstanceGroupItem.delete(self.context, standby_inst_id)
        except Exception as e:
            LOG.error(utils.get_traceback_stack())
        # waite replication group db sysnc
        if len(rrinsts) > 0:
            self.guest.ksc_set_read_only(True)
            for _inst in rrinsts:
                try:
                    rr_instance = self.load(self.context, _inst.instance_id)
                    rr_instance.waite_rpl_synchronize(
                        time_out=CONF.delete_waite_rplg_sync)
                except Exception as e:
                    LOG.error(utils.get_traceback_stack())
        # delete standby instance
        try:
            try:
                standby_instance = self.load(self.context, standby_inst_id)
            except exception.UnprocessableEntity:
                standby_instance = FreshInstanceTasks.load(self.context,
                                                           standby_inst_id)
            standby_instance.update_db(deleted=True,
                                       deleted_at=utils.utcnow(),
                                       task_status=InstanceTasks.NONE)
            standby_instance.set_servicestatus_deleted()
            standby_instance._delete_instance_config()
            if standby_instance.server:
                LOG.info("Delete STANDBY compute server %s"
                         % standby_instance.server.id)
                standby_instance.get_guest().delete_queue()
                standby_instance.server.delete()
                poll_until(standby_instance.server_is_finished,
                           sleep_time=1,
                           time_out=CONF.server_delete_time_out)
            else:
                LOG.info("standby instance vm_status is not ACTIVE for %s"
                         % standby_inst_id)
        except Exception as e:
            LOG.error(utils.get_traceback_stack())
    if fake is True and self.type == DBInstanceType.MASTER:
        # Fake-delete keeps the server; just stop mysqld on it.
        try:
            LOG.debug("fake is True, %s is MASTER, stop mysqld", self.id)
            self.guest.ksc_stop_db(do_not_start_on_reboot=True)
        except Exception as e:
            msg = "fake_delete, instance: %s, stop mysqld error, exception: %s " \
                  % (self.id, str(e))
            LOG.error("%s, %s", msg, utils.get_traceback_stack())
            AlarmRpc(self.context).alarm(self.tenant_id,
                                         level=AlarmRpc.LEVEL_ERROR,
                                         _type=AlarmRpc.TYPE_TASKMANAGER,
                                         message=msg)
    if self.server:
        if fake is True and self.type == DBInstanceType.MASTER:
            LOG.debug("fake is True, %s is MASTER, skip delete server",
                      self.id)
        else:
            try:
                LOG.info("Delete compute server %s" % self.server.id)
                guest = self.get_guest()
                guest.delete_queue()
                self.server.delete()
                poll_until(self.server_is_finished, sleep_time=1,
                           time_out=CONF.server_delete_time_out)
            except Exception as e:
                LOG.error(utils.get_traceback_stack())
    # delete group_item/autobackup_setting/group
    if self.type in [DBInstanceType.MASTER, DBInstanceType.SINGLE]:
        try:
            LOG.info("Delete autobackup_setting of group_id %s" % group_id)
            AutoBackup.delete(self.context, group_id)
        except:
            LOG.error(utils.get_traceback_stack())
    # remove vip.
    if CONF.trove_vip_support and \
            self.type in [DBInstanceType.MASTER, DBInstanceType.SINGLE,
                          DBInstanceType.READ_REPLI]:
        if fake is True and self.type == DBInstanceType.MASTER:
            LOG.debug("fake is True, %s is MASTER, skip release vip", self.id)
        else:
            try:
                self.update_db(task_status=InstanceTasks.RELEASE_VIP)
                LOG.info("release vip for instance %s" % instance_id)
                if inst_type in [DBInstanceType.MASTER, DBInstanceType.SINGLE]:
                    cur_vip = vipService.InstanceVip.get_by_instance_id(
                        self.context, instance_id)
                    vipService.InstanceVip.release_vip(self.context, cur_vip)
                elif inst_type in [DBInstanceType.READ_REPLI]:
                    vipService.InstanceVip.deallocate(self.context,
                                                      instance_id,
                                                      deleted=False,
                                                      purge=True)
            except Exception as e:
                LOG.error(utils.get_traceback_stack())
def execute_backup(self, context, backup_id, runner=None,
                   type=DBInstanceType.SINGLE):
    """Run an innobackupex-based backup for `backup_id` and record the
    outcome (state, location, binlog/offset/lsn) through the conductor.

    The runner (full vs incremental, master vs slave) is chosen from the
    instance type and whether the backup has a parent; an incremental run
    needs the parent chain's lsn.
    """
    LOG.debug("Searching for backup instance %s", backup_id)
    #replace to conductor
    #backup = DBBackup.find_by(id=backup_id)
    backup = conductor_api.API(context).get_backup(backup_id)
    if not backup:
        raise Exception("get backup occur error, backup_id:%s" % backup_id)
    LOG.info("Setting task state to %s for instance %s",
             BackupState.BUILDING, backup.instance_id)
    backup.state = BackupState.BUILDING
    #replace to conductor
    #backup.save()
    conductor_api.API(context).save_backup(backup)
    backup_parent = None
    lsn = -1
    if backup.parent_id:
        # Incremental: fetch the parent and walk the chain to find the
        # lsn this run must start from.
        backup_parent = conductor_api.API(context).get_backup(backup.parent_id)
        if not backup_parent:
            raise Exception("get backup_parent occur error, backup_id:%s, parent_id:%s"
                            % (backup_id, backup.parent_id))
        backup_chain = conductor_api.API(context).get_chain_before_backup(backup_id)
        LOG.info('backup_chain: %s', backup_chain)
        if not backup_chain or len(backup_chain) < 2 or backup_chain[0]['parent_id'] is not None:
            raise Exception("get backup_chain occur error, backup_id:%s, parent_id:%s, base_backup:%s"
                            % (backup_id, backup.parent_id, backup_chain[0]['id']))
        for bk in backup_chain:
            LOG.info('backup: %s', bk)
            if bk['id'] != backup_id:
                if not bk['extend']:
                    raise Exception("get extend occur error, backup_id:%s" % bk['id'])
                lsn = json.loads(bk['extend']).get("lsn", -1)
                if lsn == -1:
                    raise Exception("get lsn occur error, backup_id:%s, parent_id:%s"
                                    % (bk['id'], bk['parent_id']))
    if backup.type == Type.AUTOBACKUP:
        _path_list = [CONF.ebs_backup_path, str(backup.group_id), str(backup.id)]
    elif backup.type == Type.SNAPSHOT:
        _path_list = [CONF.ebs_snapshot_path, str(backup.group_id), str(backup.id)]
    if type in [DBInstanceType.SINGLE, DBInstanceType.MASTER]:
        if backup_parent:
            runner = RUNNER_master_incremental
            _path_list = [CONF.ebs_incremental_path, str(backup.group_id), str(backup.id)]
        else:
            runner = RUNNER_master
    elif type in [DBInstanceType.STANDBY, DBInstanceType.READ_REPLI]:
        if backup_parent:
            runner = RUNNER_slave_incremental
            _path_list = [CONF.ebs_incremental_path, str(backup.group_id), str(backup.id)]
        else:
            runner = RUNNER_slave
    # NOTE(review): a single tuple is passed for two %s placeholders --
    # lazy formatting will fail at log time; same pattern below for the
    # "Backup %s completed" line.
    LOG.info("select backup strategies %s for instance_type %s", (runner, type))
    backup_location = "/".join(_path_list)
    LOG.info("backup_location:%s", backup_location)
    success = False
    SUCCESS_FLAG = "completed OK"
    output = cStringIO.StringIO()
    try:
        # Wait until no other innobackupex process is running.
        utils.poll_until(self._no_innobackupex_running, sleep_time=3,
                         time_out=CONF.usage_timeout)
        with runner(filename=backup_id, mysql_dir=CONF.mount_point,
                    backup_dir=backup_location, parallel=4,
                    compress_thread=4, lsn=lsn) \
                as bkup:
            LOG.info("Starting Backup: %s", backup_id)
            _fd = bkup.process.stderr
            last_line = None
            # Drain stderr; innobackupex prints "completed OK" on success
            # as its last line.
            while True:
                line = _fd.readline()
                output.write(line)
                if line:
                    last_line = line
                else:
                    break
            if last_line and last_line.find(SUCCESS_FLAG) >= 0:
                success = True
            note = "Successfully saved data to ebs!"
            location = backup_location
        if success:
            binlog, offset = self._get_binlog_and_offset(output.getvalue())
            lsn = self._get_lsn(output.getvalue())
        else:
            LOG.error("print innobackupex error info:%s", output.getvalue())
            raise BackupError("backup occur error! %s" % last_line)
        LOG.info("Backup %s completed,status: %s binlog: %s offset: %s",
                 (backup_id, success, binlog, offset))
        LOG.info('Backup %s location: %s', backup_id, backup_location)
        LOG.info('Backup %s lsn: %s', backup_id, lsn)
    except Exception as e:
        LOG.error(e)
        msg = "Error saving Backup: %s " % backup_id
        LOG.error(msg)
        AlarmRpc(context).alarm(context.tenant,
                                level=AlarmRpc.LEVEL_ERROR,
                                _type=AlarmRpc.TYPE_GUEST_AGENT,
                                message=msg + str(e))
        backup.state = BackupState.FAILED
        backup.backup_timestamp = utils.utcnow()
        #replace to conductor
        conductor_api.API(context).save_backup(backup)
        raise
    else:
        LOG.info("Saving %s Backup Info to model", backup_id)
        backup.state = BackupState.COMPLETED
        backup.location = location
        backup.note = note
        backup.backup_type = bkup.backup_type
        backup.backup_timestamp = utils.utcnow()
        backup.size = self._get_backup_size(backup_location)
        backup.extend = json.dumps({'binlog': binlog, 'offset': offset, 'lsn': lsn})
        #replace to conductor
        conductor_api.API(context).save_backup(backup)
def setUp(self):
    """Prepare a fixed vip and a unique load-balancer id for the vip
    allocation tests."""
    # Python 2 print statement (file predates py3).
    print '---setup----------'
    super(InstanceVipAllocateTest, self).setUp()
    self.vip = "192.168.16.22"
    self.lb_id = "lbid-" + str(utils.utcnow())