def purge_unused_exports():
    """Remove inactive NFSaaS exports that no longer back any live snapshot.

    Walks every CloudStack-provisioned DatabaseInfra; for each database
    instance, every export flagged is_active=False that has no un-purged
    snapshot is wiped (clean_unused_data), access-revoked, dropped on the
    provider and finally deleted locally.
    """
    # Imported here rather than at module top — presumably to avoid an
    # import cycle; confirm before hoisting.
    from dbaas_nfsaas.models import HostAttr
    databaseinfras = DatabaseInfra.objects.filter(
        plan__provider=Plan.CLOUDSTACK).prefetch_related('instances')
    for databaseinfra in databaseinfras:
        instances = databaseinfra.get_driver().get_database_instances()
        environment = databaseinfra.environment
        for instance in instances:
            # Only inactive exports are candidates for removal.
            exports = HostAttr.objects.filter(host=instance.hostname,
                                              is_active=False)
            for export in exports:
                # An un-purged snapshot keeps the export alive.
                snapshots = Snapshot.objects.filter(
                    export_path=export.nfsaas_path, purge_at=None)
                if snapshots:
                    continue
                LOG.info(
                    'Export {} will be removed'.format(export.nfsaas_export_id))
                host = export.host
                export_id = export.nfsaas_export_id
                # Wipe remaining files on the export before dropping it.
                clean_unused_data(export_id=export_id,
                                  export_path=export.nfsaas_path,
                                  host=instance.hostname,
                                  databaseinfra=databaseinfra)
                nfsaas_client = NfsaasProvider()
                nfsaas_client.revoke_access(environment=environment,
                                            host=host,
                                            export_id=export_id)
                nfsaas_client.drop_export(environment=environment,
                                          export_id=export_id)
                export.delete()
def undo(self, workflow_dict):
    """Roll back volume creation: revoke access, drop the export, delete it.

    Returns True on success (or when no volume was recorded); on failure
    records DBAAS_0022 plus the traceback in workflow_dict['exceptions']
    and returns False.
    """
    try:
        env = workflow_dict['environment']
        if 'volume' in workflow_dict:
            vol = workflow_dict['volume']
            LOG.info("Destroying nfsaas volume...")
            client = NfsaasProvider()
            client.revoke_access(environment=env,
                                 host=vol.host,
                                 export_id=vol.nfsaas_export_id)
            client.drop_export(environment=env,
                               export_id=vol.nfsaas_export_id)
            vol.delete()
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0022)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def clean_unused_data(export_id, export_path, host, databaseinfra):
    """Mount the export on *host*, wipe its contents, unmount and remove
    the temporary mount point.

    Access to the export is granted first so the host can mount it; the
    whole cleanup runs as a single remote shell script.
    """
    NfsaasProvider().grant_access(environment=databaseinfra.environment,
                                  host=host,
                                  export_id=export_id)
    mount_point = "/mnt_{}_{}".format(databaseinfra.name, export_id)
    # One script: create mount dir, mount, wipe, unmount, remove the dir.
    script = "\n".join([
        "mkdir -p {}".format(mount_point),
        "mount -t nfs -o bg,intr {} {}".format(export_path, mount_point),
        "rm -rf {}/*".format(mount_point),
        "umount {}".format(mount_point),
        "rm -rf {}".format(mount_point),
    ])
    LOG.info(script)
    credentials = CsHostAttr.objects.get(host=host)
    result = {}
    exec_remote_command(server=host.address,
                        username=credentials.vm_user,
                        password=credentials.vm_password,
                        command=script,
                        output=result)
    LOG.info(result)
def purge_unused_exports():
    """Drop inactive NFSaaS exports that no longer hold any live snapshot.

    Walks every CloudStack DatabaseInfra, skipping arbiter/sentinel
    instances (no data disk), and for each inactive export with no
    un-purged snapshot: revoke access, drop the export, delete the record.
    """
    from dbaas_nfsaas.models import HostAttr
    databaseinfras = DatabaseInfra.objects.filter(plan__provider=Plan.CLOUDSTACK).prefetch_related('instances')
    for databaseinfra in databaseinfras:
        # Arbiters and sentinels carry no NFSaaS disk, so skip them.
        instances = databaseinfra.instances.exclude(instance_type__in=[Instance.MONGODB_ARBITER, Instance.REDIS_SENTINEL])
        environment = databaseinfra.environment
        plan = databaseinfra.plan
        for instance in instances:
            exports = HostAttr.objects.filter(host=instance.hostname, is_active=False)
            for export in exports:
                # An un-purged snapshot keeps the export alive.
                snapshots = Snapshot.objects.filter(export_path=export.nfsaas_path, purge_at=None)
                if snapshots:
                    continue
                LOG.info('Export {} will be removed'.format(export.nfsaas_export_id))
                host = export.host
                export_id = export.nfsaas_export_id
                nfsaas_client = NfsaasProvider()
                nfsaas_client.revoke_access(environment=environment, plan=plan, host=host, export_id=export_id)
                nfsaas_client.drop_export(environment=environment, plan=plan, export_id=export_id)
                export.delete()
def undo(self, workflow_dict):
    """Undo snapshot creation: release the database lock if we took it and
    delete the NFSaaS snapshot recorded under 'snapshopt_id'.

    Records DBAAS_0020 in workflow_dict['exceptions'] on failure.
    """
    LOG.info("Running undo...")
    try:
        from dbaas_nfsaas.models import HostAttr
        infra = workflow_dict['databaseinfra']
        src_instance = workflow_dict['source_instances'][0]
        # Only unlock when the forward step actually locked the database.
        if workflow_dict.get('database_locked'):
            driver = infra.get_driver()
            driver.unlock_database(driver.get_client(src_instance))
        if 'snapshopt_id' in workflow_dict:
            active_attr = HostAttr.objects.get(host=src_instance.hostname,
                                               is_active=True)
            NfsaasProvider.remove_snapshot(
                environment=infra.environment,
                host_attr=active_attr,
                snapshot_id=workflow_dict['snapshopt_id'])
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def undo(self, workflow_dict):
    """Best-effort rollback: unlock the database (if this workflow locked
    it) and remove the NFSaaS snapshot recorded in the workflow.

    Returns True on success; on any failure records DBAAS_0020 plus the
    traceback in workflow_dict["exceptions"] and returns False.
    """
    LOG.info("Running undo...")
    try:
        from dbaas_nfsaas.models import HostAttr
        databaseinfra = workflow_dict["databaseinfra"]
        instance = workflow_dict["source_instances"][0]
        # Only unlock when the forward step actually locked the database.
        if "database_locked" in workflow_dict and workflow_dict["database_locked"]:
            driver = databaseinfra.get_driver()
            client = driver.get_client(instance)
            driver.unlock_database(client)
        # NOTE: the key is spelled 'snapshopt_id' consistently in this codebase.
        if "snapshopt_id" in workflow_dict:
            host_attr = HostAttr.objects.get(host=instance.hostname, is_active=True)
            NfsaasProvider.remove_snapshot(
                environment=databaseinfra.environment,
                host_attr=host_attr,
                snapshot_id=workflow_dict["snapshopt_id"],
            )
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict["exceptions"]["error_codes"].append(DBAAS_0020)
        workflow_dict["exceptions"]["traceback"].append(traceback)
        return False
def do(self, workflow_dict):
    """Restore an NFSaaS snapshot into a brand-new export and register it.

    Triggers restore_snapshot, polls the asynchronous job for the new
    export's id/path, records old/new export data in
    workflow_dict['hosts_and_exports'] and clones the old HostAttr row for
    the new export (created inactive).

    Returns True on success; records DBAAS_0021 on failure.
    """
    try:
        workflow_dict['hosts_and_exports'] = []
        databaseinfra = workflow_dict['databaseinfra']
        snapshot_id = workflow_dict['snapshot_id']
        nfsaas_export_id = workflow_dict['export_id_snapshot']
        provider = NfsaasProvider()
        restore_result = provider.restore_snapshot(
            environment=databaseinfra.environment,
            export_id=nfsaas_export_id,
            snapshot_id=snapshot_id)
        # The restore is asynchronous: poll the job for the new export data.
        job_result = provider.check_restore_nfsaas_job(
            environment=databaseinfra.environment,
            job_id=restore_result['job'])
        if 'id' in job_result['result']:
            new_export_id = job_result['result']['id']
            new_export_path = job_result['result']['path']
        else:
            raise Exception('Error while restoring nfs snapshot')
        host = workflow_dict['host']
        workflow_dict['hosts_and_exports'].append({
            'host': host,
            'old_export_id': workflow_dict['export_id'],
            'old_export_path': workflow_dict['export_path'],
            'new_export_id': new_export_id,
            'new_export_path': new_export_path,
        })
        old_host_attr = HostAttr.objects.get(
            nfsaas_export_id=nfsaas_export_id)
        # Clone the old attr row for the new export.  It starts inactive —
        # presumably activated later in the restore workflow; confirm.
        new_host_attr = HostAttr()
        new_host_attr.host = old_host_attr.host
        new_host_attr.nfsaas_export_id = new_export_id
        new_host_attr.nfsaas_path = new_export_path
        new_host_attr.is_active = False
        new_host_attr.nfsaas_team_id = old_host_attr.nfsaas_team_id
        new_host_attr.nfsaas_project_id = old_host_attr.nfsaas_project_id
        new_host_attr.nfsaas_environment_id = old_host_attr.nfsaas_environment_id
        new_host_attr.nfsaas_size_id = old_host_attr.nfsaas_size_id
        new_host_attr.save()
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0021)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def remove_snapshot_backup(snapshot):
    """Delete *snapshot* on the NFSaaS provider and stamp its purge time."""
    LOG.info("Removing backup for %s" % (snapshot))
    infra = snapshot.instance.databaseinfra
    # NOTE: 'snapshopt' is the provider API's own (misspelled) keyword.
    NfsaasProvider.remove_snapshot(environment=infra.environment,
                                   plan=infra.plan,
                                   host=snapshot.instance.hostname,
                                   snapshopt=snapshot.snapshopt_id)
    snapshot.purge_at = datetime.datetime.now()
    snapshot.save()
def remove_snapshot_backup(snapshot):
    """Ask NFSaaS to drop the snapshot, then mark the local row purged."""
    LOG.info("Removing backup for %s" % (snapshot))
    backed_instance = snapshot.instance
    infra = backed_instance.databaseinfra
    # NOTE: 'snapshopt' is the provider API's own (misspelled) keyword.
    NfsaasProvider.remove_snapshot(environment=infra.environment,
                                   plan=infra.plan,
                                   host=backed_instance.hostname,
                                   snapshopt=snapshot.snapshopt_id)
    snapshot.purge_at = datetime.datetime.now()
    snapshot.save()
    return
def do(self, workflow_dict):
    """Restore an NFSaaS snapshot into a new export and register it locally.

    Starts the restore, polls the asynchronous job for the new export's
    id/path, stores old/new export data in
    workflow_dict['hosts_and_exports'] and clones the old HostAttr row for
    the new export (created inactive).  Records DBAAS_0021 on failure.
    """
    try:
        workflow_dict['hosts_and_exports'] = []
        databaseinfra = workflow_dict['databaseinfra']
        snapshot_id = workflow_dict['snapshot_id']
        nfsaas_export_id = workflow_dict['export_id_snapshot']
        provider = NfsaasProvider()
        restore_result = provider.restore_snapshot(environment=databaseinfra.environment, export_id=nfsaas_export_id, snapshot_id=snapshot_id)
        # Restore runs asynchronously on the provider; poll the job.
        job_result = provider.check_restore_nfsaas_job(environment=databaseinfra.environment, job_id=restore_result['job'])
        if 'id' in job_result['result']:
            new_export_id = job_result['result']['id']
            new_export_path = job_result['result']['path']
        else:
            raise Exception('Error while restoring nfs snapshot')
        host = workflow_dict['host']
        workflow_dict['hosts_and_exports'].append({
            'host': host,
            'old_export_id': workflow_dict['export_id'],
            'old_export_path': workflow_dict['export_path'],
            'new_export_id': new_export_id,
            'new_export_path': new_export_path,
        })
        old_host_attr = HostAttr.objects.get(nfsaas_export_id=nfsaas_export_id)
        # Clone the old attr row; the new export starts inactive.
        new_host_attr = HostAttr()
        new_host_attr.host = old_host_attr.host
        new_host_attr.nfsaas_export_id = new_export_id
        new_host_attr.nfsaas_path = new_export_path
        new_host_attr.is_active = False
        new_host_attr.nfsaas_team_id = old_host_attr.nfsaas_team_id
        new_host_attr.nfsaas_project_id = old_host_attr.nfsaas_project_id
        new_host_attr.nfsaas_environment_id = old_host_attr.nfsaas_environment_id
        new_host_attr.nfsaas_size_id = old_host_attr.nfsaas_size_id
        new_host_attr.save()
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0021)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def make_host_backup(database, instance, export_id):
    """Create an NFSaaS snapshot for *instance* and record it as a Snapshot.

    Returns True when the provider reports both an id and a name for the
    snapshot; False otherwise.  The Snapshot row is saved once, with the
    final status.
    """
    from backup.models import Snapshot
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    import datetime
    LOG.info("Make instance backup for %s" % (instance))
    export_attr = Nfsaas_HostAttr.objects.get(nfsaas_export_id=export_id)
    record = Snapshot()
    record.start_at = datetime.datetime.now()
    record.type = Snapshot.SNAPSHOPT
    record.status = Snapshot.RUNNING
    record.instance = instance
    record.environment = instance.databaseinfra.environment
    record.export_path = export_attr.nfsaas_path
    record.database_name = database.name
    infra = instance.databaseinfra
    result = NfsaasProvider.create_snapshot(
        environment=infra.environment, host=instance.hostname)
    # Success only when the provider returned both identifiers.
    succeeded = 'id' in result and 'snapshot' in result
    if succeeded:
        record.status = Snapshot.SUCCESS
        record.snapshopt_id = result['id']
        record.snapshot_name = result['snapshot']
    else:
        record.status = Snapshot.ERROR
    record.end_at = datetime.datetime.now()
    record.save()
    return succeeded
def do(self, workflow_dict):
    """Wipe database files on every source host, then destroy its disks.

    Records DBAAS_0020 in workflow_dict['exceptions'] on failure.
    """
    try:
        for host in workflow_dict['source_hosts']:
            LOG.info("Removing database files on host %s" % host)
            credentials = CsHostAttr.objects.get(host=host)
            result = {}
            exec_remote_command(
                server=host.address,
                username=credentials.vm_user,
                password=credentials.vm_password,
                command="/opt/dbaas/scripts/dbaas_deletedatabasefiles.sh",
                output=result)
            LOG.info(result)
            LOG.info("Removing disks on host %s" % host)
            NfsaasProvider().destroy_disk(
                environment=workflow_dict['source_environment'],
                host=host)
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def do(self, workflow_dict):
    """Create an NFSaaS disk for every non-arbiter instance.

    Appends each created disk to workflow_dict['disks'].  Returns False
    when any disk creation fails; records DBAAS_0009 in
    workflow_dict['exceptions'] on unexpected errors.
    """
    try:
        workflow_dict['disks'] = []
        for instance in workflow_dict['instances']:
            host = instance.hostname
            # Arbiters store no data, so they get no disk.
            if instance.is_arbiter:
                # Fixed typo in the log message ("creat" -> "create").
                LOG.info("Do not create nfsaas disk for Arbiter...")
                continue
            LOG.info("Creating nfsaas disk...")
            disk = NfsaasProvider().create_disk(
                environment=workflow_dict['environment'],
                plan=workflow_dict['plan'],
                host=host)
            if not disk:
                return False
            workflow_dict['disks'].append(disk)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0009)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def remove_snapshot_backup(snapshot):
    """Remove *snapshot* from NFSaaS (export found by path) and mark it purged."""
    from dbaas_nfsaas.models import HostAttr
    LOG.info("Removing backup for %s" % (snapshot))
    infra = snapshot.instance.databaseinfra
    export_attr = HostAttr.objects.get(nfsaas_path=snapshot.export_path)
    NfsaasProvider.remove_snapshot(environment=infra.environment,
                                   host_attr=export_attr,
                                   snapshot_id=snapshot.snapshopt_id)
    snapshot.purge_at = datetime.datetime.now()
    snapshot.save()
def make_host_backup(database, instance, export_id):
    """Snapshot *instance*'s export on NFSaaS and persist a Snapshot row.

    Returns True when the provider response carries both 'id' and
    'snapshot'; otherwise marks the row as ERROR and returns False.
    """
    from backup.models import Snapshot
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    import datetime
    LOG.info("Make instance backup for %s" % (instance))
    nfsaas_hostattr = Nfsaas_HostAttr.objects.get(nfsaas_export_id=export_id)
    snapshot = Snapshot()
    snapshot.start_at = datetime.datetime.now()
    snapshot.type = Snapshot.SNAPSHOPT
    snapshot.status = Snapshot.RUNNING
    snapshot.instance = instance
    snapshot.environment = instance.databaseinfra.environment
    snapshot.export_path = nfsaas_hostattr.nfsaas_path
    snapshot.database_name = database.name
    databaseinfra = instance.databaseinfra
    nfs_snapshot = NfsaasProvider.create_snapshot(environment=databaseinfra.environment, host=instance.hostname)
    # Success requires both identifiers in the provider response.
    if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
        snapshot.status = Snapshot.SUCCESS
        snapshot.snapshopt_id = nfs_snapshot['id']
        snapshot.snapshot_name = nfs_snapshot['snapshot']
        snapshot.end_at = datetime.datetime.now()
        snapshot.save()
        return True
    else:
        snapshot.status = Snapshot.ERROR
        snapshot.end_at = datetime.datetime.now()
        snapshot.save()
        return False
def do(self, workflow_dict):
    """Create an NFSaaS volume for the workflow host and store its HostAttr.

    Records DBAAS_0022 in workflow_dict['exceptions'] on failure.
    """
    try:
        env = workflow_dict['environment']
        selected_plan = workflow_dict['plan']
        target_host = workflow_dict['host']
        LOG.info("Creating nfsaas volume...")
        created = NfsaasProvider().create_disk(environment=env,
                                               plan=selected_plan,
                                               host=target_host)
        if not created:
            return False
        # Re-read the persisted HostAttr row for the new export path.
        workflow_dict['volume'] = HostAttr.objects.get(
            host=target_host, nfsaas_path=created['path'])
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0022)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def do(self, workflow_dict):
    """Create an NFSaaS disk for each target instance (arbiters excluded).

    Collects created disks in workflow_dict['disks'].  Returns False when
    a disk cannot be created; records DBAAS_0019 on unexpected errors.
    """
    try:
        workflow_dict['disks'] = []
        for instance in workflow_dict['target_instances']:
            # MongoDB arbiters hold no data and need no disk.
            if instance.instance_type == instance.MONGODB_ARBITER:
                # Fixed typo in the log message ("creat" -> "create").
                LOG.info("Do not create nfsaas disk for Arbiter...")
                continue
            LOG.info("Creating nfsaas disk...")
            host = instance.hostname
            disk = NfsaasProvider().create_disk(
                environment=workflow_dict['target_environment'],
                plan=workflow_dict['target_plan'],
                host=host)
            if not disk:
                LOG.info("nfsaas disk could not be created...")
                return False
            workflow_dict['disks'].append(disk)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0019)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def make_instance_snapshot_backup(instance, error):
    """Create an NFSaaS snapshot of *instance* under a database lock.

    Registers a RUNNING Snapshot row, locks the database (saving the
    binlog position first for MySQL) and stores the provider's snapshot
    id/name on the row.  On any failure the message goes into
    error['errormsg'], the row is flagged via set_backup_error and False
    is returned.

    NOTE(review): no unlock or success return is visible in this excerpt;
    the function appears to continue beyond it.
    """
    LOG.info("Make instance backup for %s" % (instance))
    snapshot = Snapshot()
    snapshot.start_at = datetime.datetime.now()
    snapshot.type = Snapshot.SNAPSHOPT
    snapshot.status = Snapshot.RUNNING
    snapshot.instance = instance
    snapshot.environment = instance.databaseinfra.environment
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    nfsaas_hostattr = Nfsaas_HostAttr.objects.get(host=instance.hostname)
    snapshot.export_path = nfsaas_hostattr.nfsaas_path
    databases = Database.objects.filter(databaseinfra=instance.databaseinfra)
    if databases:
        snapshot.database_name = databases[0].name
    snapshot.save()
    databaseinfra = instance.databaseinfra
    driver = databaseinfra.get_driver()
    client = driver.get_client(instance)
    cloudstack_hostattr = Cloudstack_HostAttr.objects.get(
        host=instance.hostname)
    try:
        LOG.debug('Locking instance %s' % str(instance))
        driver.lock_database(client)
        LOG.debug('Instance %s is locked' % str(instance))
        # MySQL needs its binlog position saved before the snapshot.
        if type(driver).__name__ == 'MySQL':
            mysql_binlog_save(client, instance, cloudstack_hostattr)
        nfs_snapshot = NfsaasProvider.create_snapshot(
            environment=databaseinfra.environment,
            plan=databaseinfra.plan,
            host=instance.hostname)
        if 'error' in nfs_snapshot:
            errormsg = nfs_snapshot['error']
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
        if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
            snapshot.snapshopt_id = nfs_snapshot['id']
            snapshot.snapshot_name = nfs_snapshot['snapshot']
        else:
            errormsg = 'There is no snapshot information'
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
    except Exception as e:  # was "except Exception, e" — Py3-incompatible syntax
        errormsg = "Error creating snapshot: %s" % (e)
        error['errormsg'] = errormsg
        set_backup_error(databaseinfra, snapshot, errormsg)
        return False
def do(self, workflow_dict):
    """Grant each restored host access to its freshly created export.

    Records DBAAS_0021 in workflow_dict['exceptions'] on failure.
    """
    try:
        infra = workflow_dict['databaseinfra']
        for entry in workflow_dict['hosts_and_exports']:
            NfsaasProvider.grant_access(environment=infra.environment,
                                        host=entry['host'],
                                        export_id=entry['new_export_id'])
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0021)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def do(self, workflow_dict):
    """Give the workflow host access to the newly created volume.

    Records DBAAS_0022 in workflow_dict['exceptions'] on failure.
    """
    try:
        infra = workflow_dict['databaseinfra']
        target_host = workflow_dict['host']
        new_volume = workflow_dict['volume']
        NfsaasProvider.grant_access(environment=infra.environment,
                                    host=target_host,
                                    export_id=new_volume.nfsaas_export_id)
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0022)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def make_instance_snapshot_backup(instance, error):
    """Snapshot *instance*'s active export under a database lock.

    Registers a RUNNING Snapshot row against the active HostAttr, locks
    the database (saving the binlog position first for MySQL) and stores
    the provider's snapshot id/name on the row.  On failure the message
    goes into error['errormsg'], the row is flagged via set_backup_error
    and False is returned.

    NOTE(review): no unlock or success return is visible in this excerpt;
    the function appears to continue beyond it.
    """
    LOG.info("Make instance backup for %s" % (instance))
    snapshot = Snapshot()
    snapshot.start_at = datetime.datetime.now()
    snapshot.type = Snapshot.SNAPSHOPT
    snapshot.status = Snapshot.RUNNING
    snapshot.instance = instance
    snapshot.environment = instance.databaseinfra.environment
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    nfsaas_hostattr = Nfsaas_HostAttr.objects.get(
        host=instance.hostname, is_active=True)
    snapshot.export_path = nfsaas_hostattr.nfsaas_path
    databases = Database.objects.filter(databaseinfra=instance.databaseinfra)
    if databases:
        snapshot.database_name = databases[0].name
    snapshot.save()
    databaseinfra = instance.databaseinfra
    driver = databaseinfra.get_driver()
    client = driver.get_client(instance)
    cloudstack_hostattr = Cloudstack_HostAttr.objects.get(
        host=instance.hostname)
    try:
        LOG.debug('Locking instance %s' % str(instance))
        driver.lock_database(client)
        LOG.debug('Instance %s is locked' % str(instance))
        # MySQL needs its binlog position saved before the snapshot.
        if type(driver).__name__ == 'MySQL':
            mysql_binlog_save(client, instance, cloudstack_hostattr)
        nfs_snapshot = NfsaasProvider.create_snapshot(
            environment=databaseinfra.environment, host=instance.hostname)
        if 'error' in nfs_snapshot:
            errormsg = nfs_snapshot['error']
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
        if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
            snapshot.snapshopt_id = nfs_snapshot['id']
            snapshot.snapshot_name = nfs_snapshot['snapshot']
        else:
            errormsg = 'There is no snapshot information'
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
    except Exception as e:  # was "except Exception, e" — Py3-incompatible syntax
        errormsg = "Error creating snapshot: %s" % (e)
        error['errormsg'] = errormsg
        set_backup_error(databaseinfra, snapshot, errormsg)
        return False
def do(self, workflow_dict):
    """Grant every host in the workflow access to its new export.

    Records DBAAS_0021 in workflow_dict['exceptions'] on failure.
    """
    try:
        infra = workflow_dict['databaseinfra']
        for mapping in workflow_dict['hosts_and_exports']:
            NfsaasProvider.grant_access(
                environment=infra.environment,
                host=mapping['host'],
                export_id=mapping['new_export_id'])
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0021)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def do(self, workflow_dict):
    """Revoke the future (target) host's access to the source host's export.

    Records DBAAS_0020 in workflow_dict['exceptions'] on failure.
    """
    try:
        infra = workflow_dict['databaseinfra']
        source_host = workflow_dict['source_hosts'][0]
        future_host = source_host.future_host
        export_id = source_host.nfsaas_host_attributes.all()[0].nfsaas_export_id
        NfsaasProvider.revoke_access(environment=infra.environment,
                                     host=future_host,
                                     export_id=export_id)
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def purge_unused_exports():
    """Drop inactive NFSaaS exports that no longer hold any live snapshot.

    Walks every CloudStack DatabaseInfra, skipping arbiter/sentinel
    instances (no data disk); for each inactive export with no un-purged
    snapshot: revoke access, drop the export, delete the record.
    """
    from dbaas_nfsaas.models import HostAttr
    databaseinfras = DatabaseInfra.objects.filter(plan__provider=Plan.CLOUDSTACK).prefetch_related('instances')
    for databaseinfra in databaseinfras:
        # Arbiters and sentinels carry no NFSaaS disk, so skip them.
        instances = databaseinfra.instances.exclude(instance_type__in=[Instance.MONGODB_ARBITER, Instance.REDIS_SENTINEL])
        environment = databaseinfra.environment
        plan = databaseinfra.plan
        for instance in instances:
            exports = HostAttr.objects.filter(host=instance.hostname, is_active=False)
            for export in exports:
                # An un-purged snapshot keeps the export alive.
                snapshots = Snapshot.objects.filter(export_path=export.nfsaas_path, purge_at=None)
                if snapshots:
                    continue
                LOG.info('Export {} will be removed'.format(export.nfsaas_export_id))
                host = export.host
                export_id = export.nfsaas_export_id
                nfsaas_client = NfsaasProvider()
                nfsaas_client.revoke_access(environment=environment, host=host, export_id=export_id)
                nfsaas_client.drop_export(environment=environment, export_id=export_id)
                export.delete()
def make_instance_snapshot_backup(instance, error):
    """Snapshot *instance*'s export while the database is briefly locked.

    Registers a RUNNING Snapshot row, locks the database, requests the
    NFSaaS snapshot, unlocks, then stores the snapshot id/name.  On
    failure the message goes into error['errormsg'], the row is flagged
    via set_backup_error and False is returned.

    NOTE(review): if create_snapshot raises, unlock_database is skipped
    and the database stays locked — later revisions use try/finally;
    consider the same here.  No success return is visible in this excerpt.
    """
    LOG.info("Make instance backup for %s" % (instance))
    snapshot = Snapshot()
    snapshot.start_at = datetime.datetime.now()
    snapshot.type = Snapshot.SNAPSHOPT
    snapshot.status = Snapshot.RUNNING
    snapshot.instance = instance
    snapshot.environment = instance.databaseinfra.environment
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    nfsaas_hostattr = Nfsaas_HostAttr.objects.get(host=instance.hostname)
    snapshot.export_path = nfsaas_hostattr.nfsaas_path
    databases = Database.objects.filter(databaseinfra=instance.databaseinfra)
    if databases:
        snapshot.database_name = databases[0].name
    snapshot.save()
    databaseinfra = instance.databaseinfra
    driver = databaseinfra.get_driver()
    client = driver.get_client(instance)
    try:
        driver.lock_database(client)
        nfs_snapshot = NfsaasProvider.create_snapshot(
            environment=databaseinfra.environment,
            plan=databaseinfra.plan,
            host=instance.hostname)
        driver.unlock_database(client)
        if 'error' in nfs_snapshot:
            errormsg = nfs_snapshot['error']
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
        if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
            snapshot.snapshopt_id = nfs_snapshot['id']
            snapshot.snapshot_name = nfs_snapshot['snapshot']
        else:
            errormsg = 'There is no snapshot information'
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
    except Exception as e:  # was "except Exception, e" — Py3-incompatible syntax
        errormsg = "Error creating snapshot: %s" % (e)
        error['errormsg'] = errormsg
        set_backup_error(databaseinfra, snapshot, errormsg)
        return False
def do(self, workflow_dict):
    """Remove the snapshot taken earlier in the workflow, then forget its id.

    Records DBAAS_0020 in workflow_dict['exceptions'] on failure.
    """
    try:
        from dbaas_nfsaas.models import HostAttr
        infra = workflow_dict['databaseinfra']
        src_instance = workflow_dict['source_instances'][0]
        active_attr = HostAttr.objects.get(host=src_instance.hostname,
                                           is_active=True)
        NfsaasProvider.remove_snapshot(
            environment=infra.environment,
            host_attr=active_attr,
            snapshot_id=workflow_dict['snapshopt_id'])
        # Drop the key so undo knows there is nothing left to remove.
        del workflow_dict['snapshopt_id']
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def undo(self, workflow_dict):
    """Destroy the NFSaaS disks created for every target host.

    Records DBAAS_0020 in workflow_dict['exceptions'] on failure.
    """
    LOG.info("Running undo...")
    try:
        for target_host in workflow_dict['target_hosts']:
            LOG.info("Destroying nfsaas disk...")
            NfsaasProvider().destroy_disk(
                environment=workflow_dict['target_environment'],
                host=target_host)
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def do(self, workflow_dict):
    """Lock the source MySQL, capture its binlog position, snapshot the disk.

    Stores binlog file/position and the snapshot id/name in workflow_dict
    and unlocks on success.  'database_locked' tracks whether undo must
    still release the lock.  Records DBAAS_0020 on failure.
    """
    try:
        databaseinfra = workflow_dict['databaseinfra']
        driver = databaseinfra.get_driver()
        instance = workflow_dict['source_instances'][0]
        client = driver.get_client(instance)
        driver.lock_database(client)
        workflow_dict['database_locked'] = True
        LOG.debug('Instance %s is locked' % str(instance))
        # Capture the replication coordinates while writes are frozen.
        client.query('show master status')
        r = client.store_result()
        row = r.fetch_row(maxrows=0, how=1)
        workflow_dict['binlog_file'] = row[0]['File']
        workflow_dict['binlog_pos'] = row[0]['Position']
        nfs_snapshot = NfsaasProvider.create_snapshot(
            environment=databaseinfra.environment, host=instance.hostname)
        LOG.info('nfs_snapshot: {}'.format(nfs_snapshot))
        if 'error' in nfs_snapshot:
            errormsg = nfs_snapshot['error']
            raise Exception(errormsg)
        if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
            workflow_dict['snapshopt_id'] = nfs_snapshot['id']
            workflow_dict['snapshot_name'] = nfs_snapshot['snapshot']
        else:
            errormsg = 'There is no snapshot information'
            raise Exception(errormsg)
        # Unlock only on success: on failure undo uses 'database_locked'.
        driver.unlock_database(client)
        workflow_dict['database_locked'] = False
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def do(self, workflow_dict):
    """Lock the source MySQL, record its binlog position, snapshot the disk.

    Saves binlog file/position and the snapshot id/name in workflow_dict
    and unlocks on success.  'database_locked' tells undo whether the lock
    still has to be released.  Records DBAAS_0020 on failure.
    """
    try:
        databaseinfra = workflow_dict["databaseinfra"]
        driver = databaseinfra.get_driver()
        instance = workflow_dict["source_instances"][0]
        client = driver.get_client(instance)
        driver.lock_database(client)
        workflow_dict["database_locked"] = True
        LOG.debug("Instance %s is locked" % str(instance))
        # Capture the replication coordinates while writes are frozen.
        client.query("show master status")
        r = client.store_result()
        row = r.fetch_row(maxrows=0, how=1)
        workflow_dict["binlog_file"] = row[0]["File"]
        workflow_dict["binlog_pos"] = row[0]["Position"]
        nfs_snapshot = NfsaasProvider.create_snapshot(environment=databaseinfra.environment, host=instance.hostname)
        LOG.info("nfs_snapshot: {}".format(nfs_snapshot))
        if "error" in nfs_snapshot:
            errormsg = nfs_snapshot["error"]
            raise Exception(errormsg)
        if "id" in nfs_snapshot and "snapshot" in nfs_snapshot:
            workflow_dict["snapshopt_id"] = nfs_snapshot["id"]
            workflow_dict["snapshot_name"] = nfs_snapshot["snapshot"]
        else:
            errormsg = "There is no snapshot information"
            raise Exception(errormsg)
        # Unlock only on success: on failure undo uses 'database_locked'.
        driver.unlock_database(client)
        workflow_dict["database_locked"] = False
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict["exceptions"]["error_codes"].append(DBAAS_0020)
        workflow_dict["exceptions"]["traceback"].append(traceback)
        return False
def undo(self, workflow_dict):
    """Destroy every NFSaaS disk created for the workflow hosts.

    Returns False as soon as one disk cannot be destroyed; records
    DBAAS_0009 in workflow_dict['exceptions'] on unexpected errors.
    """
    try:
        for provisioned_host in workflow_dict['hosts']:
            LOG.info("Destroying nfsaas disk...")
            removed = NfsaasProvider().destroy_disk(
                environment=workflow_dict['environment'],
                host=provisioned_host)
            if not removed:
                return False
        return True
    except Exception:
        trace = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0009)
        workflow_dict['exceptions']['traceback'].append(trace)
        return False
def purge_unused_exports():
    """Remove inactive NFSaaS exports that no longer back any live snapshot.

    For every CloudStack DatabaseInfra's database instances, each export
    flagged is_active=False with no un-purged snapshot is wiped
    (clean_unused_data), access-revoked, dropped on the provider and
    deleted locally.
    """
    # Imported here rather than at module top — presumably to avoid an
    # import cycle; confirm before hoisting.
    from dbaas_nfsaas.models import HostAttr
    databaseinfras = DatabaseInfra.objects.filter(
        plan__provider=Plan.CLOUDSTACK).prefetch_related('instances')
    for databaseinfra in databaseinfras:
        instances = databaseinfra.get_driver().get_database_instances()
        environment = databaseinfra.environment
        for instance in instances:
            # Only inactive exports are candidates for removal.
            exports = HostAttr.objects.filter(host=instance.hostname,
                                              is_active=False)
            for export in exports:
                # An un-purged snapshot keeps the export alive.
                snapshots = Snapshot.objects.filter(
                    export_path=export.nfsaas_path, purge_at=None)
                if snapshots:
                    continue
                LOG.info('Export {} will be removed'.format(
                    export.nfsaas_export_id))
                host = export.host
                export_id = export.nfsaas_export_id
                # Wipe remaining files on the export before dropping it.
                clean_unused_data(export_id=export_id,
                                  export_path=export.nfsaas_path,
                                  host=instance.hostname,
                                  databaseinfra=databaseinfra)
                nfsaas_client = NfsaasProvider()
                nfsaas_client.revoke_access(environment=environment,
                                            host=host,
                                            export_id=export_id)
                nfsaas_client.drop_export(environment=environment,
                                          export_id=export_id)
                export.delete()
def make_instance_snapshot_backup(instance, error):
    """Full snapshot pipeline for one instance.

    Creates a RUNNING Snapshot row, locks the database (MySQL binlog
    position saved first), takes the NFSaaS snapshot, always unlocks,
    measures the snapshot size via remote ``du``, optionally mirrors the
    snapshot into the database's configured backup_path, then marks the
    row SUCCESS and registers it with dbmonitor.  Returns True on success;
    on failure writes error['errormsg'], flags the row via
    set_backup_error and returns False.
    """
    LOG.info("Make instance backup for %s" % (instance))
    snapshot = Snapshot()
    snapshot.start_at = datetime.datetime.now()
    snapshot.type = Snapshot.SNAPSHOPT
    snapshot.status = Snapshot.RUNNING
    snapshot.instance = instance
    snapshot.environment = instance.databaseinfra.environment
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    nfsaas_hostattr = Nfsaas_HostAttr.objects.get(host=instance.hostname,
                                                  is_active=True)
    snapshot.export_path = nfsaas_hostattr.nfsaas_path
    databases = Database.objects.filter(databaseinfra=instance.databaseinfra)
    if databases:
        snapshot.database_name = databases[0].name
    snapshot.save()
    databaseinfra = instance.databaseinfra
    driver = databaseinfra.get_driver()
    client = driver.get_client(instance)
    cloudstack_hostattr = Cloudstack_HostAttr.objects.get(
        host=instance.hostname)
    try:
        LOG.debug('Locking instance %s' % str(instance))
        driver.lock_database(client)
        LOG.debug('Instance %s is locked' % str(instance))
        # MySQL needs its binlog position saved before the snapshot.
        if type(driver).__name__ == 'MySQL':
            mysql_binlog_save(client, instance, cloudstack_hostattr)
        nfs_snapshot = NfsaasProvider.create_snapshot(
            environment=databaseinfra.environment, host=instance.hostname)
        if 'error' in nfs_snapshot:
            errormsg = nfs_snapshot['error']
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
        if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
            snapshot.snapshopt_id = nfs_snapshot['id']
            snapshot.snapshot_name = nfs_snapshot['snapshot']
        else:
            errormsg = 'There is no snapshot information'
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
    except Exception as e:
        errormsg = "Error creating snapshot: %s" % (e)
        error['errormsg'] = errormsg
        set_backup_error(databaseinfra, snapshot, errormsg)
        return False
    finally:
        # The lock is always released, even on early returns above.
        LOG.debug('Unlocking instance %s' % str(instance))
        driver.unlock_database(client)
        LOG.debug('Instance %s is unlocked' % str(instance))
    # Best-effort size measurement; failures fall back to size 0.
    output = {}
    command = "du -sb /data/.snapshot/%s | awk '{print $1}'" % (
        snapshot.snapshot_name)
    try:
        exec_remote_command(server=instance.hostname.address,
                            username=cloudstack_hostattr.vm_user,
                            password=cloudstack_hostattr.vm_password,
                            command=command, output=output)
        size = int(output['stdout'][0])
        snapshot.size = size
    except Exception as e:
        snapshot.size = 0
        LOG.error("Error exec remote command %s" % (e))
    # NOTE(review): raises IndexError if `databases` is empty — confirm a
    # database row always exists when this runs.
    backup_path = databases[0].backup_path
    if backup_path:
        infraname = databaseinfra.name
        now = datetime.datetime.now()
        target_path = "{backup_path}/{today_str}/{hostname}/{now_str}/{infraname}".format(
            backup_path=backup_path,
            today_str=now.strftime("%Y_%m_%d"),
            hostname=instance.hostname.hostname.split('.')[0],
            now_str=now.strftime("%Y%m%d%H%M%S"),
            infraname=infraname)
        snapshot_path = "/data/.snapshot/{}/data/".format(
            snapshot.snapshot_name)
        output = {}
        # Prune older dated directories, then copy the snapshot in the
        # background.  NOTE(review): the month glob '[0-1][0-12]' looks
        # unintended (character class, not a range) — confirm.
        command = """
if [ -d "{backup_path}" ]
then
    rm -rf {backup_path}/20[0-9][0-9]_[0-1][0-12]_[0-3][0-9]
    mkdir -p {target_path}
    cp -r {snapshot_path} {target_path} &
fi
""".format(backup_path=backup_path, target_path=target_path,
           snapshot_path=snapshot_path)
        try:
            exec_remote_command(server=instance.hostname.address,
                                username=cloudstack_hostattr.vm_user,
                                password=cloudstack_hostattr.vm_password,
                                command=command, output=output)
        except Exception as e:
            LOG.error("Error exec remote command %s" % (e))
    snapshot.status = Snapshot.SUCCESS
    snapshot.end_at = datetime.datetime.now()
    snapshot.save()
    register_backup_dbmonitor(databaseinfra, snapshot)
    return True
def destroy_unused_export(export_id, export_path, host, databaseinfra):
    """Wipe any leftover data on the export, then drop it on NFSaaS."""
    clean_unused_data(export_id, export_path, host, databaseinfra)
    NfsaasProvider().drop_export(environment=databaseinfra.environment,
                                 export_id=export_id)
def make_instance_snapshot_backup(instance, error):
    """Full snapshot pipeline for one instance.

    Creates a RUNNING Snapshot row against the active export, locks the
    database (MySQL binlog position saved first), takes the NFSaaS
    snapshot, always unlocks, measures the snapshot size via remote
    ``du``, optionally mirrors the snapshot into the database's configured
    backup_path, then marks the row SUCCESS and registers it with
    dbmonitor.  Returns True on success; on failure writes
    error['errormsg'], flags the row via set_backup_error and returns
    False.
    """
    LOG.info("Make instance backup for %s" % (instance))
    snapshot = Snapshot()
    snapshot.start_at = datetime.datetime.now()
    snapshot.type = Snapshot.SNAPSHOPT
    snapshot.status = Snapshot.RUNNING
    snapshot.instance = instance
    snapshot.environment = instance.databaseinfra.environment
    from dbaas_nfsaas.models import HostAttr as Nfsaas_HostAttr
    nfsaas_hostattr = Nfsaas_HostAttr.objects.get(
        host=instance.hostname, is_active=True)
    snapshot.export_path = nfsaas_hostattr.nfsaas_path
    databases = Database.objects.filter(databaseinfra=instance.databaseinfra)
    if databases:
        snapshot.database_name = databases[0].name
    snapshot.save()
    databaseinfra = instance.databaseinfra
    driver = databaseinfra.get_driver()
    client = driver.get_client(instance)
    cloudstack_hostattr = Cloudstack_HostAttr.objects.get(
        host=instance.hostname)
    try:
        LOG.debug('Locking instance %s' % str(instance))
        driver.lock_database(client)
        LOG.debug('Instance %s is locked' % str(instance))
        # MySQL needs its binlog position saved before the snapshot.
        if type(driver).__name__ == 'MySQL':
            mysql_binlog_save(client, instance, cloudstack_hostattr)
        nfs_snapshot = NfsaasProvider.create_snapshot(environment=databaseinfra.environment, host=instance.hostname)
        if 'error' in nfs_snapshot:
            errormsg = nfs_snapshot['error']
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
        if 'id' in nfs_snapshot and 'snapshot' in nfs_snapshot:
            snapshot.snapshopt_id = nfs_snapshot['id']
            snapshot.snapshot_name = nfs_snapshot['snapshot']
        else:
            errormsg = 'There is no snapshot information'
            error['errormsg'] = errormsg
            set_backup_error(databaseinfra, snapshot, errormsg)
            return False
    except Exception as e:
        errormsg = "Error creating snapshot: %s" % (e)
        error['errormsg'] = errormsg
        set_backup_error(databaseinfra, snapshot, errormsg)
        return False
    finally:
        # The lock is always released, even on early returns above.
        LOG.debug('Unlocking instance %s' % str(instance))
        driver.unlock_database(client)
        LOG.debug('Instance %s is unlocked' % str(instance))
    # Best-effort size measurement; failures fall back to size 0.
    output = {}
    command = "du -sb /data/.snapshot/%s | awk '{print $1}'" % (
        snapshot.snapshot_name)
    try:
        exec_remote_command(server=instance.hostname.address,
                            username=cloudstack_hostattr.vm_user,
                            password=cloudstack_hostattr.vm_password,
                            command=command, output=output)
        size = int(output['stdout'][0])
        snapshot.size = size
    except Exception as e:
        snapshot.size = 0
        LOG.error("Error exec remote command %s" % (e))
    # NOTE(review): raises IndexError if `databases` is empty — confirm a
    # database row always exists when this runs.
    backup_path = databases[0].backup_path
    if backup_path:
        infraname = databaseinfra.name
        now = datetime.datetime.now()
        target_path = "{backup_path}/{today_str}/{hostname}/{now_str}/{infraname}".format(
            backup_path=backup_path,
            today_str=now.strftime("%Y_%m_%d"),
            hostname=instance.hostname.hostname.split('.')[0],
            now_str=now.strftime("%Y%m%d%H%M%S"),
            infraname=infraname)
        snapshot_path = "/data/.snapshot/{}/data/".format(snapshot.snapshot_name)
        output = {}
        # Prune older dated directories, then copy the snapshot in the
        # background.  NOTE(review): the month glob '[0-1][0-12]' looks
        # unintended (character class, not a range) — confirm.
        command = """
if [ -d "{backup_path}" ]
then
    rm -rf {backup_path}/20[0-9][0-9]_[0-1][0-12]_[0-3][0-9]
    mkdir -p {target_path}
    cp -r {snapshot_path} {target_path} &
fi
""".format(backup_path=backup_path, target_path=target_path,
           snapshot_path=snapshot_path)
        try:
            exec_remote_command(server=instance.hostname.address,
                                username=cloudstack_hostattr.vm_user,
                                password=cloudstack_hostattr.vm_password,
                                command=command, output=output)
        except Exception as e:
            LOG.error("Error exec remote command %s" % (e))
    snapshot.status = Snapshot.SUCCESS
    snapshot.end_at = datetime.datetime.now()
    snapshot.save()
    register_backup_dbmonitor(databaseinfra, snapshot)
    return True