def database_environment_migrate(
        database, new_environment, new_offering, task, hosts_zones,
        since_step=None):
    """Migrate *database* and its infra to *new_environment*/*new_offering*.

    Creates a ``DatabaseMigrate`` maintenance record, runs the migration
    steps over the instances built from *hosts_zones* and, on success,
    repoints both the database and its infra at the new environment.
    ``since_step`` resumes a previously interrupted migration.
    """
    # Persist the maintenance record before running any step so progress
    # can be tracked against it via update_step.
    database_migrate = DatabaseMigrate()
    database_migrate.task = task
    database_migrate.database = database
    database_migrate.environment = new_environment
    database_migrate.origin_environment = database.environment
    database_migrate.offering = new_offering
    database_migrate.origin_offering = database.infra.offering
    database_migrate.save()
    instances = build_migrate_hosts(hosts_zones, database_migrate)
    # Deterministic processing order: sort instances by DNS name.
    instances = sorted(instances, key=lambda k: k.dns)
    steps = get_steps(database)
    result = steps_for_instances(
        steps, instances, task, database_migrate.update_step, since_step
    )
    # Re-fetch: the steps may have updated the record out-of-band.
    database_migrate = DatabaseMigrate.objects.get(id=database_migrate.id)
    if result:
        database = database_migrate.database
        database.environment = database_migrate.environment
        database.save()
        infra = database.infra
        infra.environment = database_migrate.environment
        infra.save()
        database_migrate.set_success()
        task.set_status_success('Database migrated with success')
    else:
        database_migrate.set_error()
        task.set_status_error('Could not migrate database')
def node_zone_migrate(host, zone, new_environment, task, since_step=None,
                      step_manager=None):
    """Migrate a single *host* to *zone* inside *new_environment*.

    Reuses *step_manager* as the tracking record when given (persisted as
    a fresh row); otherwise a new ``HostMigrate`` is created.
    """
    first_instance = host.instances.first()
    if step_manager:
        migration = step_manager
        migration.id = None  # force an INSERT for this run
    else:
        migration = HostMigrate()
        migration.task = task
    migration.host = first_instance.hostname
    migration.zone = zone
    migration.environment = new_environment
    migration.save()
    succeeded = steps_for_instances(
        get_steps(host), [first_instance], task, migration.update_step,
        since_step, step_manager=migration
    )
    # Reload to pick up any changes written by the steps.
    migration = HostMigrate.objects.get(id=migration.id)
    if not succeeded:
        migration.set_error()
        task.set_status_error('Could not migrate host')
        return
    migration.set_success()
    task.set_status_success('Node migrated with success')
def migrate_filer_disk_for_database(self, database):
    """Run the filer-disk migration workflow for *database*."""
    db_infra = database.infra
    topology_class = db_infra.plan.replication_topology.class_path
    migration_steps = get_filer_migrate_steps(topology_class)
    task = self.register_task(database)
    target_instances = self._get_instances(db_infra)
    step_manager = self.register_step_manager(task, target_instances)
    # Guard clause: abort when the database is busy with another task.
    if not self._can_run(database, task):
        self.set_error(task, step_manager)
        return
    task.add_detail(
        "Migrating disk for database {}...".format(database.name), level=2
    )
    completed = steps_for_instances(
        migration_steps, target_instances, task,
        step_counter_method=step_manager.update_step,
        since_step=step_manager.current_step
    )
    if not completed:
        self.set_error(task, step_manager)
        return
    task.set_status_success('Migrate filer finished with success')
def reinstall_vm_database(self, database, instance, user, task, since_step=0):
    """Reinstall the VM backing *instance* of *database* (no rollback)."""
    worker_name = get_worker_name()
    task = TaskHistory.register(self.request, user, task, worker_name)
    topology_class = database.infra.plan.replication_topology.class_path
    # Tracking record for this maintenance run.
    reinstall = DatabaseReinstallVM()
    reinstall.database = database
    reinstall.instance = instance
    reinstall.task = task
    reinstall.save()
    done = steps_for_instances(
        get_reinstallvm_steps_setting(topology_class), [instance], task,
        reinstall.update_step, since_step
    )
    if not done:
        reinstall.set_error()
        task.update_status_for(
            TaskHistory.STATUS_ERROR,
            'Could not do reinstall vm.\nReinstall VM doesn\'t have rollback'
        )
        return
    reinstall.set_success()
    task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done')
def switch_write_database(self, database, instance, user, task):
    """Promote *instance* to the write (master) node of *database*.

    Runs the topology's switch-write steps over the single instance and
    records the outcome on *task*.  ``database.finish_task()`` always runs
    so the database is released from this task either way.
    """
    from workflow.workflow import steps_for_instances
    from util.providers import get_switch_write_instance_steps
    self.request.kwargs['database'] = database
    self.request.kwargs['instance'] = instance
    infra = instance.databaseinfra
    worker_name = get_worker_name()
    task = TaskHistory.register(self.request, user, task, worker_name)
    class_path = infra.plan.replication_topology.class_path
    steps = get_switch_write_instance_steps(class_path)
    success = steps_for_instances(
        list_of_groups_of_steps=steps, instances=[instance], task=task
    )
    if success:
        task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done')
    else:
        # BUG FIX: the error status previously carried the message 'Done',
        # making failed switches look successful in the task history.
        task.update_status_for(
            TaskHistory.STATUS_ERROR, 'Could not switch write instance'
        )
    database.finish_task()
def remove_readonly_instance(self, instance, user, task):
    """Remove a read-only *instance* by undoing its creation steps.

    ``undo=True`` runs the add-read-only workflow in reverse to tear the
    instance down.
    """
    from workflow.workflow import steps_for_instances
    from util.providers import get_remove_readonly_instance_steps
    infra = instance.databaseinfra
    database = infra.databases.last()
    self.request.kwargs['database'] = database
    self.request.kwargs['instance'] = instance
    worker_name = get_worker_name()
    task = TaskHistory.register(self.request, user, task, worker_name)
    class_path = infra.plan.replication_topology.class_path
    steps = get_remove_readonly_instance_steps(class_path)
    success = steps_for_instances(
        list_of_groups_of_steps=steps, instances=[instance], task=task,
        undo=True
    )
    if success:
        task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done')
    else:
        # BUG FIX: the error status previously carried the message 'Done',
        # hiding the failure from anyone reading the task history.
        task.update_status_for(
            TaskHistory.STATUS_ERROR, 'Could not remove readonly instance'
        )
def restore_snapshot(database, group, task, retry_from=None):
    """Restore *database* from a snapshot, optionally resuming *retry_from*."""
    restore = DatabaseRestore()
    restore.task = task
    restore.database = database
    restore.group = group
    # On retry reuse the previous run's backup group, otherwise start fresh.
    if retry_from:
        new_group = retry_from.new_group
    else:
        new_group = BackupGroup()
    new_group.save()
    restore.new_group = new_group
    restore.save()
    restore.load_instances(retry_from)
    restore_settings = get_restore_snapshot_settings(
        database.plan.replication_topology.class_path
    )
    since_step = retry_from.current_step if retry_from else None
    succeeded = steps_for_instances(
        restore_settings, restore.instances, task, restore.update_step,
        since_step=since_step
    )
    if succeeded:
        restore.set_success()
        task.set_status_success('Restore is done')
    else:
        restore.set_error()
        task.set_status_error(
            'Could not do restore\n'
            'Please check error message and do retry'
        )
def upgrade_database(self, database, user, task, since_step=0):
    """Upgrade *database* to its engine-equivalent plan (no rollback)."""
    from workflow.workflow import steps_for_instances
    from maintenance.models import DatabaseUpgrade
    worker_name = get_worker_name()
    task = TaskHistory.register(self.request, user, task, worker_name)
    current_plan = database.infra.plan
    upgraded_plan = current_plan.engine_equivalent_plan
    upgrade_steps = get_database_upgrade_setting(
        upgraded_plan.replication_topology.class_path
    )
    # Maintenance record tracking this upgrade.
    upgrade = DatabaseUpgrade()
    upgrade.database = database
    upgrade.source_plan = current_plan
    upgrade.target_plan = upgraded_plan
    upgrade.task = task
    upgrade.save()
    if steps_for_instances(upgrade_steps, database.infra.instances.all(),
                           task, upgrade.update_step, since_step):
        upgrade.set_success()
        task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done')
    else:
        upgrade.set_error()
        task.update_status_for(
            TaskHistory.STATUS_ERROR,
            'Could not do upgrade.\nUpgrade don\'t has rollback')
def create_database(name, plan, environment, team, project, description,
                    task, backup_hour, maintenance_window, maintenance_day,
                    subscribe_to_email_events=True, is_protected=False,
                    user=None, retry_from=None):
    """Create a new database (infra + instances) and run the deploy steps.

    ``retry_from`` resumes a previously failed creation from its recorded
    step.  On success the task is marked done and the database's pending
    task is finished.
    """
    topology_path = plan.replication_topology.class_path
    name = slugify(name)
    base_name = gen_infra_names(name, 0)
    infra = get_or_create_infra(base_name, plan, environment, backup_hour,
                                maintenance_window, maintenance_day,
                                retry_from)
    instances = get_instances_for(infra, topology_path)
    # Maintenance record for this creation run.
    database_create = DatabaseCreate()
    database_create.task = task
    database_create.name = name
    database_create.plan = plan
    database_create.environment = environment
    database_create.team = team
    database_create.project = project
    database_create.description = description
    database_create.subscribe_to_email_events = subscribe_to_email_events
    database_create.is_protected = is_protected
    database_create.user = user.username if user else task.user
    database_create.infra = infra
    # May be None on a first run; presumably populated by the deploy steps.
    database_create.database = infra.databases.first()
    database_create.save()
    steps = get_deploy_settings(topology_path)
    since_step = None
    if retry_from:
        since_step = retry_from.current_step
    if steps_for_instances(steps, instances, task,
                           database_create.update_step,
                           since_step=since_step):
        database_create.set_success()
        task.set_status_success('Database created')
        database_create.database.finish_task()
    else:
        database_create.set_error()
        task.set_status_error('Could not create database\n'
                              'Please check error message and do retry')
def run(self):
    """Execute the configured steps over the instances and record the result.

    Uses ``self.step_manager`` for progress tracking; on failure triggers
    the optional auto-rollback.
    """
    result = steps_for_instances(self.steps, self.instances, self.task,
                                 self.step_manager.update_step,
                                 self.current_step,
                                 step_manager=self.step_manager)
    # Reload so status updates apply to the freshest DB state.
    self.reload_step_manager()
    if result:
        self.step_manager.set_success()
        self.task.set_status_success(self.success_msg)
    else:
        self.step_manager.set_error()
        self.task.set_status_error(self.error_msg)
        self.run_auto_rollback_if_configured()
    # NOTE(review): reconstructed so cleanup runs on both success and
    # failure paths — confirm against the original indentation.
    self.run_auto_cleanup_if_configured()
def update_ssl(self, database, task, since_step=None, step_manager=None,
               scheduled_task=None):
    """Update the SSL certificates of *database*'s instances.

    A passed-in *step_manager* is re-saved as a fresh record and resumed
    from its ``current_step``; otherwise the last retryable failed
    ``UpdateSsl`` for the database (if any) seeds the resume point.
    """
    from maintenance.models import UpdateSsl
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=task.user,
                                worker_name=get_worker_name())
    if step_manager:
        # FIX: dropped the no-op `step_manager = step_manager` self-assign.
        step_manager.id = None        # persist as a new row
        step_manager.started_at = None
        since_step = step_manager.current_step
    else:
        retry_from = UpdateSsl.objects.filter(
            can_do_retry=True, database=database,
            status=UpdateSsl.ERROR).last()
        step_manager = UpdateSsl()
        if retry_from:
            step_manager.current_step = retry_from.current_step
            since_step = retry_from.current_step
            step_manager.task_schedule = retry_from.task_schedule
    step_manager.database = database
    step_manager.task = task
    if scheduled_task:
        step_manager.task_schedule = scheduled_task
    step_manager.set_running()
    step_manager.save()
    steps = database.databaseinfra.update_ssl_steps()
    instances = database.infra.get_driver().get_database_instances()
    result = steps_for_instances(steps, instances, task,
                                 step_manager.update_step, since_step,
                                 step_manager=step_manager)
    step_manager = UpdateSsl.objects.get(id=step_manager.id)
    if result:
        step_manager.set_success()
        task.set_status_success('SSL Update with success')
    else:
        step_manager.set_error()
        task.set_status_error('Could not update SSL')
def upgrade_database(self, database, user, task, since_step=0):
    """Upgrade *database*'s infra to the engine-equivalent plan.

    On success the infra is repointed at the target plan and engine.
    The workflow has no rollback.
    """
    worker_name = get_worker_name()
    task = TaskHistory.register(self.request, user, task, worker_name)
    infra = database.infra
    source_plan = infra.plan
    target_plan = source_plan.engine_equivalent_plan
    class_path = target_plan.replication_topology.class_path
    steps = get_database_upgrade_setting(class_path)
    database_upgrade = DatabaseUpgrade()
    database_upgrade.database = database
    database_upgrade.source_plan = source_plan
    database_upgrade.target_plan = target_plan
    database_upgrade.task = task
    database_upgrade.save()
    # One instance per host, preserving first-seen order — a host may back
    # several instances but the upgrade must run once per machine.
    hosts = []
    for instance in database.infra.instances.all():
        if instance.hostname not in hosts:
            hosts.append(instance.hostname)
    # FIX: dropped the no-op `instances = instances` self-assignment and
    # replaced the manual append loop with a comprehension.
    instances = [host.instances.all()[0] for host in hosts]
    success = steps_for_instances(
        steps, instances, task, database_upgrade.update_step, since_step
    )
    if success:
        infra.plan = target_plan
        infra.engine = target_plan.engine
        infra.save()
        database_upgrade.set_success()
        task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done')
    else:
        database_upgrade.set_error()
        task.update_status_for(
            TaskHistory.STATUS_ERROR,
            'Could not do upgrade.\nUpgrade doesn\'t have rollback'
        )
def resize_database(self, database, user, task, cloudstackpack,
                    original_cloudstackpack=None, since_step=0):
    """Resize *database* to the offering in *cloudstackpack*.

    ``original_cloudstackpack`` defaults to the database's current pack.
    The workflow has no rollback.
    """
    from util.providers import get_cloudstack_pack
    self.request.kwargs['database'] = database
    self.request.kwargs['cloudstackpack'] = cloudstackpack.offering
    worker_name = get_worker_name()
    task = TaskHistory.register(
        self.request, user, task, worker_name,
    )
    infra = database.infra
    if not original_cloudstackpack:
        original_cloudstackpack = get_cloudstack_pack(database)
    # NOTE(review): DatabaseResize is never explicitly saved here —
    # presumably update_step/set_success/set_error persist it; confirm.
    database_resize = DatabaseResize(database=database,
                                     source_offer=original_cloudstackpack,
                                     target_offer=cloudstackpack,
                                     task=task)
    class_path = infra.plan.replication_topology.class_path
    steps = get_resize_settings(class_path)
    instances_to_resize = infra.get_driver().get_database_instances()
    success = steps_for_instances(steps, instances_to_resize, task,
                                  database_resize.update_step, since_step)
    if success:
        database_resize.set_success()
        task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done.')
    else:
        database_resize.set_error()
        task.update_status_for(
            TaskHistory.STATUS_ERROR,
            'Could not do resize.\nResize doesn\'t have rollback')
def database_environment_migrate(
        database, new_environment, new_offering, task, hosts_zones,
        since_step=None, step_manager=None):
    """Migrate *database* to *new_environment*/*new_offering*.

    When *step_manager* is given this is a retry: the previous record is
    shallow-copied and persisted as a new row.  On success database and
    infra are repointed at the new environment and the infra's plan is
    swapped for the environment-equivalent one.
    """
    if step_manager:
        # Retry: clone the previous record so a fresh row tracks this run.
        database_migrate = copy(step_manager)
        database_migrate.id = None
        database_migrate.finished_at = None
        database_migrate.created_at = datetime.now()
    else:
        database_migrate = DatabaseMigrate()
    # NOTE(review): field assignments reconstructed as unconditional
    # (applied to both fresh and cloned records) — confirm against the
    # original indentation.
    database_migrate.task = task
    database_migrate.database = database
    database_migrate.environment = new_environment
    database_migrate.origin_environment = database.environment
    database_migrate.offering = new_offering
    database_migrate.origin_offering = database.infra.offering
    database_migrate.save()
    instances = build_migrate_hosts(hosts_zones, database_migrate,
                                    step_manager=step_manager)
    instances = sorted(instances, key=lambda k: k.dns)
    steps = get_steps(database)
    result = steps_for_instances(
        steps, instances, task, database_migrate.update_step, since_step,
        step_manager=step_manager
    )
    database_migrate = DatabaseMigrate.objects.get(id=database_migrate.id)
    if result:
        database = database_migrate.database
        database.environment = database_migrate.environment
        database.save()
        infra = database.infra
        infra.environment = database_migrate.environment
        infra.plan = infra.plan.get_equivalent_plan_for_env(
            database_migrate.environment
        )
        infra.save()
        database_migrate.set_success()
        task.set_status_success('Database migrated with success')
    else:
        database_migrate.set_error()
        task.set_status_error('Could not migrate database')
def recreate_slave(self, database, host, task, since_step=None,
                   step_manager=None):
    """Recreate the replication slave running on *host*.

    A passed-in *step_manager* is re-saved as a new record and resumed
    from its ``current_step``; otherwise the last retryable failed
    ``RecreateSlave`` for the host (if any) seeds the retry point and
    snapshot.  *database* is part of the task signature but unused here.
    """
    from maintenance.models import RecreateSlave
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=task.user,
                                worker_name=get_worker_name())
    instance = host.instances.first()
    if step_manager:
        # FIX: dropped the no-op `step_manager = step_manager` self-assign.
        step_manager.id = None        # persist as a new row
        step_manager.started_at = None
        since_step = step_manager.current_step
    else:
        retry_from = RecreateSlave.objects.filter(
            can_do_retry=True, host=host,
            status=RecreateSlave.ERROR).last()
        step_manager = RecreateSlave()
        if retry_from:
            step_manager.current_step = retry_from.current_step
            step_manager.snapshot = retry_from.snapshot
            since_step = retry_from.current_step
    step_manager.host = instance.hostname
    step_manager.task = task
    step_manager.save()
    steps = host.instances.first().databaseinfra.recreate_slave_steps()
    result = steps_for_instances(steps, [instance], task,
                                 step_manager.update_step, since_step,
                                 step_manager=step_manager)
    step_manager = RecreateSlave.objects.get(id=step_manager.id)
    if result:
        step_manager.set_success()
        task.set_status_success('Slave recreated with success')
    else:
        step_manager.set_error()
        task.set_status_error('Could not recreate slave')
def start_switch(self):
    """Switch the master role away from every instance located in self.zone.

    Instances already outside the zone are skipped; any host-info failure
    or busy database aborts the whole run with an error status.
    """
    self.task.add_detail("Switching master in {}...".format(self.zone))
    for instance in self.instances:
        infra = instance.databaseinfra
        env = infra.environment
        host = instance.hostname
        hp = Provider(instance, env)
        try:
            info = hp.host_info(host)
        except HostProviderInfoException as e:
            self.task.add_detail("ERROR-{}-{}".format(host, e), level=2)
            self.task.set_status_error('Could not load host info')
            return
        # Host is not in the target zone: nothing to switch.
        if info["zone"] != self.zone:
            self.task.add_detail(
                "OK-{}-{}".format(host, info["zone"]), level=2
            )
            continue
        database = infra.databases.first()
        if database.is_being_used_elsewhere():
            self.task.add_detail(
                "ERROR-{}-Being used to another task".format(host), level=2
            )
            self.task.set_status_error(
                'Database is being used by another task'
            )
            return
        self.task.add_detail(
            "SWITCHING-{}-{}...".format(host, info["zone"]), level=2
        )
        class_path = infra.plan.replication_topology.class_path
        steps = get_switch_write_instance_steps(class_path)
        if not steps_for_instances(steps, [instance], self.task):
            self.task.set_status_error('Could not switch all masters')
            return
    self.task.set_status_success('Could switch all masters')
def migrate_filer_disk_for_database(self, database):
    """Run the filer-disk migration workflow for *database*.

    Aborts early (marking the task and step manager as errored) when the
    database cannot run a new task, or when any step fails.
    """
    infra = database.infra
    class_path = infra.plan.replication_topology.class_path
    steps = get_filer_migrate_steps(class_path)
    task = self.register_task(database)
    instances = self._get_instances(infra)
    step_manager = self.register_step_manager(task, instances)
    # Guard clause: database busy with another operation.
    if not self._can_run(database, task):
        self.set_error(task, step_manager)
        return
    task.add_detail("Migrating disk for database {}...".format(
        database.name), level=2)
    steps_result = steps_for_instances(
        steps, instances, task,
        step_counter_method=step_manager.update_step,
        since_step=step_manager.current_step)
    if not steps_result:
        self.set_error(task, step_manager)
        return
    task.set_status_success('Migrate filer finished with success')
def create_database(
        name, plan, environment, team, project, description, task,
        subscribe_to_email_events=True, is_protected=False, user=None,
        retry_from=None):
    """Create a new database (infra + instances) and run the deploy steps.

    Existing instances matching the generated names are reused (retry
    case); missing ones are built in memory with driver defaults and
    persisted by the deploy steps.  ``retry_from`` resumes from its
    recorded step.
    """
    topology_path = plan.replication_topology.class_path
    number_of_vms = get_deploy_instances_size(topology_path)
    name = slugify(name)
    base_name = gen_infra_names(name, number_of_vms)
    infra = get_or_create_infra(base_name, plan, environment, retry_from)
    instances = []
    for i in range(number_of_vms):
        try:
            # Reuse an instance left over from a previous attempt, matched
            # either by hostname or DNS prefix.
            instance_name = '{}-0{}-{}'.format(
                base_name['name_prefix'], i+1, base_name['name_stamp']
            )
            instance = infra.instances.get(
                Q(hostname__hostname__startswith=instance_name) |
                Q(dns__startswith=instance_name)
            )
        except Instance.DoesNotExist:
            # Build a fresh, unsaved instance with driver defaults.
            instance = Instance()
            instance.dns = base_name['vms'][i]
            instance.databaseinfra = infra
            driver = infra.get_driver()
            instance.port = driver.get_default_database_port()
            instance.instance_type = driver.get_default_instance_type()
            instance.vm_name = instance.dns
        instances.append(instance)
    # Maintenance record for this creation run.
    database_create = DatabaseCreate()
    database_create.task = task
    database_create.name = name
    database_create.plan = plan
    database_create.environment = environment
    database_create.team = team
    database_create.project = project
    database_create.description = description
    database_create.subscribe_to_email_events = subscribe_to_email_events
    database_create.is_protected = is_protected
    database_create.user = '******'
    database_create.infra = infra
    # May be None on a first run; presumably populated by the deploy steps.
    database_create.database = infra.databases.first()
    database_create.save()
    steps = get_deploy_settings(topology_path)
    since_step = None
    if retry_from:
        since_step = retry_from.current_step
    if steps_for_instances(
            steps, instances, task, database_create.update_step,
            since_step=since_step):
        database_create.set_success()
        task.set_status_success('Database created')
    else:
        database_create.set_error()
        task.set_status_error(
            'Could not create database\n'
            'Please check error message and do retry'
        )
def database_environment_migrate(database, new_environment, new_offering,
                                 task, hosts_zones, since_step=None,
                                 step_manager=None):
    """Run one stage of a multi-stage environment migration of *database*.

    Without *step_manager* the infra's ``migration_stage`` is advanced and
    a fresh ``DatabaseMigrate`` is built; with it, the previous run is
    rebuilt and resumed.  Only when the infra reaches its last migration
    stage are database/infra repointed at the new environment.
    """
    infra = database.infra
    #database.infra.disk_offering_type = database.infra.disk_offering_type.get_type_to(new_environment)
    #database.save()
    if step_manager:
        # Retry path: reuse the stage recorded on the previous run.
        migration_stage = step_manager.migration_stage
        if not can_migrate(database, task, migration_stage, False):
            return
        database_migrate = rebuild_database_migrate(task, step_manager)
        instances = rebuild_hosts_migrate(database_migrate, step_manager,
                                          infra.in_last_migration_stage)
    else:
        # Fresh run: advance to the next stage before validating.
        infra.migration_stage += 1
        if not can_migrate(database, task, infra.migration_stage, False):
            return
        infra.save()
        database_migrate = build_database_migrate(
            task, database, new_environment, new_offering,
            infra.migration_stage)
        if infra.migration_stage == 1:
            instances = build_hosts_migrate(hosts_zones, database_migrate)
        else:
            # Later stages reuse the host layout of the last successful
            # stage.
            last_db_migrate = DatabaseMigrate.objects.filter(
                database=database,
                status=DatabaseMigrate.SUCCESS).last()
            instances = rebuild_hosts_migrate(database_migrate,
                                              last_db_migrate,
                                              infra.in_last_migration_stage)
    instances = sorted(instances, key=lambda k: k.id)
    steps = get_migrate_steps(database, infra.migration_stage)
    if not can_migrate_check_steps(steps, instances, since_step,
                                   database_migrate, task, False):
        return
    result = steps_for_instances(steps, instances, task,
                                 database_migrate.update_step, since_step,
                                 step_manager=step_manager)
    database_migrate = DatabaseMigrate.objects.get(id=database_migrate.id)
    if result:
        database = database_migrate.database
        infra = database.infra
        migration_stage = infra.migration_stage
        # Only the final stage actually flips the environment over.
        if infra.in_last_migration_stage:
            database.environment = database_migrate.environment
            database.save()
            infra.environment = database_migrate.environment
            infra.plan = infra.plan.get_equivalent_plan_for_env(
                database_migrate.environment)
            infra.disk_offering_type = infra.disk_offering_type.get_type_to(
                new_environment)
            infra.migration_stage = infra.NOT_STARTED
            infra.save()
        database_migrate.set_success()
        task.set_status_success('Database migrated with success')
    else:
        database_migrate.set_error()
        task.set_status_error('Could not migrate database')
def change_parameters_database(self, database, user, task, since_step=0):
    """Apply pending parameter changes to *database*'s infra.

    Static (non-dynamic) parameters force the restart-style workflow; a
    custom method on any changed parameter selects a custom procedure.
    The workflow has no rollback.
    """
    worker_name = get_worker_name()
    task = TaskHistory.register(self.request, user, task, worker_name)
    infra = database.infra
    plan = infra.plan
    class_path = plan.replication_topology.class_path
    from physical.models import DatabaseInfraParameter
    changed_parameters = DatabaseInfraParameter.get_databaseinfra_changed_parameters(
        databaseinfra=infra,
    )
    # One static parameter is enough to require the non-dynamic workflow.
    all_dinamic = True
    custom_procedure = None
    for changed_parameter in changed_parameters:
        if changed_parameter.parameter.dynamic is False:
            all_dinamic = False
            break
    # First custom method found wins.
    for changed_parameter in changed_parameters:
        if changed_parameter.parameter.custom_method:
            custom_procedure = changed_parameter.parameter.custom_method
            break
    steps = get_database_change_parameter_setting(
        class_path, all_dinamic, custom_procedure)
    LOG.info(steps)
    task.add_detail("Changed parameters:", level=0)
    for changed_parameter in changed_parameters:
        msg = "{}: old value: [{}], new value: [{}]".format(
            changed_parameter.parameter.name,
            changed_parameter.current_value,
            changed_parameter.value
        )
        task.add_detail(msg, level=1)
    task.add_detail("", level=0)
    if since_step > 0:
        # Retries skip steps that only run on a first attempt, so shift the
        # resume point back accordingly (never below zero).
        steps_dec = get_database_change_parameter_retry_steps_count(
            class_path, all_dinamic, custom_procedure)
        LOG.info('since_step: {}, steps_dec: {}'.format(since_step,
                                                        steps_dec))
        since_step = since_step - steps_dec
        if since_step < 0:
            since_step = 0
    database_change_parameter = DatabaseChangeParameter()
    database_change_parameter.database = database
    database_change_parameter.task = task
    database_change_parameter.save()
    instances_to_change_parameters = infra.get_driver().get_database_instances()
    success = steps_for_instances(
        steps, instances_to_change_parameters, task,
        database_change_parameter.update_step, since_step
    )
    if success:
        database_change_parameter.set_success()
        task.update_status_for(TaskHistory.STATUS_SUCCESS, 'Done')
    else:
        database_change_parameter.set_error()
        task.update_status_for(
            TaskHistory.STATUS_ERROR,
            'Could not do change the database parameters.\nChange parameters doesn\'t have rollback'
        )
def region_migration_start(self, infra, instances, since_step=None):
    """Migrate *infra*'s instances to another region/zone.

    The step list is inlined here (stop infra, build new VMs/exports, copy
    data, flip DNS/ACLs, tear down old hosts, re-enable monitoring).
    ``since_step`` resumes a previous, partially-run migration.
    """
    steps = [{
        'Disable monitoring and alarms': (
            'workflow.steps.util.zabbix.DestroyAlarms',
            'workflow.steps.util.db_monitor.DisableMonitoring',
        )}, {
        'Stopping infra': (
            'workflow.steps.util.database.Stop',
            'workflow.steps.util.database.CheckIsDown',
        )}, {
        'Creating new virtual machine': (
            'workflow.steps.util.vm.MigrationCreateNewVM',
        )}, {
        'Creating new infra': (
            'workflow.steps.util.vm.MigrationWaitingBeReady',
            'workflow.steps.util.infra.MigrationCreateInstance',
            'workflow.steps.util.disk.MigrationCreateExport',
        )}, {
        'Configuring new infra': (
            'workflow.steps.util.volume_provider.MountDataVolume',
            'workflow.steps.util.plan.InitializationMigration',
            'workflow.steps.util.plan.ConfigureMigration',
            'workflow.steps.util.metric_collector.ConfigureTelegraf',
        )}, {
        'Preparing new environment': (
            'workflow.steps.util.disk.AddDiskPermissionsOldest',
            'workflow.steps.util.disk.MountOldestExportMigration',
            'workflow.steps.util.disk.CopyDataBetweenExportsMigration',
            'workflow.steps.util.disk.FilePermissionsMigration',
            'workflow.steps.util.disk.UnmountNewerExportMigration',
            'workflow.steps.util.vm.ChangeInstanceHost',
            'workflow.steps.util.vm.UpdateOSDescription',
            'workflow.steps.util.infra.OfferingMigration',
            'workflow.steps.util.infra.UpdateMigrateEnvironment',
            'workflow.steps.util.infra.UpdateMigratePlan',
        )}, {
        'Starting new infra': (
            'workflow.steps.util.database.Start',
            'workflow.steps.util.database.CheckIsUp',
            'workflow.steps.util.metric_collector.RestartTelegraf',
        )}, {
        'Enabling access': (
            'workflow.steps.util.dns.ChangeEndpoint',
            'workflow.steps.util.acl.ReplicateAclsMigration',
        )}, {
        'Destroying old infra': (
            'workflow.steps.util.disk.DisableOldestExportMigration',
            'workflow.steps.util.disk.DiskUpdateHost',
            'workflow.steps.util.vm.RemoveHostMigration',
        )}, {
        'Enabling monitoring and alarms': (
            'workflow.steps.util.db_monitor.EnableMonitoring',
            'workflow.steps.util.zabbix.CreateAlarms',
        )}, {
        'Restart replication': (
            'workflow.steps.util.database.SetSlavesMigration',
        )
    }]
    # Task created by hand (not via TaskHistory.register) for this worker.
    task = TaskHistory()
    task.task_id = self.request.id
    task.task_name = "migrating_zone"
    task.task_status = TaskHistory.STATUS_RUNNING
    task.context = {'infra': infra, 'instances': instances}
    task.arguments = {'infra': infra, 'instances': instances}
    task.user = '******'
    task.save()
    if steps_for_instances(steps, instances, task, since_step=since_step):
        task.set_status_success('Region migrated with success')
    else:
        task.set_status_error('Could not migrate region')
    # NOTE(review): reconstructed as unconditional (the migration steps
    # already updated infra.environment) — confirm whether this belongs
    # inside the error branch in the original indentation.
    database = infra.databases.first()
    database.environment = infra.environment
    database.save()
def update_ssl(self, database, task, since_step=None, step_manager=None,
               scheduled_task=None, auto_rollback=False):
    """Update the SSL certificates of *database*'s instances.

    Resumes from *step_manager* (re-saved as a new row) or from the last
    retryable failed ``UpdateSsl``.  With ``auto_rollback`` a failed run
    triggers a full rollback under a cloned task/step manager.
    """
    from maintenance.models import UpdateSsl
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=task.user,
                                worker_name=get_worker_name())
    if step_manager:
        step_manager = step_manager  # no-op self-assign kept verbatim
        step_manager.id = None       # persist as a new row
        step_manager.started_at = None
        since_step = step_manager.current_step
    else:
        retry_from = UpdateSsl.objects.filter(
            can_do_retry=True, database=database,
            status=UpdateSsl.ERROR).last()
        step_manager = UpdateSsl()
        if retry_from:
            step_manager.current_step = retry_from.current_step
            since_step = retry_from.current_step
            step_manager.task_schedule = retry_from.task_schedule
    step_manager.database = database
    step_manager.task = task
    if scheduled_task:
        step_manager.task_schedule = scheduled_task
    step_manager.set_running()
    step_manager.save()
    steps = database.databaseinfra.update_ssl_steps()
    # One instance per host (first seen wins) — the SSL update runs once
    # per machine.
    hosts = []
    for instance in database.infra.instances.all():
        if instance.hostname not in hosts:
            hosts.append(instance.hostname)
    instances = []
    for host in hosts:
        instances.append(host.instances.all()[0])
    result = steps_for_instances(steps, instances, task,
                                 step_manager.update_step, since_step,
                                 step_manager=step_manager)
    step_manager = UpdateSsl.objects.get(id=step_manager.id)
    if result:
        step_manager.set_success()
        task.set_status_success('SSL Update with success')
    else:
        step_manager.set_error()
        task.set_status_error('Could not update SSL')
        # NOTE(review): auto-rollback reconstructed as failure-path only —
        # confirm against the original indentation.
        if auto_rollback:
            from workflow.workflow import rollback_for_instances_full
            # Clone the task and step manager as new rows dedicated to the
            # rollback run.
            new_task = task
            new_task.id = None
            new_task.details = ''
            new_task.task_name += '_rollback'
            new_task.task_status = new_task.STATUS_RUNNING
            new_task.save()
            rollback_step_manager = step_manager
            rollback_step_manager.id = None
            rollback_step_manager.task_schedule = None
            rollback_step_manager.can_do_retry = 0
            rollback_step_manager.save()
            result = rollback_for_instances_full(
                steps, instances, new_task,
                rollback_step_manager.get_current_step,
                rollback_step_manager.update_step,
            )
            if result:
                rollback_step_manager.set_success()
                task.set_status_success('Rollback SSL Update with success')
            else:
                if hasattr(rollback_step_manager, 'cleanup'):
                    rollback_step_manager.cleanup(instances)
                rollback_step_manager.set_error()
                task.set_status_error('Could not rollback update SSL')
def region_migration_start(self, infra, instances, since_step=None):
    """Migrate *infra*'s instances to another region/zone.

    Same workflow as the sibling version but with the additional
    ``plan.ConfigureLog`` step when configuring the new infra.
    ``since_step`` resumes a previous, partially-run migration.
    """
    steps = [{
        'Disable monitoring and alarms': (
            'workflow.steps.util.zabbix.DestroyAlarms',
            'workflow.steps.util.db_monitor.DisableMonitoring',
        )
    }, {
        'Stopping infra': (
            'workflow.steps.util.database.Stop',
            'workflow.steps.util.database.CheckIsDown',
        )
    }, {
        'Creating new virtual machine': (
            'workflow.steps.util.vm.MigrationCreateNewVM',
        )
    }, {
        'Creating new infra': (
            'workflow.steps.util.vm.MigrationWaitingBeReady',
            'workflow.steps.util.infra.MigrationCreateInstance',
            'workflow.steps.util.disk.MigrationCreateExport',
        )
    }, {
        'Configuring new infra': (
            'workflow.steps.util.volume_provider.MountDataVolume',
            'workflow.steps.util.plan.InitializationMigration',
            'workflow.steps.util.plan.ConfigureMigration',
            'workflow.steps.util.plan.ConfigureLog',
            'workflow.steps.util.metric_collector.ConfigureTelegraf',
        )
    }, {
        'Preparing new environment': (
            'workflow.steps.util.disk.AddDiskPermissionsOldest',
            'workflow.steps.util.disk.MountOldestExportMigration',
            'workflow.steps.util.disk.CopyDataBetweenExportsMigration',
            'workflow.steps.util.disk.FilePermissionsMigration',
            'workflow.steps.util.disk.UnmountNewerExportMigration',
            'workflow.steps.util.vm.ChangeInstanceHost',
            'workflow.steps.util.vm.UpdateOSDescription',
            'workflow.steps.util.infra.OfferingMigration',
            'workflow.steps.util.infra.UpdateMigrateEnvironment',
            'workflow.steps.util.infra.UpdateMigratePlan',
        )
    }, {
        'Starting new infra': (
            'workflow.steps.util.database.Start',
            'workflow.steps.util.database.CheckIsUp',
            'workflow.steps.util.metric_collector.RestartTelegraf',
        )
    }, {
        'Enabling access': (
            'workflow.steps.util.dns.ChangeEndpoint',
            'workflow.steps.util.acl.ReplicateAclsMigration',
        )
    }, {
        'Destroying old infra': (
            'workflow.steps.util.disk.DisableOldestExportMigration',
            'workflow.steps.util.disk.DiskUpdateHost',
            'workflow.steps.util.vm.RemoveHostMigration',
        )
    }, {
        'Enabling monitoring and alarms': (
            'workflow.steps.util.db_monitor.EnableMonitoring',
            'workflow.steps.util.zabbix.CreateAlarms',
        )
    }, {
        'Restart replication': (
            'workflow.steps.util.database.SetSlavesMigration',
        )
    }]
    # Task created by hand (not via TaskHistory.register) for this worker.
    task = TaskHistory()
    task.task_id = self.request.id
    task.task_name = "migrating_zone"
    task.task_status = TaskHistory.STATUS_RUNNING
    task.context = {'infra': infra, 'instances': instances}
    task.arguments = {'infra': infra, 'instances': instances}
    task.user = '******'
    task.save()
    if steps_for_instances(steps, instances, task, since_step=since_step):
        task.set_status_success('Region migrated with success')
    else:
        task.set_status_error('Could not migrate region')
    # NOTE(review): reconstructed as unconditional (the migration steps
    # already updated infra.environment) — confirm whether this belongs
    # inside the error branch in the original indentation.
    database = infra.databases.first()
    database.environment = infra.environment
    database.save()