Ejemplo n.º 1
0
def database_environment_migrate_rollback(self, migrate, task):
    """Re-register *task* under this worker and undo a database
    environment migration."""
    from tasks_database_migrate import rollback_database_environment_migrate

    worker = get_worker_name()
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=task.user, worker_name=worker)
    rollback_database_environment_migrate(migrate, task)
Ejemplo n.º 2
0
def remove_database_old_backups(self):
    """Purge snapshot backups older than the configured retention window.

    Registers a TaskHistory entry for this worker, removes every snapshot
    whose start date is older than ``backup_retention_days``, and stores a
    per-snapshot success/error summary on the task.
    """
    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        worker_name=worker_name,
                                        user=None)

    backup_retention_days = Configuration.get_by_name_as_int(
        'backup_retention_days')

    LOG.info("Removing backups older than %s days" % (backup_retention_days))

    backup_time_dt = date.today() - timedelta(days=backup_retention_days)
    # NOTE: `snapshopt_id` typo is the actual model field name.
    snapshots = Snapshot.objects.filter(start_at__lte=backup_time_dt,
                                        purge_at__isnull=True,
                                        instance__isnull=False,
                                        snapshopt_id__isnull=False)
    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    if not snapshots:
        msgs.append("There is no snapshot to purge")
    for snapshot in snapshots:
        try:
            remove_snapshot_backup(snapshot=snapshot)
            msg = "Backup %s removed" % (snapshot)
            LOG.info(msg)
        except Exception as e:
            # Was a bare `except:` — keep the task alive but record why the
            # removal failed instead of swallowing the exception silently.
            msg = "Error removing backup %s. Error: %s" % (snapshot, e)
            status = TaskHistory.STATUS_ERROR
            LOG.error(msg)
        msgs.append(msg)

    task_history.update_status_for(status, details="\n".join(msgs))

    return
Ejemplo n.º 3
0
def make_databases_backup(self):
    """Snapshot-backup every eligible CloudStack database instance.

    Iterates all CloudStack-provisioned infras, skips instances that are
    not eligible for backup, and records a per-instance summary on the
    task history.
    """
    LOG.info("Making databases backups")
    task_history = TaskHistory.register(request=self.request, user=None)

    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    databaseinfras = DatabaseInfra.objects.filter(plan__provider=Plan.CLOUDSTACK)
    error = {}
    for databaseinfra in databaseinfras:
        instances = Instance.objects.filter(databaseinfra=databaseinfra)
        for instance in instances:

            driver = instance.databaseinfra.get_driver()
            if not driver.check_instance_is_eligible_for_backup(instance):
                LOG.info('Instance %s is not eligible for backup' % (str(instance)))
                continue

            try:
                if make_instance_snapshot_backup(instance=instance, error=error):
                    msg = "Backup for %s was successful" % (str(instance))
                    LOG.info(msg)
                else:
                    status = TaskHistory.STATUS_ERROR
                    msg = "Backup for %s was unsuccessful. Error: %s" % (str(instance), error['errormsg'])
                    LOG.error(msg)
                # Bug fix: removed stray `print msg` debug statement.
            except Exception as e:  # was py2-only `except Exception, e`
                status = TaskHistory.STATUS_ERROR
                msg = "Backup for %s was unsuccessful. Error: %s" % (str(instance), str(e))
                LOG.error(msg)

            msgs.append(msg)

    # Consistency fix: sibling copies of this task persist the final status;
    # without it the task history is never closed out.
    task_history.update_status_for(status, details="\n".join(msgs))

    return
Ejemplo n.º 4
0
def purge_quarantine(self,):
    """Destroy every database whose quarantine period has expired.

    CloudStack-provisioned databases have their whole infra destroyed;
    all others are simply deleted.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    # Initialize before the try so the except handler never hits a NameError
    # when TaskHistory.register() itself fails.
    task_history = None
    try:

        task_history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))
        quarantine_time = Configuration.get_by_name_as_int('quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)

        databases = Database.objects.filter(is_in_quarantine=True,
                                            quarantine_dt__lte=quarantine_time_dt)

        for database in databases:
            if database.plan.provider == database.plan.CLOUDSTACK:
                databaseinfra = database.databaseinfra

                destroy_infra(databaseinfra=databaseinfra, task=task_history)
            else:
                database.delete()

            LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
                database.name, quarantine_time))

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Databases destroyed successfully')
        return

    except Exception as e:
        # Bug fix: record the actual failure (details was the fixed string
        # "Error"), and guard in case registration failed.
        if task_history is not None:
            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()
Ejemplo n.º 5
0
def remove_database_old_backups(self):
    """Purge snapshots older than the configured retention window.

    Variant without an explicit worker name; records a per-snapshot
    success/error summary on the task history.
    """
    task_history = TaskHistory.register(request=self.request, user=None)

    backup_retention_days = Configuration.get_by_name_as_int('backup_retention_days')

    LOG.info("Removing backups older than %s days" % (backup_retention_days))

    backup_time_dt = date.today() - timedelta(days=backup_retention_days)
    # NOTE: `snapshopt_id` typo is the actual model field name.
    snapshots = Snapshot.objects.filter(start_at__lte=backup_time_dt,
                                        purge_at__isnull=True,
                                        instance__isnull=False,
                                        snapshopt_id__isnull=False)
    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    if not snapshots:
        msgs.append("There is no snapshot to purge")
    for snapshot in snapshots:
        try:
            remove_snapshot_backup(snapshot=snapshot)
            msg = "Backup %s removed" % (snapshot)
            LOG.info(msg)
        except Exception as e:
            # Was a bare `except:` — catch Exception and keep the reason.
            msg = "Error removing backup %s. Error: %s" % (snapshot, e)
            status = TaskHistory.STATUS_ERROR
            LOG.error(msg)
        msgs.append(msg)

    task_history.update_status_for(status, details="\n".join(msgs))

    return
Ejemplo n.º 6
0
def database_notification(self):
    """Run the per-team database notification analysis and record a
    per-team summary on the task history."""
    LOG.info("retrieving all teams and sending database notification")
    teams = Team.objects.all()
    msgs = {}

    for team in teams:
        # one notification analysis (task) per team
        msgs[team] = analyzing_notification_for_team(team=team)

    # Initialize so the except handler never hits a NameError when
    # TaskHistory.register() itself raises.
    task_history = None
    try:
        LOG.info("Messages: ")
        LOG.info(msgs)

        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request,
                                            user=None,
                                            worker_name=worker_name)
        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details="\n".join(
                                           (str(key) + ': ' + ', '.join(value)
                                            for key, value in msgs.items())))
    except Exception as e:
        # Bug fix: store the message text (details is a text field), not the
        # exception object, and guard against a failed registration.
        if task_history is not None:
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=str(e))
        else:
            LOG.error("database_notification failed: %s" % e)

    return
Ejemplo n.º 7
0
def node_zone_migrate_rollback(self, migrate, task):
    """Re-register *task* on this worker, then undo a node zone migration."""
    from tasks_migrate import rollback_node_zone_migrate

    task = TaskHistory.register(
        request=self.request,
        task_history=task,
        user=task.user,
        worker_name=get_worker_name(),
    )
    rollback_node_zone_migrate(migrate, task)
Ejemplo n.º 8
0
def node_zone_migrate_rollback(self, migrate, task):
    """Roll back a node zone migration under a freshly registered task."""
    worker = get_worker_name()
    registered = TaskHistory.register(request=self.request, task_history=task,
                                      user=task.user, worker_name=worker)
    from tasks_migrate import rollback_node_zone_migrate
    rollback_node_zone_migrate(migrate, registered)
Ejemplo n.º 9
0
def remove_database_old_backups(self):
    """Purge old snapshots across every environment.

    Collects each environment's purgeable snapshots and removes them one
    by one, accumulating a per-snapshot report on the task history.
    """
    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, worker_name=worker_name, user=None
    )
    task_history.relevance = TaskHistory.RELEVANCE_WARNING

    snapshots = []
    for env in Environment.objects.all():
        snapshots += get_snapshots_by_env(env)

    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    if not snapshots:  # idiom: truthiness instead of len(...) == 0
        msgs.append("There is no snapshot to purge")

    for snapshot in snapshots:
        try:
            # remove_snapshot_backup appends its own progress to msgs
            remove_snapshot_backup(snapshot=snapshot, msgs=msgs)
        except Exception as e:
            msg = "Error removing backup {}. Error: {}".format(snapshot, e)
            status = TaskHistory.STATUS_ERROR
            LOG.error(msg)
            msgs.append(msg)
    task_history.update_status_for(status, details="\n".join(msgs))
    return
Ejemplo n.º 10
0
def database_environment_migrate_rollback(self, migrate, task):
    """Undo a database environment migration on this worker."""
    from tasks_database_migrate import rollback_database_environment_migrate

    registered = TaskHistory.register(
        request=self.request,
        task_history=task,
        user=task.user,
        worker_name=get_worker_name(),
    )
    rollback_database_environment_migrate(migrate, registered)
def database_notification(self):
    """Analyze notifications for every team and record a per-team
    summary on the task history."""
    LOG.info("retrieving all teams and sending database notification")
    teams = Team.objects.all()
    msgs = {}

    for team in teams:
        # one notification analysis (task) per team
        msgs[team] = analyzing_notification_for_team(team=team)

    # Initialize so the except handler never hits a NameError when
    # TaskHistory.register() itself raises.
    task_history = None
    try:
        LOG.info("Messages: ")
        LOG.info(msgs)

        worker_name = get_worker_name()
        task_history = TaskHistory.register(
            request=self.request, user=None, worker_name=worker_name)
        task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details="\n".join(
            str(key) + ': ' + ', '.join(value) for key, value in msgs.items()))
    except Exception as e:
        # Bug fix: store message text, not the exception object, and guard
        # against a failed registration.
        if task_history is not None:
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=str(e))
        else:
            LOG.error("database_notification failed: %s" % e)

    return
Ejemplo n.º 12
0
def remove_database_old_backups(self):
    """Purge old snapshots across all environments.

    Aborts the whole run (with an error status) as soon as credentials for
    any environment cannot be obtained; otherwise removes each snapshot and
    records a per-snapshot report on the task history.
    """
    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, worker_name=worker_name, user=None
    )
    task_history.relevance = TaskHistory.RELEVANCE_WARNING

    snapshots = []
    msgs = []
    for env in Environment.objects.all():
        try:
            snapshots += get_snapshots_by_env(env)
        except GetCredentialException as ex:
            # Missing credentials: record the error and stop the whole run.
            status = TaskHistory.STATUS_ERROR
            LOG.error(str(ex))
            msgs.append(str(ex))
            task_history.update_status_for(
                status, details="\n".join(msgs))
            return

    status = TaskHistory.STATUS_SUCCESS
    if not snapshots:  # idiom: truthiness instead of len(...) == 0
        msgs.append("There is no snapshot to purge")

    for snapshot in snapshots:
        try:
            # remove_snapshot_backup appends its own progress to msgs
            remove_snapshot_backup(snapshot=snapshot, msgs=msgs)
        except Exception as e:
            msg = "Error removing backup {}. Error: {}".format(snapshot, e)
            status = TaskHistory.STATUS_ERROR
            LOG.error(msg)
            msgs.append(msg)
    task_history.update_status_for(status, details="\n".join(msgs))
    return
Ejemplo n.º 13
0
def _create_database_rollback(self, rollback_from, task, user):
    """Register the task on this worker and roll back a database creation."""
    from tasks_create_database import rollback_create

    registered_task = TaskHistory.register(
        request=self.request,
        task_history=task,
        user=user,
        worker_name=get_worker_name(),
    )
    rollback_create(rollback_from, registered_task, user)
Ejemplo n.º 14
0
def _create_database_rollback(self, rollback_from, task, user):
    """Undo a database creation, re-registering the task first."""
    worker = get_worker_name()
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=user, worker_name=worker)

    from tasks_create_database import rollback_create
    rollback_create(rollback_from, task, user)
def node_zone_migrate(
    self, host, zone, new_environment, task, since_step=None, step_manager=None
):
    """Migrate *host* to another zone/environment under this worker's task."""
    from tasks_migrate import node_zone_migrate

    task = TaskHistory.register(
        request=self.request,
        task_history=task,
        user=task.user,
        worker_name=get_worker_name(),
    )
    node_zone_migrate(
        host, zone, new_environment, task, since_step,
        step_manager=step_manager,
    )
Ejemplo n.º 16
0
def restore_snapshot(self, database, snapshot, user, task_history):
    """Restore *database* from the snapshot with the given id via the
    restore workflow.

    `snapshot` and `task_history` are primary keys; both rows are fetched
    here. The final task status reflects whether the workflow recorded any
    exception traceback.
    """
    from dbaas_nfsaas.models import HostAttr
    LOG.info("Restoring snapshot")
    worker_name = get_worker_name()

    # Re-fetch the TaskHistory row and re-register it under this worker.
    task_history = models.TaskHistory.objects.get(id=task_history)
    task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                        user=user, worker_name=worker_name)

    databaseinfra = database.databaseinfra

    snapshot = Snapshot.objects.get(id=snapshot)
    snapshot_id = snapshot.snapshopt_id  # NOTE: field-name typo is in the model

    # The export the snapshot was taken from identifies the host; then pick
    # that host's currently-active export as the restore target.
    host_attr = HostAttr.objects.get(nfsaas_path=snapshot.export_path)
    host = host_attr.host
    host_attr = HostAttr.objects.get(host=host, is_active=True)

    export_id = host_attr.nfsaas_export_id
    export_path = host_attr.nfsaas_path

    steps = RESTORE_SNAPSHOT_SINGLE

    # HA MySQL infras use the multi-host restore step list.
    if databaseinfra.plan.is_ha and databaseinfra.engine_name == 'mysql':
        steps = RESTORE_SNAPSHOT_MYSQL_HA

    # All instances except the snapshot host, arbiters and sentinels.
    not_primary_instances = databaseinfra.instances.exclude(hostname=host).exclude(instance_type__in=[Instance.MONGODB_ARBITER,
                                                                                                      Instance.REDIS_SENTINEL])
    not_primary_hosts = [
        instance.hostname for instance in not_primary_instances]

    workflow_dict = build_dict(databaseinfra=databaseinfra,
                               database=database,
                               snapshot_id=snapshot_id,
                               export_path=export_path,
                               export_id=export_id,
                               host=host,
                               steps=steps,
                               not_primary_hosts=not_primary_hosts,
                               )

    start_workflow(workflow_dict=workflow_dict, task=task_history)

    # Any recorded traceback marks the whole restore as failed.
    if workflow_dict['exceptions']['traceback']:
        error = "\n".join(
            ": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
        traceback = "\nException Traceback\n".join(
            workflow_dict['exceptions']['traceback'])
        # NOTE(review): `error` is included both before and after the
        # traceback — present in both copies of this task; confirm intent.
        error = "{}\n{}\n{}".format(error, traceback, error)
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=error)
    else:
        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Database sucessfully recovered!')

    return
Ejemplo n.º 17
0
def restore_snapshot(self, database, snapshot, user, task_history):
    """Restore *database* from the snapshot with the given id via the
    restore workflow (duplicate variant of the task above).

    `snapshot` and `task_history` are primary keys; both rows are fetched
    here. The final task status reflects whether the workflow recorded any
    exception traceback.
    """
    from dbaas_nfsaas.models import HostAttr
    LOG.info("Restoring snapshot")
    worker_name = get_worker_name()

    # Re-fetch the TaskHistory row and re-register it under this worker.
    task_history = models.TaskHistory.objects.get(id= task_history)
    task_history = TaskHistory.register(request=self.request, task_history=task_history,
            user=user, worker_name=worker_name)

    databaseinfra = database.databaseinfra

    snapshot = Snapshot.objects.get(id=snapshot)
    snapshot_id = snapshot.snapshopt_id  # NOTE: field-name typo is in the model

    # The export the snapshot was taken from identifies the host; then pick
    # that host's currently-active export as the restore target.
    host_attr = HostAttr.objects.get(nfsaas_path=snapshot.export_path)
    host = host_attr.host
    host_attr = HostAttr.objects.get(host=host, is_active=True)

    export_id = host_attr.nfsaas_export_id
    export_path = host_attr.nfsaas_path

    steps = RESTORE_SNAPSHOT_SINGLE

    # HA MySQL infras use the multi-host restore step list.
    if databaseinfra.plan.is_ha and databaseinfra.engine_name == 'mysql':
        steps = RESTORE_SNAPSHOT_MYSQL_HA

    # All instances except the snapshot host, arbiters and sentinels.
    not_primary_instances = databaseinfra.instances.exclude(hostname=host).exclude(instance_type__in=[Instance.MONGODB_ARBITER,
                                                                                  Instance.REDIS_SENTINEL])
    not_primary_hosts = [instance.hostname for instance in not_primary_instances]

    workflow_dict = build_dict(databaseinfra=databaseinfra,
                               database=database,
                               snapshot_id=snapshot_id,
                               export_path=export_path,
                               export_id=export_id,
                               host=host,
                               steps=steps,
                               not_primary_hosts=not_primary_hosts,
                               )

    start_workflow(workflow_dict=workflow_dict, task=task_history)

    # Any recorded traceback marks the whole restore as failed.
    if workflow_dict['exceptions']['traceback']:
        error = "\n".join(
            ": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
        traceback = "\nException Traceback\n".join(
            workflow_dict['exceptions']['traceback'])
        # NOTE(review): `error` appears twice around the traceback — present
        # in both copies of this task; confirm intent.
        error = "{}\n{}\n{}".format(error, traceback, error)
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=error)
    else:
        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Database sucessfully recovered!')

    return
Ejemplo n.º 18
0
def restore_database(self, database, task, snapshot, user, retry_from=None):
    """Restore *database* from the snapshot whose id is *snapshot*."""
    from backup.models import Snapshot
    from tasks_restore_backup import restore_snapshot

    task = TaskHistory.register(
        request=self.request,
        task_history=task,
        user=user,
        worker_name=get_worker_name(),
    )
    snapshot = Snapshot.objects.get(id=snapshot)
    restore_snapshot(database, snapshot.group, task, retry_from)
Ejemplo n.º 19
0
def restore_database(self, database, task, snapshot, user, retry_from=None):
    """Kick off a snapshot restore for *database* on this worker."""
    worker = get_worker_name()
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=user, worker_name=worker)

    from backup.models import Snapshot
    snap = Snapshot.objects.get(id=snapshot)

    from tasks_restore_backup import restore_snapshot
    restore_snapshot(database, snap.group, task, retry_from)
def database_environment_migrate(
    self, database, new_environment, new_offering, task, hosts_zones,
    since_step=None, step_manager=None
):
    """Migrate *database* to a new environment and offering."""
    from tasks_database_migrate import database_environment_migrate

    worker = get_worker_name()
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=task.user, worker_name=worker)
    database_environment_migrate(
        database, new_environment, new_offering, task, hosts_zones,
        since_step, step_manager=step_manager
    )
Ejemplo n.º 21
0
def purge_unused_exports_task(self):
    """Remove inactive exports that no longer have snapshots."""
    from notification.tasks import TaskRegister

    task = TaskRegister.purge_unused_exports()
    task = TaskHistory.register(
        request=self.request,
        worker_name=get_worker_name(),
        task_history=task,
    )

    task.add_detail('Getting all inactive exports without snapshots')
    if not purge_unused_exports(task):
        task.set_status_error('Error')
    else:
        task.set_status_success('Done')
Ejemplo n.º 22
0
def set_celery_healthcheck_last_update(self):
    """Record the current time as the Celery healthcheck heartbeat and
    close the task as SUCCESS, or ERROR on failure."""
    # Initialize before the try so the except handler never hits a
    # NameError when registration itself fails.
    task_history = None
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, user=None,
                                            worker_name=worker_name)

        LOG.info("Setting Celery healthcheck last update")
        CeleryHealthCheck.set_last_update()

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details="Finished")
    except Exception as e:  # was py2-only `except Exception, e`
        LOG.warn("Oopss...{}".format(e))
        if task_history is not None:
            # Store message text; the exception object is not a detail string.
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=str(e))
Ejemplo n.º 23
0
def purge_unused_exports_task(self):
    """Purge exports that are inactive and snapshot-free."""
    from notification.tasks import TaskRegister
    registered = TaskHistory.register(
        request=self.request,
        worker_name=get_worker_name(),
        task_history=TaskRegister.purge_unused_exports(),
    )

    registered.add_detail('Getting all inactive exports without snapshots')
    succeeded = purge_unused_exports(registered)
    if succeeded:
        registered.set_status_success('Done')
    else:
        registered.set_status_error('Error')
Ejemplo n.º 24
0
def make_databases_backup(self):
    """Snapshot every eligible CloudStack database instance and record a
    per-instance success/error summary on the task history.

    Eligibility-check failures and snapshot failures both mark the task
    as ERROR but never abort the remaining instances.
    """
    LOG.info("Making databases backups")
    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        worker_name=worker_name,
                                        user=None)

    msgs = []
    status = TaskHistory.STATUS_SUCCESS
    databaseinfras = DatabaseInfra.objects.filter(
        plan__provider=Plan.CLOUDSTACK)
    error = {}
    for databaseinfra in databaseinfras:
        instances = Instance.objects.filter(databaseinfra=databaseinfra)
        for instance in instances:

            try:
                if not instance.databaseinfra.get_driver(
                ).check_instance_is_eligible_for_backup(instance):
                    LOG.info('Instance %s is not eligible for backup' %
                             (str(instance)))
                    continue
            except Exception as e:
                status = TaskHistory.STATUS_ERROR
                msg = "Backup for %s was unsuccessful. Error: %s" % (
                    str(instance), str(e))
                LOG.error(msg)
            else:
                try:
                    if make_instance_snapshot_backup(instance=instance,
                                                     error=error):
                        msg = "Backup for %s was successful" % (str(instance))
                        LOG.info(msg)
                    else:
                        status = TaskHistory.STATUS_ERROR
                        msg = "Backup for %s was unsuccessful. Error: %s" % (
                            str(instance), error['errormsg'])
                        LOG.error(msg)
                    # Bug fix: removed a stray LOG.info(msg) here that
                    # double-logged successes and re-logged errors at INFO.
                except Exception as e:
                    status = TaskHistory.STATUS_ERROR
                    msg = "Backup for %s was unsuccessful. Error: %s" % (
                        str(instance), str(e))
                    LOG.error(msg)

            msgs.append(msg)

    task_history.update_status_for(status, details="\n".join(msgs))

    return
def create_database(
    self, name, plan, environment, team, project, description, task,
    subscribe_to_email_events=True, is_protected=False, user=None,
    retry_from=None
):
    """Create a database via the creation workflow, re-registering *task*
    under this worker first."""
    from tasks_create_database import create_database

    worker = get_worker_name()
    task = TaskHistory.register(request=self.request, task_history=task,
                                user=user, worker_name=worker)
    create_database(
        name, plan, environment, team, project, description, task,
        subscribe_to_email_events, is_protected, user, retry_from
    )
Ejemplo n.º 26
0
def update_ssl(self):
    """Schedule an SSL-certificate update task for each database whose
    infra certificate falls in the selected expiration window."""
    from account.models import User
    from notification.tasks import TaskRegister
    from logical.models import Database
    LOG.info("Updating ssl certificates")
    worker_name = get_worker_name()
    user = User.objects.get(username="******")
    task_history = TaskHistory.register(request=self.request,
                                        worker_name=worker_name,
                                        user=None)
    task_history.relevance = TaskHistory.RELEVANCE_ERROR
    # Bug fix: `datetime.date.now()` does not exist (`date` has no `now`
    # method) and raised at runtime; use date.today() like the rest of
    # this module.
    # NOTE(review): the __gte filter selects certificates expiring any time
    # after (today - 30 days) — confirm whether __lte of (today + 30 days)
    # was the actual intent.
    for db in Database.objects.filter(
            databaseinfra__ssl_expire_at__gte=(date.today() -
                                               timedelta(days=30))):
        TaskRegister.update_ssl(db, user)
Ejemplo n.º 27
0
def analyze_databases(self, task_history=None):
    """Run the resize-analysis service over every non-quarantined database
    and persist per-instance results for each execution plan.

    Audit context is opened for the run and always cleaned up; any service
    failure marks the task as ERROR.
    """
    endpoint, healh_check_route, healh_check_string = get_analyzing_credentials()
    user = User.objects.get(username='******')
    worker_name = get_worker_name()
    task_history = TaskHistory.register(task_history=task_history, request=self.request, user=user,
                                        worker_name=worker_name)
    task_history.update_details(persist=True, details="Loading Process...")
    AuditRequest.new_request("analyze_databases", user, "localhost")

    try:
        analyze_service = AnalyzeService(endpoint, healh_check_route,
                                         healh_check_string)
        with transaction.atomic():
            databases = Database.objects.filter(is_in_quarantine=False)
            today = datetime.now()
            for database in databases:
                database_name, engine, instances, environment_name, databaseinfra_name = setup_database_info(database)
                for execution_plan in ExecutionPlan.objects.all():
                    # Skip databases this execution plan cannot resize.
                    if database_can_not_be_resized(database, execution_plan):
                        continue
                    params = execution_plan.setup_execution_params()
                    result = analyze_service.run(engine=engine, database=database_name,
                                                 instances=instances, **params)
                    if result['status'] == 'success':
                        task_history.update_details(persist=True, details="\nDatabase {} {} was analised.".format(database, execution_plan.plan_name))
                        # Only record when the service reported every instance.
                        if result['msg'] != instances:
                            continue
                        for instance in result['msg']:
                            insert_analyze_repository_record(today, database_name, instance,
                                                             engine, databaseinfra_name,
                                                             environment_name,
                                                             execution_plan)
                    else:
                        raise Exception("Check your service logs..")
        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details='Analisys ok!')
    except Exception:
        # `database`/`execution_plan`/`result` may be unbound if the failure
        # happened before the loops ran; fall back to a generic error then.
        try:
            task_history.update_details(persist=True,
                                        details="\nDatabase {} {} could not be analised.".format(database,
                                                                                                 execution_plan.plan_name))
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details='Analisys finished with errors!\nError: {}'.format(result['msg']))
        except UnboundLocalError:
            task_history.update_details(persist=True, details="\nProccess crashed")
            task_history.update_status_for(TaskHistory.STATUS_ERROR, details='Analisys could not be started')
    finally:
        AuditRequest.cleanup_request()
Ejemplo n.º 28
0
def remove_database_backup(self, task, snapshot):
    """Force-remove a single snapshot backup, reporting through *task*."""
    registered = TaskHistory.register(
        request=self.request,
        worker_name=get_worker_name(),
        task_history=task,
    )

    registered.add_detail('Removing {}'.format(snapshot))
    try:
        remove_snapshot_backup(snapshot, force=1)
    except Exception as e:
        registered.add_detail('Error: {}'.format(e))
        task.set_status_error('Could not delete backup')
        return False
    task.set_status_success('Backup deleted with success')
    return True
Ejemplo n.º 29
0
def remove_database_backup(self, task, snapshot):
    """Delete one backup snapshot (forced), recording the outcome on *task*."""
    worker = get_worker_name()
    history = TaskHistory.register(request=self.request,
                                   worker_name=worker,
                                   task_history=task)

    history.add_detail('Removing {}'.format(snapshot))
    try:
        remove_snapshot_backup(snapshot, force=1)
    except Exception as exc:
        history.add_detail('Error: {}'.format(exc))
        task.set_status_error('Could not delete backup')
        return False
    else:
        task.set_status_success('Backup deleted with success')
        return True
Ejemplo n.º 30
0
def update_ssl(self,
               database,
               task,
               since_step=None,
               step_manager=None,
               scheduled_task=None):
    """Update the SSL certificate on every instance of *database*.

    Resumes from *step_manager*'s recorded step when one is supplied;
    otherwise retries the last failed UpdateSsl for the database, if any.
    The step manager and the task are both marked success/error at the end.
    """
    from maintenance.models import UpdateSsl
    task = TaskHistory.register(request=self.request,
                                task_history=task,
                                user=task.user,
                                worker_name=get_worker_name())
    if step_manager:
        # Re-run an existing manager: clear identity so a new row is saved,
        # and resume from its recorded step. (Removed the redundant
        # `step_manager = step_manager` self-assignment.)
        step_manager.id = None
        step_manager.started_at = None
        since_step = step_manager.current_step
    else:
        # No manager given: resume from the last retryable failure, if any.
        retry_from = UpdateSsl.objects.filter(can_do_retry=True,
                                              database=database,
                                              status=UpdateSsl.ERROR).last()
        step_manager = UpdateSsl()
        if retry_from:
            step_manager.current_step = retry_from.current_step
            since_step = retry_from.current_step
            step_manager.task_schedule = retry_from.task_schedule
    step_manager.database = database
    step_manager.task = task
    if scheduled_task:
        step_manager.task_schedule = scheduled_task
    step_manager.set_running()
    step_manager.save()

    steps = database.databaseinfra.update_ssl_steps()
    instances = database.infra.get_driver().get_database_instances()
    result = steps_for_instances(steps,
                                 instances,
                                 task,
                                 step_manager.update_step,
                                 since_step,
                                 step_manager=step_manager)
    # Re-fetch: steps_for_instances may have persisted progress.
    step_manager = UpdateSsl.objects.get(id=step_manager.id)
    if result:
        step_manager.set_success()
        task.set_status_success('SSL Update with success')
    else:
        step_manager.set_error()
        task.set_status_error('Could not update SSL')
Ejemplo n.º 31
0
def purge_quarantine(self,):
    """Destroy every database whose quarantine period has expired.

    Logs the purge window, destroys each matching database, and records
    success or the failure reason on the task history.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    # Initialize before the try so the except handler never hits a
    # NameError when TaskHistory.register() itself fails.
    task_history = None
    try:
        task_history = TaskHistory.register(request=self.request, user=user)
        task_history.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info(
            "id: {} | task: {} | kwargs: {} | args: {}".format(
                self.request.id, self.request.task,
                self.request.kwargs, str(self.request.args)
            )
        )

        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days'
        )
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
        task_history.add_detail(
            "Quarantine date older than {}".format(quarantine_time_dt)
        )

        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt
        )
        task_history.add_detail(
            "Databases to purge: {}".format(len(databases))
        )

        for database in databases:
            task_history.add_detail('Deleting {}...'.format(database), level=2)
            database.destroy(user)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.'
        )
        return

    except Exception as e:
        if task_history is not None:
            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()
Ejemplo n.º 32
0
def make_database_backup(self, database, task):
    """Run a manual (non-automatic) backup of *database*: one snapshot per
    eligible instance, grouped under a fresh BackupGroup.

    Returns True on success, False when the lock, instance lookup, or any
    snapshot fails.
    """
    history = TaskHistory.register(
        request=self.request,
        worker_name=get_worker_name(),
        task_history=task,
    )

    if not database.pin_task(task):
        task.error_in_lock(database)
        return False

    history.add_detail('Starting database {} backup'.format(database))

    instances = _get_backup_instance(database, task)
    if not instances:
        task.set_status_error('Could not find eligible instances', database)
        return False

    _check_snapshot_limit(instances, task)

    group = BackupGroup()
    group.save()

    has_warning = False
    for instance in instances:
        snapshot = _create_database_backup(instance, task, group)
        if not snapshot:
            task.set_status_error(
                'Backup was unsuccessful in {}'.format(instance), database
            )
            return False

        snapshot.is_automatic = False
        snapshot.save()

        has_warning = has_warning or snapshot.has_warning

    if has_warning:
        task.set_status_warning('Backup was warning', database)
    else:
        task.set_status_success('Backup was successful', database)

    return True
Ejemplo n.º 33
0
def make_database_backup(self, database, task):
    """Perform an on-demand backup of *database*.

    Pins the task on the database, snapshots each eligible instance
    under a fresh BackupGroup, and sets success/warning/error on *task*.
    Returns True unless the backup could not run or finish.
    """
    registered_task = TaskHistory.register(
        request=self.request,
        worker_name=get_worker_name(),
        task_history=task,
    )

    if not database.pin_task(task):
        # Another task already owns this database.
        task.error_in_lock(database)
        return False

    registered_task.add_detail('Starting database {} backup'.format(database))

    eligible = _get_backup_instance(database, task)
    if not eligible:
        task.set_status_error('Could not find eligible instances', database)
        return False

    _check_snapshot_limit(eligible, task)

    group = BackupGroup()
    group.save()

    any_warning = False
    for current in eligible:
        snap = _create_database_backup(current, task, group)
        if not snap:
            task.set_status_error(
                'Backup was unsuccessful in {}'.format(current), database
            )
            return False

        # Mark as manual so retention logic treats it differently.
        snap.is_automatic = False
        snap.save()

        if not any_warning:
            any_warning = snap.has_warning

    if any_warning:
        task.set_status_warning('Backup was warning', database)
    else:
        task.set_status_success('Backup was successful', database)

    return True
Ejemplo n.º 34
0
def recreate_slave(self,
                   database,
                   host,
                   task,
                   since_step=None,
                   step_manager=None):
    """Rebuild the slave instance living on *host*, resuming a previous
    attempt when possible.

    database: kept for interface compatibility (not referenced here).
    since_step: step index to resume from; derived from *step_manager*
        or from the last retryable errored RecreateSlave when omitted.
    step_manager: explicit maintenance record to reuse for this run.
    """
    from maintenance.models import RecreateSlave
    task = TaskHistory.register(request=self.request,
                                task_history=task,
                                user=task.user,
                                worker_name=get_worker_name())
    instance = host.instances.first()
    if step_manager:
        # Reuse the given record as a fresh run: clearing id/started_at
        # makes save() create a new row; resume from its recorded step.
        # (Removed a no-op `step_manager = step_manager` self-assignment.)
        step_manager.id = None
        step_manager.started_at = None
        since_step = step_manager.current_step
    else:
        # No explicit record: look for the last retryable failed attempt
        # on this host and resume from where it stopped.
        retry_from = RecreateSlave.objects.filter(
            can_do_retry=True, host=host, status=RecreateSlave.ERROR).last()
        step_manager = RecreateSlave()
        if retry_from:
            step_manager.current_step = retry_from.current_step
            step_manager.snapshot = retry_from.snapshot
            since_step = retry_from.current_step
    step_manager.host = instance.hostname
    step_manager.task = task
    step_manager.save()

    # Reuse the instance fetched above instead of querying
    # host.instances.first() a second time.
    steps = instance.databaseinfra.recreate_slave_steps()
    result = steps_for_instances(steps, [instance],
                                 task,
                                 step_manager.update_step,
                                 since_step,
                                 step_manager=step_manager)
    # Reload: the workflow may have persisted changes to the record.
    step_manager = RecreateSlave.objects.get(id=step_manager.id)
    if result:
        step_manager.set_success()
        task.set_status_success('Slave recreated with success')
    else:
        step_manager.set_error()
        task.set_status_error('Could not recreate slave')
Ejemplo n.º 35
0
def purge_quarantine(self, ):
    """Destroy every database whose quarantine period has expired.

    Reads 'quarantine_retention_days' from Configuration and destroys
    all quarantined databases older than that cutoff, logging progress
    into a warning-relevance TaskHistory entry.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    try:
        history = TaskHistory.register(request=self.request, user=user)
        history.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
            self.request.id, self.request.task, self.request.kwargs,
            str(self.request.args)))

        retention_days = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        cutoff = date.today() - timedelta(days=retention_days)
        history.add_detail(
            "Quarantine date older than {}".format(cutoff))

        expired = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=cutoff)
        history.add_detail("Databases to purge: {}".format(
            len(expired)))

        for db in expired:
            history.add_detail('Deleting {}...'.format(db), level=2)
            db.destroy(user)

        history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.')
        return

    except Exception as exc:
        history.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error\n{}".format(exc))
        return
    finally:
        # Always release the audit-request context, even on failure.
        AuditRequest.cleanup_request()
def check_ssl_expire_at(self):
    """Schedule an SSL maintenance task for every MySQL infra whose
    certificate expires within the next 30 days, skipping infras that
    already have a pending schedule in that window.
    """
    LOG.info("Retrieving all SSL MySQL databases")
    worker_name = get_worker_name()
    task = TaskHistory.register(
        request=self.request, user=None, worker_name=worker_name)
    task.relevance = TaskHistory.RELEVANCE_CRITICAL

    one_month_later = date.today() + timedelta(days=30)
    try:
        # distinct() because the instances__ join can yield one row per
        # matching instance of the same infra.
        infras = DatabaseInfra.objects.filter(
            ssl_configured=True,
            engine__engine_type__name='mysql',
            instances__hostname__ssl_expire_at__lte=one_month_later
        ).distinct()
        for infra in infras:
            database = infra.databases.first()
            task.update_details(
                "Checking database {}...".format(database), persist=True
            )
            # Fixed local-name typo (was `scheudled_tasks`).
            scheduled_tasks = TaskSchedule.objects.filter(
                scheduled_for__lte=one_month_later,
                status=TaskSchedule.SCHEDULED,
                database=database
            )
            # exists() issues a cheap EXISTS query instead of fetching rows.
            if scheduled_tasks.exists():
                task.update_details("Already scheduled!\n", persist=True)
            else:
                # NOTE(review): 'ddd' looks like a placeholder method_path —
                # confirm the real task path before relying on this schedule.
                TaskSchedule.objects.create(
                    method_path='ddd',
                    scheduled_for=one_month_later,
                    database=database
                )
                task.update_details("Schedule created!\n", persist=True)
        task.update_status_for(TaskHistory.STATUS_SUCCESS, details="\nDone")
    except Exception as err:
        # Persist the message string rather than the exception object.
        task.update_status_for(TaskHistory.STATUS_ERROR, details=str(err))
        return
Ejemplo n.º 37
0
def purge_quarantine(self,):
    """Remove databases whose quarantine retention window has elapsed.

    CloudStack-provisioned databases have their whole infra destroyed via
    destroy_infra; any other database is deleted directly.
    """
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    try:

        history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

        retention = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        threshold = date.today() - timedelta(days=retention)

        quarantined = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=threshold)

        for db in quarantined:
            if db.plan.provider == db.plan.CLOUDSTACK:
                destroy_infra(databaseinfra=db.databaseinfra, task=history)
            else:
                db.delete()

            LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
                db.name, retention))

        history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Databases destroyed successfully')
        return

    except Exception:
        history.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error")
        return
    finally:
        # Always close the audit-request context.
        AuditRequest.cleanup_request()
Ejemplo n.º 38
0
def execute_scheduled_maintenance(self, maintenance_id):
    """Execute a scheduled Maintenance on all of its registered hosts.

    For each HostMaintenance: run the maintenance main script over SSH;
    if it fails and a rollback script exists, run the rollback and record
    its outcome. Per-host status and logs are saved on the
    HostMaintenance rows, and overall progress is mirrored into a
    critical-relevance TaskHistory entry.

    maintenance_id: primary key of the models.Maintenance to execute.
    """
    LOG.debug("Maintenance id: {}".format(maintenance_id))
    maintenance = models.Maintenance.objects.get(id=maintenance_id)
    # Mark RUNNING via queryset update (single UPDATE, no full save()).
    models.Maintenance.objects.filter(id=maintenance_id).update(
        status=maintenance.RUNNING, started_at=datetime.now())
    LOG.info("Maintenance {} is RUNNING".format(maintenance, ))

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        worker_name=worker_name)
    task_history.relevance = TaskHistory.RELEVANCE_CRITICAL
    LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
        self.request.id, self.request.task, self.request.kwargs,
        str(self.request.args)))
    task_history.update_details(
        persist=True, details="Executing Maintenance: {}".format(maintenance))
    for hm in models.HostMaintenance.objects.filter(maintenance=maintenance):
        hm.status = hm.RUNNING
        hm.started_at = datetime.now()
        hm.save()
        # A HostMaintenance without a host cannot run anything — mark and skip.
        if hm.host is None:
            hm.status = hm.UNAVAILABLEHOST
            hm.finished_at = datetime.now()
            hm.save()
            continue

        host = hm.host
        update_task = "\nRunning Maintenance on {}".format(host)

        if maintenance.disable_alarms:
            disable_alarms(hm.host)

        # Resolve each configured parameter by calling its named function
        # with the host id; results feed the script template context.
        param_dict = {}
        params = models.MaintenanceParameters.objects.filter(
            maintenance=maintenance)
        for param in params:
            param_function = get_function(param.function_name)
            param_dict[param.parameter_name] = param_function(host.id)

        main_script = build_context_script(param_dict, maintenance.main_script)
        main_output = host.ssh.run_script(script=main_script,
                                          raise_if_error=False)

        if main_output['exit_code'] == 0:
            hm.status = hm.SUCCESS
        else:
            # Main script failed: attempt rollback if one is configured.
            if maintenance.rollback_script:
                hm.status = hm.ROLLBACK
                hm.save()

                rollback_script = build_context_script(
                    param_dict, maintenance.rollback_script)
                rollback_output = host.ssh.run_script(script=rollback_script,
                                                      raise_if_error=False)

                if rollback_output['exit_code'] == 0:
                    hm.status = hm.ROLLBACK_SUCCESS
                else:
                    hm.status = hm.ROLLBACK_ERROR

                hm.rollback_log = get_dict_lines(rollback_output)

            else:
                hm.status = hm.ERROR

        # Re-enable alarms regardless of script outcome.
        if maintenance.disable_alarms:
            enable_alarms(hm.host)

        update_task += "...status: {}".format(hm.status)

        task_history.update_details(persist=True, details=update_task)

        hm.main_log = get_dict_lines(main_output)
        hm.finished_at = datetime.now()
        hm.save()

    models.Maintenance.objects.filter(id=maintenance_id).update(
        status=maintenance.FINISHED, finished_at=datetime.now())
    task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                   details='Maintenance executed succesfully')
    LOG.info("Maintenance: {} has FINISHED".format(maintenance))
Ejemplo n.º 39
0
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None,
                                           user=None):
    """Roll back one step of a database region migration.

    Runs stop_workflow for the step recorded on the given
    DatabaseRegionMigrationDetail, decrements the parent migration's
    current step on success, and mirrors progress/errors into
    TaskHistory. (Python 2 code — note the `except Exception, e` syntax.)
    """
    AuditRequest.new_request("execute_database_region_migration", user,
                             "localhost")
    try:

        # Preserve the original task arguments: register() may reuse an
        # existing task_history record and overwrite them.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        # Flag this detail as a rollback (undo) run.
        database_region_migration_detail.is_migration_up = False
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        # Source side = instances that have a future_instance set;
        # REDIS-type instances are excluded from the host lists.
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=False):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        # Target side = instances without a future_instance.
        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=True):
            target_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        # Fail fast if either side of the migration has nothing to undo.
        if not source_hosts:
            raise Exception('There is no source host')
        if not source_instances:
            raise Exception('There is no source instance')
        if not target_hosts:
            raise Exception('There is no target host')
        if not target_instances:
            raise Exception('There is no target instance')

        source_secondary_ips = DatabaseInfraAttr.objects.filter(
            databaseinfra=databaseinfra, equivalent_dbinfraattr__isnull=False)

        source_secondary_ips = list(source_secondary_ips)

        workflow_dict = build_dict(
            database_region_migration_detail=database_region_migration_detail,
            database_region_migration=database_region_migration,
            database=database,
            databaseinfra=databaseinfra,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            engine=engine,
            source_instances=source_instances,
            source_plan=source_plan,
            target_plan=target_plan,
            source_hosts=source_hosts,
            target_instances=target_instances,
            target_hosts=target_hosts,
            source_secondary_ips=source_secondary_ips,
        )

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        # Undo completed: move the migration pointer one step back.
        current_step = database_region_migration.current_step
        database_region_migration.current_step = current_step - 1
        database_region_migration.save()

        database_region_migration_detail.status = database_region_migration_detail.SUCCESS
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database region migration was succesfully')

    except Exception, e:
        # Record the full traceback on the task and mark the detail ERROR.
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)

        database_region_migration_detail.status = database_region_migration_detail.ERROR
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        return
Ejemplo n.º 40
0
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None,
                                      user=None):
    """Execute one forward step of a database region migration.

    Builds the workflow context for the step recorded on the given
    DatabaseRegionMigrationDetail and runs start_workflow; on success the
    migration's current step is incremented, on failure the detail is
    marked ROLLBACK and the error recorded on TaskHistory.
    (Python 2 code — note the `except Exception, e` syntax.)
    """
    AuditRequest.new_request("execute_database_region_migration", user,
                             "localhost")
    try:

        # Preserve existing task arguments: register() may overwrite them
        # when reusing a task_history record.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        # After the first step, only instances already paired with a
        # future_instance take part; REDIS instances contribute no host.
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and not instance.future_instance:
                continue
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan

        source_offering = databaseinfra.cs_dbinfra_offering.get().offering
        target_offering = source_offering.equivalent_offering

        # Same pairing rule for secondary IPs after the first step.
        source_secondary_ips = []

        for secondary_ip in DatabaseInfraAttr.objects.filter(
                databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and\
                    not secondary_ip.equivalent_dbinfraattr:
                continue
            source_secondary_ips.append(secondary_ip)

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            database=database,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            source_plan=source_plan,
            target_plan=target_plan,
            source_offering=source_offering,
            target_offering=target_offering,
            source_secondary_ips=source_secondary_ips,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['created'] == False:

            # Workflow did not finish building: collect error codes and
            # tracebacks (if any) into a single details string.
            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err)
                    for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)

            return

        else:
            database_region_migration_detail.status = database_region_migration_detail.SUCCESS
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            # Step succeeded: advance the migration pointer.
            current_step = database_region_migration.current_step
            database_region_migration.current_step = current_step + 1
            database_region_migration.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration was succesfully')
            return

    except Exception, e:
        # Unexpected failure: persist the traceback and mark ROLLBACK.
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
Ejemplo n.º 41
0
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None,
                                      user=None):
    """Execute one forward step of a database region migration (older
    variant: no detail status bookkeeping and no audit request).

    Runs start_workflow for the step recorded on the given
    DatabaseRegionMigrationDetail and reports the outcome on TaskHistory.
    (Python 2 code — note the `except Exception, e` syntax.)
    """
    #AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:

        # Preserve existing task arguments: register() may overwrite them
        # when reusing a task_history record.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        # All infra instances take part; REDIS_SENTINEL instances
        # contribute no host to the source host list.
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS_SENTINEL:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        workflow_dict = build_dict(
            #database_region_migration_detail = database_region_migration_detail,
            #database_region_migration = database_region_migration,
            #database = database,
            databaseinfra=databaseinfra,
            #source_environment = source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            #engine = engine,
            source_instances=source_instances,
            source_hosts=source_hosts,
            #source_plan = source_plan,
            target_plan=target_plan,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['created'] == False:

            # Workflow did not finish building: assemble an error message
            # from the collected error codes and tracebacks, if present.
            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err)
                    for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)

            return

        else:

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration was succesfully')
            return

    except Exception, e:
        # Unexpected failure: persist the full traceback on the task.
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
Ejemplo n.º 42
0
def execute_scheduled_maintenance(self,maintenance_id):
    """Execute a scheduled Maintenance on all of its hosts (older
    variant: connects via exec_remote_command using the host's
    CloudStack credentials).

    Runs the main script per host, attempting the rollback script when
    the main script fails; per-host status/logs are stored on the
    HostMaintenance rows and progress mirrored into TaskHistory.
    (Python 2 code — note the `except ObjectDoesNotExist, e` syntax.)

    maintenance_id: primary key of the models.Maintenance to execute.
    """
    LOG.debug("Maintenance id: {}".format(maintenance_id))
    maintenance = models.Maintenance.objects.get(id=maintenance_id)

    # Mark RUNNING via queryset update (single UPDATE statement).
    models.Maintenance.objects.filter(id=maintenance_id,
        ).update(status=maintenance.RUNNING, started_at=datetime.now())
    LOG.info("Maintenance {} is RUNNING".format(maintenance,))

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,worker_name= worker_name)

    LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

    task_history.update_details(persist=True,
        details="Executing Maintenance: {}".format(maintenance))

    for hm in models.HostMaintenance.objects.filter(maintenance=maintenance):
        # exec_remote_command fills this dict with the script's output lines.
        main_output = {}
        hm.status = hm.RUNNING
        hm.started_at = datetime.now()
        hm.save()

        # No host attached: record and move on.
        if hm.host is None:
            hm.status = hm.UNAVAILABLEHOST
            hm.finished_at = datetime.now()
            hm.save()
            continue

        host = hm.host
        update_task = "\nRunning Maintenance on {}".format(host)

        # SSH credentials live on the host's CloudStack attributes; without
        # them the host cannot be reached.
        try:
            cloudstack_host_attributes = host.cs_host_attributes.get()
        except ObjectDoesNotExist, e:
            LOG.warn("Host {} does not have cloudstack attrs...{}".format(hm.host,e))
            hm.status = hm.UNAVAILABLECSHOSTATTR
            hm.finished_at = datetime.now()
            hm.save()
            continue

        # Resolve each configured parameter by calling its named function
        # with the host id; results feed the script template context.
        param_dict = {}
        for param in models.MaintenanceParameters.objects.filter(maintenance=maintenance):
            param_function = _get_function(param.function_name)
            param_dict[param.parameter_name] = param_function(host.id)

        main_script = build_context_script(param_dict, maintenance.main_script)
        exit_status = exec_remote_command(server=host.address,
            username=cloudstack_host_attributes.vm_user,
            password=cloudstack_host_attributes.vm_password,
            command=main_script, output=main_output)

        if exit_status == 0:
            hm.status = hm.SUCCESS
        else:

            # Main script failed: run the rollback script when configured.
            if maintenance.rollback_script:
                rollback_output = {}
                hm.status = hm.ROLLBACK
                hm.save()

                rollback_script = build_context_script(param_dict, maintenance.rollback_script)
                exit_status = exec_remote_command(server=host.address,
                    username=cloudstack_host_attributes.vm_user,
                    password=cloudstack_host_attributes.vm_password,
                    command=rollback_script, output=rollback_output)

                if exit_status ==0:
                    hm.status = hm.ROLLBACK_SUCCESS
                else:
                    hm.status = hm.ROLLBACK_ERROR

                hm.rollback_log = get_dict_lines(rollback_output)

            else:
                hm.status = hm.ERROR

        update_task += "...status: {}".format(hm.status)

        task_history.update_details(persist=True,
            details=update_task)

        hm.main_log = get_dict_lines(main_output)
        hm.finished_at = datetime.now()
        hm.save()
Ejemplo n.º 43
0
def make_databases_backup(self):
    """Take snapshot backups of every database infra with persistence,
    walking environments in order (production envs first), pacing the
    snapshots in groups with a 5-minute pause between groups.

    The final TaskHistory status is SUCCESS, WARNING or ERROR depending
    on the worst individual backup outcome.
    """
    LOG.info("Making databases backups")
    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, worker_name=worker_name, user=None
    )
    task_history.relevance = TaskHistory.RELEVANCE_ERROR

    waiting_msg = "\nWaiting 5 minutes to start the next backup group"
    status = TaskHistory.STATUS_SUCCESS
    environments = Environment.objects.all()
    prod_envs = Configuration.get_by_name_as_list('prod_envs')
    dev_envs = Configuration.get_by_name_as_list('dev_envs')
    env_names_order = prod_envs + dev_envs
    # With no configured ordering, fall back to all known environments.
    if not env_names_order:
        env_names_order = [env.name for env in environments]

    infras = DatabaseInfra.objects.filter(plan__has_persistence=True)
    for env_name in env_names_order:
        try:
            env = environments.get(name=env_name)
        except Environment.DoesNotExist:
            continue

        msg = '\nStarting Backup for env {}'.format(env.name)
        task_history.update_details(persist=True, details=msg)
        databaseinfras_by_env = infras.filter(environment=env)
        error = {}
        backup_number = 0
        # Split the infras into ~12 groups, pausing between groups so the
        # storage backend is not hit with all snapshots at once.
        backups_per_group = len(infras) / 12
        for infra in databaseinfras_by_env:
            # Skip infras that currently host no database.
            if not infra.databases.first():
                continue

            if backups_per_group > 0:
                if backup_number < backups_per_group:
                    backup_number += 1
                else:
                    backup_number = 0
                    task_history.update_details(waiting_msg, True)
                    sleep(300)

            group = BackupGroup()
            group.save()

            for instance in infra.instances.filter(read_only=False):
                try:
                    driver = instance.databaseinfra.get_driver()
                    is_eligible = driver.check_instance_is_eligible_for_backup(
                        instance
                    )
                    if not is_eligible:
                        LOG.info(
                            'Instance {} is not eligible for backup'.format(
                                instance
                            )
                        )
                        continue
                except Exception as e:
                    status = TaskHistory.STATUS_ERROR
                    msg = "Backup for %s was unsuccessful. Error: %s" % (
                        str(instance), str(e))
                    LOG.error(msg)
                    time_now = str(strftime("%m/%d/%Y %H:%M:%S"))
                    task_history.update_details(
                        persist=True,
                        details="\n{} - {}".format(time_now, msg)
                    )
                    # Bug fix: eligibility is unknown when the check raises,
                    # so skip this instance instead of falling through and
                    # snapshotting it anyway.
                    continue

                time_now = str(strftime("%m/%d/%Y %H:%M:%S"))
                start_msg = "\n{} - Starting backup for {} ...".format(
                    time_now, instance
                )
                task_history.update_details(persist=True, details=start_msg)
                try:
                    snapshot = make_instance_snapshot_backup(
                        instance=instance, error=error, group=group
                    )
                    if snapshot and snapshot.was_successful:
                        msg = "Backup for %s was successful" % (str(instance))
                        LOG.info(msg)
                    elif snapshot and snapshot.has_warning:
                        status = TaskHistory.STATUS_WARNING
                        msg = "Backup for %s has warning" % (str(instance))
                        LOG.info(msg)
                    else:
                        status = TaskHistory.STATUS_ERROR
                        msg = "Backup for %s was unsuccessful. Error: %s" % (
                            str(instance), error['errormsg'])
                        LOG.error(msg)
                    LOG.info(msg)
                except Exception as e:
                    status = TaskHistory.STATUS_ERROR
                    msg = "Backup for %s was unsuccessful. Error: %s" % (
                        str(instance), str(e))
                    LOG.error(msg)

                time_now = str(strftime("%m/%d/%Y %H:%M:%S"))
                msg = "\n{} - {}".format(time_now, msg)
                task_history.update_details(persist=True, details=msg)

    task_history.update_status_for(status, details="\nBackup finished")

    return
Ejemplo n.º 44
0
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None,
                                           user=None):
    """Roll back one step of a database region migration.

    Loads the migration detail, rebuilds the workflow context (source and
    target instances, hosts, plans and environments) and runs
    ``stop_workflow`` against the step's workflow classes. Any unexpected
    exception is logged with its full stack and marks the task as error.

    :param database_region_migration_detail_id: pk of the
        DatabaseRegionMigrationDetail describing the step to undo.
    :param task_history: optional existing TaskHistory to re-register.
    :param user: user the task is registered under.
    """
    # AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:
        # Preserve the original task arguments across re-registration.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes

        # Instances with a future counterpart are the migration sources;
        # the remaining instances are the targets.
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=False):
            source_instances.append(instance)
            source_hosts.append(instance.hostname)

        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=True):
            target_instances.append(instance)
            target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        workflow_dict = build_dict(
            database_region_migration_detail=database_region_migration_detail,
            database_region_migration=database_region_migration,
            database=database,
            databaseinfra=databaseinfra,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            engine=engine,
            source_instances=source_instances,
            source_plan=source_plan,
            target_plan=target_plan,
            source_hosts=source_hosts,
            target_instances=target_instances,
            target_hosts=target_hosts)

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database region migration was succesfully')

    except Exception as e:  # Py3-compatible (was Python 2-only "except Exception, e")
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
Ejemplo n.º 45
0
def update_ssl(self,
               database,
               task,
               since_step=None,
               step_manager=None,
               scheduled_task=None,
               auto_rollback=False):
    """Run the SSL-update workflow on every host of *database*.

    Builds the step-manager record (resuming a failed retryable run when
    one exists), executes the infra's SSL update steps once per host, and
    on failure optionally rolls the steps back under a cloned task.

    :param database: database whose infra certificates are updated.
    :param task: TaskHistory to register progress against.
    :param since_step: step index to resume from (overridden by a
        retryable previous run or a supplied *step_manager*).
    :param step_manager: existing UpdateSsl record to resume; it is
        cloned into a fresh row (id reset) before running.
    :param scheduled_task: optional TaskSchedule to attach.
    :param auto_rollback: when True, a failed run triggers a full
        rollback under a new "_rollback" task.
    """
    from maintenance.models import UpdateSsl
    task = TaskHistory.register(request=self.request,
                                task_history=task,
                                user=task.user,
                                worker_name=get_worker_name())
    if step_manager:
        # Reuse the supplied manager as a fresh record and resume from
        # its current step. (Removed the redundant
        # "step_manager = step_manager" self-assignment.)
        step_manager.id = None
        step_manager.started_at = None
        since_step = step_manager.current_step
    else:
        # Resume from the last retryable failed run, if any.
        retry_from = UpdateSsl.objects.filter(can_do_retry=True,
                                              database=database,
                                              status=UpdateSsl.ERROR).last()
        step_manager = UpdateSsl()
        if retry_from:
            step_manager.current_step = retry_from.current_step
            since_step = retry_from.current_step
            step_manager.task_schedule = retry_from.task_schedule
    step_manager.database = database
    step_manager.task = task
    if scheduled_task:
        step_manager.task_schedule = scheduled_task
    step_manager.set_running()
    step_manager.save()

    steps = database.databaseinfra.update_ssl_steps()

    # One instance per distinct host: hosts can appear under several
    # instances, but the SSL steps must run once per host.
    hosts = []
    for instance in database.infra.instances.all():
        if instance.hostname not in hosts:
            hosts.append(instance.hostname)
    instances = []
    for host in hosts:
        instances.append(host.instances.all()[0])

    result = steps_for_instances(steps,
                                 instances,
                                 task,
                                 step_manager.update_step,
                                 since_step,
                                 step_manager=step_manager)
    # Re-fetch to pick up state persisted by the workflow run.
    step_manager = UpdateSsl.objects.get(id=step_manager.id)
    if result:
        step_manager.set_success()
        task.set_status_success('SSL Update with success')
    else:
        step_manager.set_error()
        task.set_status_error('Could not update SSL')
        if auto_rollback:
            from workflow.workflow import rollback_for_instances_full
            # Clone the task as a new "_rollback" task so the rollback is
            # tracked separately from the failed run.
            new_task = task
            new_task.id = None
            new_task.details = ''
            new_task.task_name += '_rollback'
            new_task.task_status = new_task.STATUS_RUNNING
            new_task.save()
            rollback_step_manager = step_manager
            rollback_step_manager.id = None
            rollback_step_manager.task_schedule = None
            rollback_step_manager.can_do_retry = 0
            rollback_step_manager.save()
            result = rollback_for_instances_full(
                steps,
                instances,
                new_task,
                rollback_step_manager.get_current_step,
                rollback_step_manager.update_step,
            )
            if result:
                rollback_step_manager.set_success()
                task.set_status_success('Rollback SSL Update with success')
            else:
                if hasattr(rollback_step_manager, 'cleanup'):
                    rollback_step_manager.cleanup(instances)
                rollback_step_manager.set_error()
                task.set_status_error('Could not rollback update SSL')
Ejemplo n.º 46
0
def execute_database_region_migration(self, database_region_migration_detail_id, task_history=None, user=None):
    """Execute one step of a database region migration.

    Rebuilds the workflow context for the step recorded in the migration
    detail and runs ``start_workflow``. Workflow failures are reported to
    the task history with the collected exception traces; any unexpected
    exception is logged with its full stack and marks the task as error.

    :param database_region_migration_detail_id: pk of the
        DatabaseRegionMigrationDetail describing the step to run.
    :param task_history: optional existing TaskHistory to re-register.
    :param user: user the task is registered under.
    """
    # AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:
        # Preserve the original task arguments across re-registration.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[database_region_migration_detail.step].step_classes

        # Sentinel instances carry no data host to migrate, so they are
        # kept out of source_hosts (but still listed as source instances).
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS_SENTINEL:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            target_plan=target_plan,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['created'] == False:

            if 'exceptions' in workflow_dict:
                error = "\n".join(": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)

            return

        else:

            task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Database region migration was succesfully')
            return

    except Exception as e:  # Py3-compatible (was Python 2-only "except Exception, e")
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=traceback)
        return
Ejemplo n.º 47
0
def execute_database_region_migration_undo(self, database_region_migration_detail_id, task_history=None, user=None):
    """Roll back one step of a database region migration.

    Rebuilds the workflow context (source and target instances, hosts,
    plans and environments) for the step recorded in the migration detail
    and runs ``stop_workflow`` to undo it. Any unexpected exception is
    logged with its full stack and marks the task as error.

    :param database_region_migration_detail_id: pk of the
        DatabaseRegionMigrationDetail describing the step to undo.
    :param task_history: optional existing TaskHistory to re-register.
    :param user: user the task is registered under.
    """
    # AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:
        # Preserve the original task arguments across re-registration.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[database_region_migration_detail.step].step_classes

        # Instances with a future counterpart are the migration sources;
        # the remaining instances are the targets.
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(future_instance__isnull=False):
            source_instances.append(instance)
            source_hosts.append(instance.hostname)

        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(future_instance__isnull=True):
            target_instances.append(instance)
            target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        workflow_dict = build_dict(
            database_region_migration_detail=database_region_migration_detail,
            database_region_migration=database_region_migration,
            database=database,
            databaseinfra=databaseinfra,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            engine=engine,
            source_instances=source_instances,
            source_plan=source_plan,
            target_plan=target_plan,
            source_hosts=source_hosts,
            target_instances=target_instances,
            target_hosts=target_hosts,
        )

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Database region migration was succesfully')

    except Exception as e:  # Py3-compatible (was Python 2-only "except Exception, e")
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=traceback)
        return
Ejemplo n.º 48
0
def make_databases_backup(self):
    """Run scheduled snapshot backups for every eligible database infra.

    Selects infras whose backup hour is the current hour, plus infras
    whose backup for today is still pending, and walks them environment
    by environment (production environments first). Work is throttled
    into groups, sleeping ``backup_group_interval`` minutes between
    groups. The task history ends as SUCCESS, WARNING or ERROR depending
    on the worst individual snapshot result.
    """
    LOG.info("Making databases backups")
    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, worker_name=worker_name, user=None
    )
    task_history.relevance = TaskHistory.RELEVANCE_ERROR

    backup_group_interval = Configuration.get_by_name_as_int(
        'backup_group_interval', default=1
    )
    waiting_msg = "\nWaiting {} minute(s) to start the next backup group".format(
        backup_group_interval
    )
    status = TaskHistory.STATUS_SUCCESS
    environments = Environment.objects.all()
    prod_envs = Environment.prod_envs()
    dev_envs = Environment.dev_envs()
    # Production environments are backed up first; fall back to every
    # environment when neither list is configured.
    env_names_order = list(prod_envs) + list(dev_envs)
    if not env_names_order:
        env_names_order = [env.name for env in environments]

    current_time = datetime.now()
    current_hour = current_time.hour

    # Infras that already have a successful backup today before the
    # current hour -- excluded from the "pending" set below.
    infras_with_backup_today = DatabaseInfra.objects.filter(
        instances__backup_instance__status=Snapshot.SUCCESS,
        backup_hour__lt=current_hour,
        plan__has_persistence=True,
        instances__backup_instance__end_at__year=current_time.year,
        instances__backup_instance__end_at__month=current_time.month,
        instances__backup_instance__end_at__day=current_time.day).distinct()

    # Infras whose backup window already passed today without a success.
    infras_pending_backup = DatabaseInfra.objects.filter(
        backup_hour__lt=current_hour,
        plan__has_persistence=True,
    ).exclude(pk__in=[infra.pk for infra in infras_with_backup_today])

    # Infras scheduled exactly for the current hour.
    infras_current_hour = DatabaseInfra.objects.filter(
        plan__has_persistence=True,
        backup_hour=current_time.hour
    )

    # Merging pending and current infras to backup list
    infras = infras_current_hour | infras_pending_backup

    for env_name in env_names_order:
        try:
            env = environments.get(name=env_name)
        except Environment.DoesNotExist:
            continue

        msg = '\nStarting Backup for env {}'.format(env.name)
        task_history.update_details(persist=True, details=msg)
        databaseinfras_by_env = infras.filter(environment=env)
        error = {}
        backup_number = 0
        # Split the run into up to 12 groups; integer division ("//")
        # keeps Python 2 semantics under Python 3 (plain "/" would yield
        # a float here).
        backups_per_group = len(infras) // 12
        for infra in databaseinfras_by_env:
            if not infra.databases.first():
                continue

            if backups_per_group > 0:
                if backup_number < backups_per_group:
                    backup_number += 1
                else:
                    backup_number = 0
                    task_history.update_details(waiting_msg, True)
                    sleep(backup_group_interval*60)

            group = BackupGroup()
            group.save()

            instances_backup = infra.instances.filter(
                read_only=False, is_active=True
            )
            for instance in instances_backup:
                try:
                    driver = instance.databaseinfra.get_driver()
                    is_eligible = driver.check_instance_is_eligible_for_backup(
                        instance
                    )
                    if not is_eligible:
                        LOG.info(
                            'Instance {} is not eligible for backup'.format(
                                instance
                            )
                        )
                        continue
                except Exception as e:
                    status = TaskHistory.STATUS_ERROR
                    msg = "Backup for %s was unsuccessful. Error: %s" % (
                        str(instance), str(e))
                    LOG.error(msg)
                # NOTE(review): on an eligibility-check error the loop still
                # attempts the backup below -- confirm this fall-through is
                # intended.

                time_now = str(strftime("%m/%d/%Y %H:%M:%S"))
                start_msg = "\n{} - Starting backup for {} ...".format(
                    time_now, instance
                )
                task_history.update_details(persist=True, details=start_msg)
                try:
                    snapshot = make_instance_snapshot_backup(
                        instance=instance, error=error, group=group,
                        current_hour=current_hour
                    )
                    if snapshot and snapshot.was_successful:
                        msg = "Backup for %s was successful" % (str(instance))
                        LOG.info(msg)
                    elif snapshot and snapshot.was_error:
                        status = TaskHistory.STATUS_ERROR
                        msg = "Backup for %s was unsuccessful. Error: %s" % (
                            str(instance), error['errormsg'])
                        LOG.error(msg)
                    else:
                        # NOTE(review): a missing (None) snapshot also lands
                        # in this warning branch -- confirm that is intended.
                        status = TaskHistory.STATUS_WARNING
                        msg = "Backup for %s has warning" % (str(instance))
                        LOG.info(msg)
                    LOG.info(msg)
                except Exception as e:
                    status = TaskHistory.STATUS_ERROR
                    msg = "Backup for %s was unsuccessful. Error: %s" % (
                        str(instance), str(e))
                    LOG.error(msg)

                time_now = str(strftime("%m/%d/%Y %H:%M:%S"))
                msg = "\n{} - {}".format(time_now, msg)
                task_history.update_details(persist=True, details=msg)

    task_history.update_status_for(status, details="\nBackup finished")

    return
Ejemplo n.º 49
0
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None, user=None):
    """Execute one step of a database region migration.

    Marks the migration detail as RUNNING, rebuilds the workflow context
    (instances, hosts, plans, offerings, secondary IPs) and runs
    ``start_workflow``. On success the detail is marked SUCCESS and the
    migration's step counter advances; on workflow failure or unexpected
    exception the detail is marked ROLLBACK and the task history is set
    to error. Audit context is always cleaned up in ``finally``.
    """
    AuditRequest.new_request(
        "execute_database_region_migration", user, "localhost")
    try:

        # Preserve the original task arguments across re-registration.
        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        # Mark the step as running before any workflow work begins.
        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            # After the first step, only instances that already have a
            # future counterpart take part in the migration.
            if database_region_migration.current_step > 0 and not instance.future_instance:
                continue
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan

        source_offering = databaseinfra.cs_dbinfra_offering.get().offering
        target_offering = source_offering.equivalent_offering

        source_secondary_ips = []

        for secondary_ip in DatabaseInfraAttr.objects.filter(databaseinfra=databaseinfra):
            # Same rule as for instances: past step 0, only secondary IPs
            # with an equivalent attribute are migrated.
            if database_region_migration.current_step > 0 and\
                    not secondary_ip.equivalent_dbinfraattr:
                continue
            source_secondary_ips.append(secondary_ip)

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            database=database,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            source_plan=source_plan,
            target_plan=target_plan,
            source_offering=source_offering,
            target_offering=target_offering,
            source_secondary_ips=source_secondary_ips,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['created'] == False:

            # Workflow failed: collect its error codes and tracebacks for
            # the task-history details.
            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details=error)

            return

        else:
            # Step succeeded: record it and advance the migration's
            # step counter.
            database_region_migration_detail.status = database_region_migration_detail.SUCCESS
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            current_step = database_region_migration.current_step
            database_region_migration.current_step = current_step + 1
            database_region_migration.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS, details='Database region migration was succesfully')
            return

    except Exception as e:
        # Unexpected failure: log full stack, mark the detail for
        # rollback and the task as error.
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=traceback)
        return

    finally:
        # Always release the audit context, whatever the outcome.
        AuditRequest.cleanup_request()
        pass
Ejemplo n.º 50
0
def execute_scheduled_maintenance(self, maintenance_id):
    """Run a scheduled maintenance's script on each of its hosts.

    For every HostMaintenance of the maintenance: optionally disables
    alarms, renders the main script with per-host parameters, executes it
    remotely, and on failure runs the rollback script when one exists.
    Host statuses (SUCCESS / ERROR / ROLLBACK_* / UNAVAILABLEHOST) and
    logs are persisted per host; the maintenance itself is marked
    FINISHED at the end.
    """
    LOG.debug("Maintenance id: {}".format(maintenance_id))
    maintenance = models.Maintenance.objects.get(id=maintenance_id)
    models.Maintenance.objects.filter(id=maintenance_id).update(
        status=maintenance.RUNNING, started_at=datetime.now()
    )
    LOG.info("Maintenance {} is RUNNING".format(maintenance,))

    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, worker_name=worker_name
    )
    task_history.relevance = TaskHistory.RELEVANCE_CRITICAL
    LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
        self.request.id, self.request.task,
        self.request.kwargs, str(self.request.args)
    ))
    task_history.update_details(
        persist=True, details="Executing Maintenance: {}".format(maintenance)
    )
    for hm in models.HostMaintenance.objects.filter(maintenance=maintenance):
        main_output = {}
        hm.status = hm.RUNNING
        hm.started_at = datetime.now()
        hm.save()
        # A host-maintenance without a host cannot run anything.
        if hm.host is None:
            hm.status = hm.UNAVAILABLEHOST
            hm.finished_at = datetime.now()
            hm.save()
            continue

        host = hm.host
        update_task = "\nRunning Maintenance on {}".format(host)

        if maintenance.disable_alarms:
            disable_alarms(hm.host)

        # Resolve each configured parameter by calling its function with
        # the host id; results feed the script template context.
        param_dict = {}
        params = models.MaintenanceParameters.objects.filter(
            maintenance=maintenance
        )
        for param in params:
            param_function = get_function(param.function_name)
            param_dict[param.parameter_name] = param_function(host.id)

        main_script = build_context_script(param_dict, maintenance.main_script)
        exit_status = exec_remote_command_host(host, main_script, main_output)

        if exit_status == 0:
            hm.status = hm.SUCCESS
        else:

            # Main script failed: try the rollback script when available,
            # otherwise just record the error.
            if maintenance.rollback_script:
                rollback_output = {}
                hm.status = hm.ROLLBACK
                hm.save()

                rollback_script = build_context_script(
                    param_dict, maintenance.rollback_script
                )
                exit_status = exec_remote_command_host(
                    host, rollback_script, rollback_output
                )

                if exit_status == 0:
                    hm.status = hm.ROLLBACK_SUCCESS
                else:
                    hm.status = hm.ROLLBACK_ERROR

                hm.rollback_log = get_dict_lines(rollback_output)

            else:
                hm.status = hm.ERROR

        # Re-enable alarms regardless of the script outcome.
        if maintenance.disable_alarms:
            enable_alarms(hm.host)

        update_task += "...status: {}".format(hm.status)

        task_history.update_details(persist=True, details=update_task)

        hm.main_log = get_dict_lines(main_output)
        hm.finished_at = datetime.now()
        hm.save()

    models.Maintenance.objects.filter(id=maintenance_id).update(
        status=maintenance.FINISHED, finished_at=datetime.now()
    )
    task_history.update_status_for(
        TaskHistory.STATUS_SUCCESS, details='Maintenance executed succesfully'
    )
    LOG.info("Maintenance: {} has FINISHED".format(maintenance))
Ejemplo n.º 51
0
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None, user=None):
    """Roll back (undo) one step of a database region migration.

    Fetches the migration detail by id, marks it RUNNING with
    ``is_migration_up=False``, builds the workflow context for the
    detail's step and calls ``stop_workflow`` to undo it, then
    decrements the parent migration's ``current_step`` and records
    SUCCESS. On any failure the task history and (when available) the
    migration detail are flagged with the error and the task returns.

    Parameters:
        self: bound Celery task (provides ``self.request``).
        database_region_migration_detail_id: pk of the
            DatabaseRegionMigrationDetail to roll back.
        task_history: optional existing TaskHistory to re-register;
            its original arguments are preserved across registration.
        user: user recorded in the audit trail and task history.
    """
    AuditRequest.new_request(
        "execute_database_region_migration", user, "localhost")

    # Initialized before the try so the except block can tell whether
    # the failure happened before the detail was even fetched (the
    # original code raised NameError in that situation).
    database_region_migration_detail = None
    try:
        # Preserve the caller-supplied arguments: TaskHistory.register
        # returns a (possibly new) history object.
        arguments = task_history.arguments if task_history else None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = \
            DatabaseRegionMigrationDetail.objects.get(
                id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = \
            database_region_migration_detail.RUNNING
        # This task runs the step in the "undo" direction.
        database_region_migration_detail.is_migration_up = False
        database_region_migration_detail.save()

        database_region_migration = \
            database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes

        # Instances that already point at a future instance are the
        # source side; the rest are the target side. Redis instances
        # are excluded from the host lists (hosts are collected only
        # for non-Redis instance types, as in the forward migration).
        source_instances = list(
            databaseinfra.instances.filter(future_instance__isnull=False))
        source_hosts = [instance.hostname
                        for instance in source_instances
                        if instance.instance_type != instance.REDIS]

        target_instances = list(
            databaseinfra.instances.filter(future_instance__isnull=True))
        target_hosts = [instance.hostname
                        for instance in target_instances
                        if instance.instance_type != instance.REDIS]

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        # Fail fast with an explicit message when the topology is
        # incomplete; the generic except below records it.
        if not source_hosts:
            raise Exception('There is no source host')
        if not source_instances:
            raise Exception('There is no source instance')
        if not target_hosts:
            raise Exception('There is no target host')
        if not target_instances:
            raise Exception('There is no target instance')

        source_secondary_ips = list(DatabaseInfraAttr.objects.filter(
            databaseinfra=databaseinfra,
            equivalent_dbinfraattr__isnull=False))

        workflow_dict = build_dict(
            database_region_migration_detail=database_region_migration_detail,
            database_region_migration=database_region_migration,
            database=database,
            databaseinfra=databaseinfra,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            engine=engine,
            source_instances=source_instances,
            source_plan=source_plan,
            target_plan=target_plan,
            source_hosts=source_hosts,
            target_instances=target_instances,
            target_hosts=target_hosts,
            source_secondary_ips=source_secondary_ips,
        )

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        # Undo moves the migration one step backwards.
        database_region_migration.current_step -= 1
        database_region_migration.save()

        database_region_migration_detail.status = \
            database_region_migration_detail.SUCCESS
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database region migration was successful')

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        # task_history may still be the (possibly None) caller value if
        # TaskHistory.register itself failed.
        if task_history is not None:
            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details=traceback)

        # The detail is only flagged when it was actually fetched;
        # otherwise the original code raised NameError here, hiding
        # the real failure.
        if database_region_migration_detail is not None:
            database_region_migration_detail.status = \
                database_region_migration_detail.ERROR
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

        return

    finally:
        # Always detach the audit context, success or failure.
        AuditRequest.cleanup_request()