def clone_infra(plan, environment, name, team, project, description, task=None, clone=None):
    if plan.provider != plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(plan=plan, environment=environment, name=name)

        if dbinfra:
            database = Database.provision(databaseinfra=dbinfra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.save()

            return build_dict(databaseinfra=dbinfra, database=database, created=True)

    return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(name=slugify(name),
                               plan=plan,
                               environment=environment,
                               steps=get_clone_settings(plan.engine_type.name),
                               qt=get_vm_qt(plan=plan),
                               dbtype=str(plan.engine_type),
                               team=team,
                               project=project,
                               description=description,
                               clone=clone)

    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
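
# Usage sketch (hypothetical): driving clone_infra from a task. The
# `source_database` argument and the derived clone name are assumptions for
# illustration, not part of the original module.
def clone_database_example(source_database, task=None):
    workflow_dict = clone_infra(
        plan=source_database.plan,
        environment=source_database.environment,
        name='{}-clone'.format(source_database.name),
        team=source_database.team,
        project=source_database.project,
        description=source_database.description,
        task=task,
        clone=source_database,
    )
    # build_dict/start_workflow report success via the 'created' key.
    return workflow_dict.get('created', False)
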
def make_infra(
    plan, environment, name, team, project, description,
    subscribe_to_email_events=True, task=None, is_protected=False
):
    if plan.provider != plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(
            plan=plan, environment=environment, name=name
        )

        if dbinfra:
            database = Database.provision(databaseinfra=dbinfra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.subscribe_to_email_events = subscribe_to_email_events
            database.save()

            return build_dict(
                databaseinfra=dbinfra, database=database, created=True
            )
        return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(
        name=slugify(name), plan=plan, environment=environment,
        steps=get_deploy_settings(
            plan.replication_topology.class_path
        ), qt=get_vm_qt(plan=plan), dbtype=str(plan.engine_type),
        team=team, project=project, description=description,
        subscribe_to_email_events=subscribe_to_email_events,
        is_protected=is_protected
    )

    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
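
# For orientation: a minimal sketch of what the build_dict helper used
# throughout this module could look like — an assumption, not the verified
# implementation. It suggests the workflow receives a plain dict of keyword
# arguments plus a bookkeeping key that start_workflow/stop_workflow fill in.
def build_dict_sketch(**kwargs):
    workflow_dict = dict(kwargs)
    workflow_dict.setdefault('exceptions', {'error_codes': [], 'traceback': []})
    return workflow_dict
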
def clone_infra(plan,
                environment,
                name,
                team,
                backup_hour,
                maintenance_window,
                maintenance_day,
                project,
                description,
                subscribe_to_email_events,
                task=None,
                clone=None):
    if plan.provider != plan.CLOUDSTACK:
        infra = DatabaseInfra.best_for(
            plan=plan,
            environment=environment,
            name=name,
            backup_hour=backup_hour,
            maintenance_window=maintenance_window,
            maintenance_day=maintenance_day,
        )

        if infra:
            database = Database.provision(databaseinfra=infra, name=name)
            database.team = team
            database.description = description
            database.project = project
            database.save()

            return build_dict(
                databaseinfra=infra,
                database=database,
                created=True,
                subscribe_to_email_events=subscribe_to_email_events)

        return build_dict(databaseinfra=None,
                          created=False,
                          subscribe_to_email_events=subscribe_to_email_events)

    workflow_dict = build_dict(
        name=slugify(name),
        plan=plan,
        environment=environment,
        steps=get_clone_settings(plan.replication_topology.class_path),
        qt=get_vm_qt(plan=plan),
        dbtype=str(plan.engine_type),
        team=team,
        backup_hour=backup_hour,
        maintenance_window=maintenance_window,
        maintenance_day=maintenance_day,
        project=project,
        description=description,
        clone=clone,
        subscribe_to_email_events=subscribe_to_email_events,
    )

    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def destroy_infra(databaseinfra, task=None):
    if databaseinfra.plan.provider != databaseinfra.plan.CLOUDSTACK:
        return True

    instances = []
    hosts = []

    for instance in databaseinfra.instances.all():
        instances.append(instance)
        hosts.append(instance.hostname)

    workflow_dict = build_dict(plan=databaseinfra.plan,
                               environment=databaseinfra.environment,
                               steps=get_engine_steps(engine=str(databaseinfra.plan.engine_type)),
                               qt=get_vm_qt(plan=databaseinfra.plan),
                               hosts=hosts,
                               instances=instances,
                               databaseinfra=databaseinfra,
                               MYSQL=MYSQL,
                               MONGODB=MONGODB,
                               REDIS=REDIS,
                               enginecod=get_engine(engine=str(databaseinfra.plan.engine_type)))

    if stop_workflow(workflow_dict=workflow_dict, task=task):
        return workflow_dict
    else:
        return False
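
# destroy_infra returns True when the plan is not CloudStack-backed, the
# workflow_dict on success, and False when the teardown workflow fails. A
# hedged caller sketch; LOG usage mirrors the rest of this module:
def destroy_and_log(databaseinfra, task=None):
    result = destroy_infra(databaseinfra, task=task)
    if result is True:
        LOG.info("No CloudStack infra to destroy")
    elif result is False:
        LOG.error("Destroy workflow failed; check the task history")
    else:
        LOG.info("Destroyed %d hosts", len(result['hosts']))
    return result
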
def resize_database_instances(database, cloudstackpack, instances, task=None):

    from dbaas_cloudstack.models import CloudStackPack

    original_cloudstackpack = CloudStackPack.objects.get(
        offering__serviceofferingid=database.offering_id,
        offering__region__environment=database.environment,
        engine_type__name=database.engine_type
    )

    workflow_dict = build_dict(
        database=database,
        databaseinfra=database.databaseinfra,
        cloudstackpack=cloudstackpack,
        original_cloudstackpack=original_cloudstackpack,
        environment=database.environment,
        instances=instances,
        steps=get_resize_settings(
            database.databaseinfra.plan.replication_topology.class_path
        )
    )

    start_workflow_ha(workflow_dict=workflow_dict, task=task)
    return workflow_dict
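
# Usage sketch under assumptions: resize the slave instances of an HA infra
# first, leaving the master for a second pass. get_slave_instances() is the
# same driver call used by volume_migration further below.
def resize_slaves_first(database, cloudstackpack, task=None):
    driver = database.databaseinfra.get_driver()
    slaves = driver.get_slave_instances()
    return resize_database_instances(database, cloudstackpack,
                                     instances=slaves, task=task)
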
def destroy_infra(databaseinfra, task=None):

    try:
        database = databaseinfra.databases.get()
        LOG.debug('Database found! {}'.format(database))
    except Database.DoesNotExist:
        # .get() raises DoesNotExist (not IndexError); bind database so the
        # build_dict call below cannot hit a NameError.
        database = None
        LOG.info("Database not found...")

    if databaseinfra.plan.provider != databaseinfra.plan.CLOUDSTACK:
        LOG.error('Databaseinfra is not cloudstack infra')
        return True

    instances = []
    hosts = []

    for instance in databaseinfra.instances.all():
        instances.append(instance)
        hosts.append(instance.hostname)

    workflow_dict = build_dict(plan=databaseinfra.plan,
                               environment=databaseinfra.environment,
                               steps=get_deploy_settings(
                                   databaseinfra.plan.engine_type.name),
                               qt=get_vm_qt(plan=databaseinfra.plan),
                               dbtype=str(databaseinfra.plan.engine_type),
                               hosts=hosts,
                               instances=instances,
                               databaseinfra=databaseinfra,
                               database=database
                               )

    if stop_workflow(workflow_dict=workflow_dict, task=task):
        return workflow_dict
    else:
        return False
def destroy_infra(databaseinfra, task=None):

    try:
        database = databaseinfra.databases.get()
        LOG.debug('Database found! {}'.format(database))
    except Database.DoesNotExist:
        # .get() raises DoesNotExist (not IndexError); bind database so the
        # build_dict call below cannot hit a NameError.
        database = None
        LOG.info("Database not found...")

    if databaseinfra.plan.provider != databaseinfra.plan.CLOUDSTACK:
        LOG.error('Databaseinfra is not cloudstack infra')
        return True

    instances = []
    hosts = []

    for instance in databaseinfra.instances.all():
        instances.append(instance)
        hosts.append(instance.hostname)

    workflow_dict = build_dict(
        plan=databaseinfra.plan,
        environment=databaseinfra.environment,
        steps=get_destroy_settings(
            databaseinfra.plan.replication_topology.class_path),
        qt=get_vm_qt(plan=databaseinfra.plan),
        dbtype=str(databaseinfra.plan.engine_type),
        hosts=hosts,
        instances=instances,
        databaseinfra=databaseinfra,
        database=database)

    if stop_workflow(workflow_dict=workflow_dict, task=task):
        return workflow_dict
    else:
        return False
def destroy_infra(databaseinfra, task=None):
    if databaseinfra.plan.provider != databaseinfra.plan.CLOUDSTACK:
        return True

    instances = []
    hosts = []

    for instance in databaseinfra.instances.all():
        instances.append(instance)
        hosts.append(instance.hostname)

    workflow_dict = build_dict(plan=databaseinfra.plan,
                               environment=databaseinfra.environment,
                               steps=get_engine_steps(engine=str(databaseinfra.plan.engine_type)),
                               qt=get_vm_qt(plan=databaseinfra.plan),
                               hosts=hosts,
                               instances=instances,
                               databaseinfra=databaseinfra,
                               MYSQL=MYSQL,
                               MONGODB=MONGODB,
                               enginecod=get_engine(engine=str(databaseinfra.plan.engine_type)))

    if stop_workflow(workflow_dict=workflow_dict, task=task):
        return workflow_dict
    else:
        return False
def restore_snapshot(self, database, snapshot, user, task_history):
    from dbaas_nfsaas.models import HostAttr
    LOG.info("Restoring snapshot")
    worker_name = get_worker_name()

    task_history = models.TaskHistory.objects.get(id=task_history)
    task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                        user=user, worker_name=worker_name)

    databaseinfra = database.databaseinfra

    snapshot = Snapshot.objects.get(id=snapshot)
    snapshot_id = snapshot.snapshopt_id  # field name is misspelled on the model itself

    host_attr = HostAttr.objects.get(nfsaas_path=snapshot.export_path)
    host = host_attr.host
    host_attr = HostAttr.objects.get(host=host, is_active=True)

    export_id = host_attr.nfsaas_export_id
    export_path = host_attr.nfsaas_path

    steps = RESTORE_SNAPSHOT_SINGLE

    if databaseinfra.plan.is_ha and databaseinfra.engine_name == 'mysql':
        steps = RESTORE_SNAPSHOT_MYSQL_HA

    not_primary_instances = databaseinfra.instances.exclude(
        hostname=host
    ).exclude(
        instance_type__in=[Instance.MONGODB_ARBITER, Instance.REDIS_SENTINEL]
    )
    not_primary_hosts = [
        instance.hostname for instance in not_primary_instances]

    workflow_dict = build_dict(databaseinfra=databaseinfra,
                               database=database,
                               snapshot_id=snapshot_id,
                               export_path=export_path,
                               export_id=export_id,
                               host=host,
                               steps=steps,
                               not_primary_hosts=not_primary_hosts,
                               )

    start_workflow(workflow_dict=workflow_dict, task=task_history)

    if workflow_dict['exceptions']['traceback']:
        error = "\n".join(
            ": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
        traceback = "\nException Traceback\n".join(
            workflow_dict['exceptions']['traceback'])
        error = "{}\n{}\n{}".format(error, traceback, error)
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=error)
    else:
        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Database successfully recovered!')

    return
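
# The error-report block above is repeated verbatim by several tasks in this
# module; a hedged refactor sketch that yields the same string:
def format_workflow_errors(workflow_dict):
    exceptions = workflow_dict['exceptions']
    error = "\n".join(": ".join(err) for err in exceptions['error_codes'])
    traceback = "\nException Traceback\n".join(exceptions['traceback'])
    return "{}\n{}\n{}".format(error, traceback, error)
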
def make_infra(plan, environment, name, task=None):
    if plan.provider != plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(plan=plan, environment=environment, name=name)

        if dbinfra:
            return build_dict(databaseinfra=dbinfra, created=True)

        return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(name=name,
                               plan=plan,
                               environment=environment,
                               steps=get_engine_steps(engine=str(plan.engine_type)),
                               qt=get_vm_qt(plan=plan),
                               MYSQL=MYSQL,
                               MONGODB=MONGODB,
                               enginecod=get_engine(engine=str(plan.engine_type)))

    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def make_infra(plan, environment, name, task=None):
    if plan.provider != plan.CLOUDSTACK:
        dbinfra = DatabaseInfra.best_for(plan=plan, environment=environment, name=name)

        if dbinfra:
            return build_dict(databaseinfra=dbinfra, created=True)

        return build_dict(databaseinfra=None, created=False)

    workflow_dict = build_dict(name=slugify(name),
                               plan=plan,
                               environment=environment,
                               steps=get_engine_steps(engine=str(plan.engine_type)),
                               qt=get_vm_qt(plan=plan),
                               MYSQL=MYSQL,
                               MONGODB=MONGODB,
                               REDIS=REDIS,
                               enginecod=get_engine(engine=str(plan.engine_type)))

    start_workflow(workflow_dict=workflow_dict, task=task)
    return workflow_dict
def resize_database(database, cloudstackpack, task=None):

    from dbaas_cloudstack.models import CloudStackPack

    original_cloudstackpack = CloudStackPack.objects.get(
        offering__serviceofferingid=database.offering_id,
        offering__region__environment=database.environment,
        engine_type__name=database.engine_type)

    workflow_dict = build_dict(database=database,
                               cloudstackpack=cloudstackpack,
                               original_cloudstackpack=original_cloudstackpack,
                               environment=database.environment,
                               steps=get_engine_resize_steps(engine=str(database.plan.engine_type)),
                               enginecod=get_engine(engine=str(database.plan.engine_type)))

    start_workflow(workflow_dict=workflow_dict, task=task)

    return workflow_dict
    def build(cls, data, config):
        # Preprocess data to construct an embedding
        # Reserve 0 for the special NIL token.
        # return: {'char1': 1, ... ,'charN': N}, sorted by frequency of character
        tok2id = build_dict((normalize(word) if config.is_normalize else word
                             for sentence, _ in data for word in sentence),
                            offset=1)
        tok2id[config.UNK] = len(tok2id) + 1
        # tok2id indexes from 1 (0 is reserved for the NIL token above)
        assert sorted(tok2id.items(), key=lambda t: t[1])[0][1] == 1
        logger.info("Built dictionary for %d features.", len(tok2id))

        max_length = max(len(sentence) for sentence, _ in data)

        return cls(tok2id, max_length)  # return a class instance
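
# Note: build_dict here is a different helper from the workflow build_dict used
# elsewhere in this section — it maps tokens to ids ordered by descending
# frequency, with `offset` shifting ids so 0 stays reserved. A minimal sketch
# under that assumption:
from collections import Counter

def build_token_dict(words, offset=0, max_words=None):
    counts = Counter(words)
    return {word: index + offset
            for index, (word, _) in enumerate(counts.most_common(max_words))}
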
def resize_database_instance(database, cloudstackpack, instance, task=None):

    from dbaas_cloudstack.models import CloudStackPack
    original_cloudstackpack = CloudStackPack.objects.get(
        offering__serviceofferingid=database.offering_id,
        offering__region__environment=database.environment,
        engine_type__name=database.engine_type)

    workflow_dict = build_dict(
        database=database,
        databaseinfra=database.databaseinfra,
        cloudstackpack=cloudstackpack,
        original_cloudstackpack=original_cloudstackpack,
        environment=database.environment,
        instance=instance,
        host=instance.hostname,
        steps=get_resize_settings(database.engine_type),
    )

    start_workflow(workflow_dict=workflow_dict, task=task)

    return workflow_dict
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None,
                                      user=None):
    #AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS_SENTINEL:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        workflow_dict = build_dict(
            #database_region_migration_detail = database_region_migration_detail,
            #database_region_migration = database_region_migration,
            #database = database,
            databaseinfra=databaseinfra,
            #source_environment = source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            #engine = engine,
            source_instances=source_instances,
            source_hosts=source_hosts,
            #source_plan = source_plan,
            target_plan=target_plan,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if not workflow_dict['created']:

            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err)
                    for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is no infrastructure available to allocate this database."

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)

            return

        else:

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration finished successfully')
            return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Oops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None,
                                           user=None):
    #AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=False):
            source_instances.append(instance)
            source_hosts.append(instance.hostname)

        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=True):
            target_instances.append(instance)
            target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        workflow_dict = build_dict(
            database_region_migration_detail=database_region_migration_detail,
            database_region_migration=database_region_migration,
            database=database,
            databaseinfra=databaseinfra,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            engine=engine,
            source_instances=source_instances,
            source_plan=source_plan,
            target_plan=target_plan,
            source_hosts=source_hosts,
            target_instances=target_instances,
            target_hosts=target_hosts)

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database region migration finished successfully')

    except Exception as e:
        traceback = full_stack()
        LOG.error("Oops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None, user=None):
    AuditRequest.new_request(
        "execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.is_migration_up = False
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(future_instance__isnull=False):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(future_instance__isnull=True):
            target_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        if not source_hosts:
            raise Exception('There is no source host')
        if not source_instances:
            raise Exception('There is no source instance')
        if not target_hosts:
            raise Exception('There is no target host')
        if not target_instances:
            raise Exception('There is no target instance')

        source_secondary_ips = DatabaseInfraAttr.objects.filter(databaseinfra=databaseinfra,
                                                                equivalent_dbinfraattr__isnull=False)

        source_secondary_ips = list(source_secondary_ips)

        workflow_dict = build_dict(database_region_migration_detail=database_region_migration_detail,
                                   database_region_migration=database_region_migration,
                                   database=database,
                                   databaseinfra=databaseinfra,
                                   source_environment=source_environment,
                                   target_environment=target_environment,
                                   steps=workflow_steps,
                                   engine=engine,
                                   source_instances=source_instances,
                                   source_plan=source_plan,
                                   target_plan=target_plan,
                                   source_hosts=source_hosts,
                                   target_instances=target_instances,
                                   target_hosts=target_hosts,
                                   source_secondary_ips=source_secondary_ips,
                                   )

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        current_step = database_region_migration.current_step
        database_region_migration.current_step = current_step - 1
        database_region_migration.save()

        database_region_migration_detail.status = database_region_migration_detail.SUCCESS
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database region migration finished successfully')

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=traceback)

        database_region_migration_detail.status = database_region_migration_detail.ERROR
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        return

    finally:
        AuditRequest.cleanup_request()
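
# The undo task above decrements database_region_migration.current_step; its
# forward counterpart increments it. A hedged helper expressing that symmetry:
def move_migration_pointer(database_region_migration, delta):
    database_region_migration.current_step += delta
    database_region_migration.save()
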
def volume_migration(self, database, user, task_history=None):
    from dbaas_nfsaas.models import HostAttr, PlanAttr
    from workflow.settings import VOLUME_MIGRATION
    from util import build_dict
    from workflow.workflow import start_workflow
    from time import sleep

    def switch_master(databaseinfra, instance):
        driver = databaseinfra.get_driver()
        # Poll replication for up to ~210s; if it never catches up, give up
        # silently without switching the master.
        for attempt in range(0, 21):
            if driver.is_replication_ok(instance):
                driver.switch_master()
                return
            LOG.info("Waiting 10s to check replication...")
            sleep(10)

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                        user=user, worker_name=worker_name)

    stop_now = False
    if database.status != Database.ALIVE or not database.database_status.is_alive:
        msg = "Database is not alive!"
        stop_now = True

    if database.is_beeing_used_elsewhere(task_id=self.request.id):
        msg = "Database is in use by another task!"
        stop_now = True

    if database.has_migration_started():
        msg = "Region migration for this database has already started!"
        stop_now = True

    if stop_now:
        task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details=msg)
        LOG.info("Migration finished")
        return

    default_plan_size = PlanAttr.objects.get(dbaas_plan=database.plan).nfsaas_plan
    LOG.info("Migrating {} volumes".format(database))

    databaseinfra = database.databaseinfra
    driver = databaseinfra.get_driver()

    environment = database.environment
    plan = database.plan

    instances = driver.get_slave_instances()
    master_instance = driver.get_master_instance()
    instances.append(master_instance)
    LOG.info('Instances: {}'.format(str(instances)))

    hosts = [instance.hostname for instance in instances]
    volumes = HostAttr.objects.filter(host__in=hosts,
                                      is_active=True,
                                      nfsaas_size_id=default_plan_size)

    if len(volumes) == len(hosts):
        task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Volumes already migrated!')
        LOG.info("Migration finished")
        return

    for index, instance in enumerate(instances):
        if not driver.check_instance_is_eligible_for_backup(instance=instance):
            LOG.info('Instance is not eligible for backup {}'.format(str(instance)))
            continue

        LOG.info('Volume migration for instance {}'.format(str(instance)))
        host = instance.hostname
        old_volume = HostAttr.objects.get(host=host, is_active=True)

        if old_volume.nfsaas_size_id == default_plan_size:
            if databaseinfra.plan.is_ha:
                switch_master(databaseinfra, instance)
            continue

        workflow_dict = build_dict(databaseinfra=databaseinfra,
                                   database=database,
                                   environment=environment,
                                   plan=plan,
                                   host=host,
                                   instance=instance,
                                   old_volume=old_volume,
                                   steps=VOLUME_MIGRATION,
                                   )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['exceptions']['traceback']:
            error = "\n".join(": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
            traceback = "\nException Traceback\n".join(workflow_dict['exceptions']['traceback'])
            error = "{}\n{}\n{}".format(error, traceback, error)
            task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
            LOG.info("Migration finished with errors")
            return

        if databaseinfra.plan.is_ha:
            LOG.info("Waiting 60s to check continue...")
            sleep(60)
            switch_master(databaseinfra, instance)
            LOG.info("Waiting 60s to check continue...")
            sleep(60)

    task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Volumes successfully migrated!')

    LOG.info("Migration finished")

    return
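
# switch_master above gives up silently when replication never catches up. A
# hedged variant sketch that fails loudly instead, reusing the same driver
# calls:
def switch_master_or_raise(databaseinfra, instance, attempts=21, wait=10):
    from time import sleep
    driver = databaseinfra.get_driver()
    for _ in range(attempts):
        if driver.is_replication_ok(instance):
            driver.switch_master()
            return
        LOG.info("Waiting %ss to check replication...", wait)
        sleep(wait)
    raise RuntimeError("Replication never caught up; master was not switched")
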
def upgrade_mongodb_24_to_30(self, database, user, task_history=None):

    from workflow.settings import MONGODB_UPGRADE_24_TO_30_SINGLE
    from workflow.settings import MONGODB_UPGRADE_24_TO_30_HA
    from util import build_dict
    from workflow.workflow import start_workflow

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        task_history=task_history,
                                        user=user,
                                        worker_name=worker_name)

    databaseinfra = database.databaseinfra
    driver = databaseinfra.get_driver()

    instances = driver.get_database_instances()
    source_plan = databaseinfra.plan
    target_plan = source_plan.engine_equivalent_plan

    source_engine = databaseinfra.engine
    target_engine = source_engine.engine_upgrade_option

    if source_plan.is_ha:
        steps = MONGODB_UPGRADE_24_TO_30_HA
    else:
        steps = MONGODB_UPGRADE_24_TO_30_SINGLE

    stop_now = False

    if not target_plan:
        msg = "There is no Engine Equivalent Plan!"
        stop_now = True

    if not target_engine:
        msg = "There is no Engine Upgrade Option!"
        stop_now = True

    if database.status != Database.ALIVE or not database.database_status.is_alive:
        msg = "Database is not alive!"
        stop_now = True

    if database.is_beeing_used_elsewhere(task_id=self.request.id):
        msg = "Database is in use by another task!"
        stop_now = True

    if not source_engine.version.startswith('2.4.'):
        msg = "Database version must be 2.4!"
        stop_now = True

    if target_engine and target_engine.version != '3.0.12':
        msg = "Target database version must be 3.0.12!"
        stop_now = True

    if stop_now:
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=msg)
        LOG.info("Upgrade finished")
        return

    try:
        delete_zabbix_alarms(database)
    except Exception as e:
        message = "Could not delete Zabbix alarms: {}".format(e)
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=message)
        LOG.error(message)
        return

    try:
        workflow_dict = build_dict(steps=steps,
                                   databaseinfra=databaseinfra,
                                   instances=instances,
                                   source_plan=source_plan,
                                   target_plan=target_plan,
                                   source_engine=source_engine,
                                   target_engine=target_engine)

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['exceptions']['traceback']:
            error = "\n".join(
                ": ".join(err)
                for err in workflow_dict['exceptions']['error_codes'])
            traceback = "\nException Traceback\n".join(
                workflow_dict['exceptions']['traceback'])
            error = "{}\n{}\n{}".format(error, traceback, error)
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)
            LOG.info("MongoDB Upgrade finished with errors")
            return

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details='MongoDB successfully upgraded!')

        LOG.info("MongoDB Upgrade finished")
    except Exception as e:
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=e)
        LOG.warning("MongoDB Upgrade finished with errors")

    try:
        create_zabbix_alarms(database)
    except Exception as e:
        message = "Could not create Zabbix alarms: {}".format(e)
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=message)
        LOG.error(message)
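
# The delete/create Zabbix-alarm bracketing above could be factored into a
# context manager; a hedged sketch reusing the same helpers the task calls
# (per-call error handling is left to the caller here):
from contextlib import contextmanager

@contextmanager
def zabbix_alarms_suspended(database):
    delete_zabbix_alarms(database)
    try:
        yield
    finally:
        create_zabbix_alarms(database)
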
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None, user=None):
    AuditRequest.new_request(
        "execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and not instance.future_instance:
                continue
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan

        source_offering = databaseinfra.cs_dbinfra_offering.get().offering
        target_offering = source_offering.equivalent_offering

        source_secondary_ips = []

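        # The same filtering applies to secondary IPs: past the first step,
        # only those that already have an equivalent attribute on the target
        # side are still relevant.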
        for secondary_ip in DatabaseInfraAttr.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and\
                    not secondary_ip.equivalent_dbinfraattr:
                continue
            source_secondary_ips.append(secondary_ip)

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            database=database,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            source_plan=source_plan,
            target_plan=target_plan,
            source_offering=source_offering,
            target_offering=target_offering,
            source_secondary_ips=source_secondary_ips,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

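        # 'created' is expected to be filled in by the provisioning steps;
        # False means the target infrastructure could not be allocated.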
        if not workflow_dict['created']:

            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details=error)

            return

        else:
            database_region_migration_detail.status = database_region_migration_detail.SUCCESS
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            current_step = database_region_migration.current_step
            database_region_migration.current_step = current_step + 1
            database_region_migration.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration finished successfully')
            return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=traceback)
        return

    finally:
        AuditRequest.cleanup_request()
def upgrade_mongodb_24_to_30(self, database, user, task_history=None):

    from workflow.settings import MONGODB_UPGRADE_24_TO_30_SINGLE
    from workflow.settings import MONGODB_UPGRADE_24_TO_30_HA
    from util import build_dict
    from workflow.workflow import start_workflow

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                        user=user, worker_name=worker_name)

    databaseinfra = database.databaseinfra
    driver = databaseinfra.get_driver()

    instances = driver.get_database_instances()
    source_plan = databaseinfra.plan
    target_plan = source_plan.engine_equivalent_plan

    source_engine = databaseinfra.engine
    target_engine = source_engine.engine_upgrade_option

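    # The HA topology needs extra replica-set coordination, so the step list
    # is chosen per plan.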
    if source_plan.is_ha:
        steps = MONGODB_UPGRADE_24_TO_30_HA
    else:
        steps = MONGODB_UPGRADE_24_TO_30_SINGLE

    stop_now = False

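    # Validate preconditions up front; any failure aborts before touching the
    # database. Note that only the last failing message is reported.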
    if not target_plan:
        msg = "There is not Engine Equivalent Plan!"
        stop_now = True

    if not target_engine:
        msg = "There is not Engine Upgrade Option!"
        stop_now = True

    if database.status != Database.ALIVE or not database.database_status.is_alive:
        msg = "Database is not alive!"
        stop_now = True

    if database.is_beeing_used_elsewhere(task_id=self.request.id):
        msg = "Database is in use by another task!"
        stop_now = True

    if database.has_migration_started():
        msg = "Region migration for this database has already started!"
        stop_now = True

    if not source_engine.version.startswith('2.4.'):
        msg = "Database version must be 2.4!"
        stop_now = True

    if target_engine and target_engine.version != '3.0.8':
        msg = "Target database version must be 3.0.8!"
        stop_now = True

    if stop_now:
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=msg)
        LOG.info("MongoDB upgrade aborted: {}".format(msg))
        return

    try:

        disable_zabbix_alarms(database)
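        # Alarms stay disabled for the whole upgrade window; the finally
        # block below re-enables them even if the workflow fails.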

        workflow_dict = build_dict(steps=steps,
                                   databaseinfra=databaseinfra,
                                   instances=instances,
                                   source_plan=source_plan,
                                   target_plan=target_plan,
                                   source_engine=source_engine,
                                   target_engine=target_engine)

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['exceptions']['traceback']:
            error = "\n".join(
                ": ".join(err)
                for err in workflow_dict['exceptions']['error_codes'])
            traceback = "\nException Traceback\n".join(
                workflow_dict['exceptions']['traceback'])
            error = "{}\n{}\n{}".format(error, traceback, error)
            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)
            LOG.info("MongoDB Upgrade finished with errors")
            return

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='MongoDB successfully upgraded!')

        LOG.info("MongoDB Upgrade finished")
    except Exception as e:
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=e)
        LOG.warning("MongoDB Upgrade finished with errors")
    finally:
        enable_zabbix_alarms(database)
Example #28
def restore_snapshot(self, database, snapshot, user, task_history):
    try:
        from dbaas_nfsaas.models import HostAttr
        LOG.info("Restoring snapshot")
        worker_name = get_worker_name()

        task_history = models.TaskHistory.objects.get(id=task_history)
        task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                            user=user, worker_name=worker_name)

        databaseinfra = database.databaseinfra

        snapshot = Snapshot.objects.get(id=snapshot)
        snapshot_id = snapshot.snapshopt_id  # 'snapshopt_id' matches the model's field spelling

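        # Resolve both the export that holds the snapshot and the export
        # currently active on the same host.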
        host_attr_snapshot = HostAttr.objects.get(nfsaas_path=snapshot.export_path)
        host = host_attr_snapshot.host
        host_attr = HostAttr.objects.get(host=host, is_active=True)

        export_id_snapshot = host_attr_snapshot.nfsaas_export_id
        export_id = host_attr.nfsaas_export_id
        export_path = host_attr.nfsaas_path

        steps = get_restore_snapshot_settings(
            database.plan.replication_topology.class_path
        )

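        # Exclude the host being restored and the non-data instances
        # (MongoDB arbiters, Redis sentinels) from the secondary-host list.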
        not_primary_instances = databaseinfra.instances.exclude(
            hostname=host
        ).exclude(
            instance_type__in=[Instance.MONGODB_ARBITER,
                               Instance.REDIS_SENTINEL]
        )
        not_primary_hosts = [
            instance.hostname for instance in not_primary_instances]

        workflow_dict = build_dict(databaseinfra=databaseinfra,
                                   database=database,
                                   snapshot_id=snapshot_id,
                                   export_path=export_path,
                                   export_id=export_id,
                                   export_id_snapshot=export_id_snapshot,
                                   host=host,
                                   steps=steps,
                                   not_primary_hosts=not_primary_hosts,
                                   )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['exceptions']['traceback']:
            raise Exception('Restore could not be finished')
        else:
            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS, details='Database successfully recovered!')

    except Exception as e:
        if 'workflow_dict' in locals():
            error = "\n".join(": ".join(err) for err in
                              workflow_dict['exceptions']['error_codes'])
            traceback = "\nException Traceback\n".join(workflow_dict['exceptions']['traceback'])
            error = "{}\n{}\n{}".format(error, traceback, error)
        else:
            error = str(e)
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=error)
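
# A minimal invocation sketch, assuming restore_snapshot is bound as a Celery
# task; note that it receives `snapshot` and `task_history` as primary keys,
# not model instances (the call below is illustrative):
#
#     restore_snapshot.delay(database=database, snapshot=snapshot.id,
#                            user=user, task_history=task_history.id)
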
Example #29
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None,
                                      user=None):
    AuditRequest.new_request("execute_database_region_migration", user,
                             "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and not instance.future_instance:
                continue
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan

        source_offering = databaseinfra.cs_dbinfra_offering.get().offering
        target_offering = source_offering.equivalent_offering

        source_secondary_ips = []

        for secondary_ip in DatabaseInfraAttr.objects.filter(
                databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and\
                    not secondary_ip.equivalent_dbinfraattr:
                continue
            source_secondary_ips.append(secondary_ip)

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            database=database,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            source_plan=source_plan,
            target_plan=target_plan,
            source_offering=source_offering,
            target_offering=target_offering,
            source_secondary_ips=source_secondary_ips,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if not workflow_dict['created']:

            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err)
                    for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)

            return

        else:
            database_region_migration_detail.status = database_region_migration_detail.SUCCESS
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            current_step = database_region_migration.current_step
            database_region_migration.current_step = current_step + 1
            database_region_migration.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration finished successfully')
            return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
Example #30
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None, user=None):
    # AuditRequest.new_request("execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None
    
        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()
    
        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)
        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS_SENTINEL:
                source_hosts.append(instance.hostname)
    
        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id
    
        workflow_dict = build_dict(databaseinfra=databaseinfra,
                                   target_environment=target_environment,
                                   steps=workflow_steps,
                                   source_instances=source_instances,
                                   source_hosts=source_hosts,
                                   target_plan=target_plan)

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if not workflow_dict['created']:

            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err)
                    for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is no infrastructure to allocate this database."

            task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)

            return

        else:

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration finished successfully')
            return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=traceback)
        return