Example 1
def resize_database(self, database, cloudstackpack, task_history=None, user=None):
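    # Registers an AuditRequest and a TaskHistory entry, delegates the actual
    # resize to util.providers.resize_database and records either success or
    # the formatted error details on the task history.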

    AuditRequest.new_request("resize_database", user, "localhost")

    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, task_history=task_history,
            user=user, worker_name=worker_name)
        from util.providers import resize_database

        result = resize_database(database=database, cloudstackpack=cloudstackpack, task=task_history)

        if result['created'] == False:

            if 'exceptions' in result:
                error = "\n".join(": ".join(err) for err in result['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(result['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "Something went wrong."

            task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
        else:
            task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Resize successfully done.')

    except Exception as e:
        error = "Resize Database ERROR: {}".format(e)
        LOG.error(error)
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
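
The error-formatting idiom above, repeated in most of the examples that follow, implies that result['exceptions'] carries an 'error_codes' sequence of (code, message) pairs and a 'traceback' list of strings. Below is a minimal, self-contained sketch of that formatting; the dict shape and the sample values are assumptions inferred from the joins, not taken from the original project.

# Illustrative only: the structure of 'exceptions' is inferred from how the
# tasks above join it, not documented in this listing.
result = {
    'created': False,
    'exceptions': {
        'error_codes': [('DBAAS0001', 'offering not found')],
        'traceback': ['Traceback (most recent call last): ...'],
    },
}

error = "\n".join(": ".join(err) for err in result['exceptions']['error_codes'])
traceback = "\nException Traceback\n".join(result['exceptions']['traceback'])
print("{}\n{}\n{}".format(error, traceback, error))

Note that the tasks build the details string as error, traceback, error, so the joined error codes appear twice in the persisted details.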
Example 2
def monitor_acl_job(self, database, job_id, bind_address, bind_status=models.CREATED, user=None):
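    # Polls the ACL bind job through tasks.monitor_acl_job; on success the
    # bind status is persisted via dbaas_aclapi.util.update_bind_status,
    # otherwise the task history is marked with STATUS_ERROR.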
    if not user:
        user = self.request.args[-1]
    AuditRequest.new_request("create_database", user, "localhost")

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request, user=user, worker_name=worker_name)
    LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

    task_history.update_details(persist=True, details="Loading Process...")
    try:

        LOG.debug("database: {}, job_id: {}, bind_address: {}, bind_status: {}, user: {}".format(database, job_id, bind_address, bind_status, user))

        status = tasks.monitor_acl_job(database, job_id, bind_address,)

        LOG.debug("Job status return: {}".format(status))
        if status:
            from dbaas_aclapi.util import update_bind_status
            LOG.info("Updating Bind Status")
            update_bind_status(database, bind_address, bind_status)

            task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Bind created successfully')
            return
        else:
            raise Exception, "Error when monitoring the Bind Process"


    except Exception as e:
        LOG.info("DatabaseBindMonitoring ERROR: {}".format(e))
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details='Bind could not be granted')
        return
Example 3
def destroy_database(self, database, task_history=None, user=None):
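    # Destroys a quarantined database: records the quarantine metadata as
    # task details and then tears the infrastructure down with destroy_infra.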
    # register History
    AuditRequest.new_request("destroy_database", user, "localhost")
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                            user=user, worker_name=worker_name)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

        task_history.add_detail('Quarantine:')
        task_history.add_detail(
            'Since: {}'.format(database.quarantine_dt), level=2
        )
        task_history.add_detail(
            'Requested by: {}'.format(database.quarantine_user), level=2
        )
        task_history.add_detail('')
        task_history.add_detail('Loading Process...')

        databaseinfra = database.databaseinfra

        destroy_infra(databaseinfra=databaseinfra, task=task_history)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Database destroyed successfully')
        return
    finally:
        AuditRequest.cleanup_request()
Example 4
def destroy_database(self, database, task_history=None, user=None):
    # register History
    AuditRequest.new_request("destroy_database", user, "localhost")
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=worker_name)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" %
                 (self.request.id, self.request.task, self.request.kwargs,
                  str(self.request.args)))

        task_history.update_details(persist=True, details="Loading Process...")

        databaseinfra = database.databaseinfra

        destroy_infra(databaseinfra=databaseinfra, task=task_history)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database destroyed successfully')
        return
    finally:
        AuditRequest.cleanup_request()
Example 5
def purge_quarantine(self):
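    # Purges databases whose quarantine date is older than the
    # 'quarantine_retention_days' configuration: CloudStack-backed plans go
    # through destroy_infra, anything else is deleted directly.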
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    try:

        task_history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))
        quarantine_time = Configuration.get_by_name_as_int('quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)

        databases = Database.objects.filter(is_in_quarantine=True,
                                            quarantine_dt__lte=quarantine_time_dt)

        for database in databases:
            if database.plan.provider == database.plan.CLOUDSTACK:
                databaseinfra = database.databaseinfra

                destroy_infra(databaseinfra=databaseinfra, task=task_history)
            else:
                database.delete()

            LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
                database.name, quarantine_time))

        task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Databases destroyed successfully')
        return

    except Exception:
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details="Error")
        return
    finally:
        AuditRequest.cleanup_request()
Example 6
def create_database(self, name, plan, environment, team, project, description, task_history=None, user=None):
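    # Provisions a new database through make_infra; on failure the formatted
    # error is stored on the task history, and the exception handler tears
    # down any partially created infrastructure with destroy_infra.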
    AuditRequest.new_request("create_database", user, "localhost")
    try:

        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                            user=user, worker_name=worker_name)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

        task_history.update_details(persist=True, details="Loading Process...")

        result = make_infra(plan=plan,
                            environment=environment,
                            name=name,
                            team=team,
                            project=project,
                            description=description,
                            task=task_history,
                            )

        if result['created'] == False:

            if 'exceptions' in result:
                error = "\n".join(": ".join(err)
                                  for err in result['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    result['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details=error)
            return

        task_history.update_dbid(db=result['database'])
        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Database created successfully')

        return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        if 'result' in locals() and result['created']:
            destroy_infra(
                databaseinfra=result['databaseinfra'], task=task_history)

        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=traceback)
        return

    finally:
        AuditRequest.cleanup_request()
Example 7
def clone_database(self, origin_database, clone_name, user=None):
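	# Script-based clone flow: allocates infrastructure with make_infra,
	# copies the Database row, runs the engine clone script via call_script
	# and rolls the clone back on failure or timeout.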
	# register History
	AuditRequest.new_request("clone_database", self.request.kwargs["user"], "localhost")
	try:
		task_history = TaskHistory.register(request=self.request, user=user)

		LOG.info("origin_database: %s" % origin_database)

		dest_database = Database.objects.get(pk=origin_database.pk)
		dest_database.name = clone_name
		dest_database.pk = None

		task_history.update_details(persist=True, details="Loading Process...")
		result = make_infra(plan=origin_database.plan, environment=origin_database.environment, name=clone_name,
		                    task=task_history)

		if result['created'] == False:

			if 'exceptions' in result:
				error = "\n\n".join(": ".join(err) for err in result['exceptions']['error_codes'])
				traceback = "\n\nException Traceback\n".join(result['exceptions']['traceback'])
				error = "{}\n{}".format(error, traceback)
			else:
				error = "There is not any infra-structure to allocate this database."

			task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
			return
	finally:
		AuditRequest.cleanup_request()

	dest_database.databaseinfra = result['databaseinfra']
	dest_database.save()
	LOG.info("dest_database: %s" % dest_database)

	LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
		self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

	try:
		args = get_clone_args(origin_database, dest_database)
		script_name = factory_for(origin_database.databaseinfra).clone()
		return_code, output = call_script(script_name, working_dir=settings.SCRIPTS_PATH, args=args, split_lines=False)
		LOG.info("%s - return code: %s" % (self.request.id, return_code))
		if return_code != 0:
			task_history.update_status_for(TaskHistory.STATUS_ERROR, details=output + "\nTransaction rollback")
			LOG.error("task id %s - error occurred. Transaction rollback" % self.request.id)
			rollback_database(dest_database)
		else:
			task_history.update_dbid(db=dest_database)
			task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details=output)
	except SoftTimeLimitExceeded:
		LOG.error("task id %s - timeout exceeded" % self.request.id)
		task_history.update_status_for(TaskHistory.STATUS_ERROR, details="timeout exceeded")
		rollback_database(dest_database)
	except Exception as e:
		LOG.error("task id %s error: %s" % (self.request.id, e))
		task_history.update_status_for(TaskHistory.STATUS_ERROR, details=e)
		rollback_database(dest_database)
Example 8
def create_database(self, name, plan, environment, team, project, description, user=None):
	# register History
	AuditRequest.new_request("create_database", self.request.args[-1], "localhost")
	try:
		task_history = TaskHistory.register(request=self.request, user=user)
		LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
			self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

		task_history.update_details(persist=True, details="Loading Process...")

		result = make_infra(plan=plan, environment=environment, name=name, task=task_history)

		if result['created'] == False:

			if 'exceptions' in result:
				error = "\n".join(": ".join(err) for err in result['exceptions']['error_codes'])
				traceback = "\nException Traceback\n".join(result['exceptions']['traceback'])
				error = "{}\n{}\n{}".format(error, traceback, error)
			else:
				error = "There is not any infra-structure to allocate this database."

			task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
			return


		database = Database.provision(name, result['databaseinfra'])
		database.team = team
		database.project = project
		database.description = description
		database.save()
		
		task_history.update_dbid(db=database)
		
		from util import laas
		#laas.register_database_laas(database)
		
		task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Database created successfully')
		
		return
		
	except Exception as e:
	    traceback = full_stack()
	    LOG.error("Ops... something went wrong: %s" % e)
	    LOG.error(traceback)
	    
	    if 'database' in locals() and database.id:
	        task_history.update_status_for(TaskHistory.STATUS_WARNING, details=traceback)
	    else:
	        if 'result' in locals() and result['created']:
	            destroy_infra(databaseinfra = result['databaseinfra'])
	        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=traceback)
	    return
Example 9
def monitor_acl_job(self,
                    database,
                    job_id,
                    bind_address,
                    bind_status=models.CREATED,
                    user=None):
    if not user:
        user = self.request.args[-1]
    AuditRequest.new_request("create_database", user, "localhost")

    worker_name = get_worker_name()
    task_history = TaskHistory.register(request=self.request,
                                        user=user,
                                        worker_name=worker_name)
    LOG.info("id: %s | task: %s | kwargs: %s | args: %s" %
             (self.request.id, self.request.task, self.request.kwargs,
              str(self.request.args)))

    task_history.update_details(persist=True, details="Loading Process...")
    try:

        LOG.debug(
            "database: {}, job_id: {}, bind_address: {}, bind_status: {}, user: {}"
            .format(database, job_id, bind_address, bind_status, user))

        status = tasks.monitor_acl_job(
            database,
            job_id,
            bind_address,
        )

        LOG.debug("Job status return: {}".format(status))
        if status:
            from dbaas_aclapi.util import update_bind_status
            LOG.info("Updating Bind Status")
            update_bind_status(database, bind_address, bind_status)

            task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                           details='Bind created successfully')
            return
        else:
            raise Exception, "Error when monitoring the Bind Process"

    except Exception as e:
        LOG.info("DatabaseBindMonitoring ERROR: {}".format(e))
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details='Bind could not be granted')
        return
Example 10
def analyze_databases(self, task_history=None):
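    # Runs AnalyzeService against every non-quarantined database for each
    # ExecutionPlan and records the results through
    # insert_analyze_repository_record; any failure ends with STATUS_ERROR.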
    endpoint, health_check_route, health_check_string = get_analyzing_credentials()
    user = User.objects.get(username='******')
    worker_name = get_worker_name()
    task_history = TaskHistory.register(task_history=task_history, request=self.request, user=user,
                                        worker_name=worker_name)
    task_history.update_details(persist=True, details="Loading Process...")
    AuditRequest.new_request("analyze_databases", user, "localhost")

    try:
        analyze_service = AnalyzeService(endpoint, health_check_route,
                                         health_check_string)
        with transaction.atomic():
            databases = Database.objects.filter(is_in_quarantine=False)
            today = datetime.now()
            for database in databases:
                database_name, engine, instances, environment_name, databaseinfra_name = setup_database_info(database)
                for execution_plan in ExecutionPlan.objects.all():
                    if database_can_not_be_resized(database, execution_plan):
                        continue
                    params = execution_plan.setup_execution_params()
                    result = analyze_service.run(engine=engine, database=database_name,
                                                 instances=instances, **params)
                    if result['status'] == 'success':
                        task_history.update_details(persist=True, details="\nDatabase {} {} was analised.".format(database, execution_plan.plan_name))
                        if result['msg'] != instances:
                            continue
                        for instance in result['msg']:
                            insert_analyze_repository_record(today, database_name, instance,
                                                             engine, databaseinfra_name,
                                                             environment_name,
                                                             execution_plan)
                    else:
                        raise Exception("Check your service logs..")
        task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                       details='Analysis ok!')
    except Exception:
        try:
            task_history.update_details(
                persist=True,
                details="\nDatabase {} {} could not be analysed.".format(
                    database, execution_plan.plan_name))
            task_history.update_status_for(
                TaskHistory.STATUS_ERROR,
                details='Analysis finished with errors!\nError: {}'.format(
                    result['msg']))
        except UnboundLocalError:
            task_history.update_details(persist=True, details="\nProccess crashed")
            task_history.update_status_for(TaskHistory.STATUS_ERROR, details='Analisys could not be started')
    finally:
        AuditRequest.cleanup_request()
Example 11
def purge_quarantine(self):
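    # Purge variant that delegates the teardown to Database.destroy(user)
    # and reports progress through task-history details.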
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    try:
        task_history = TaskHistory.register(request=self.request, user=user)
        task_history.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info(
            "id: {} | task: {} | kwargs: {} | args: {}".format(
                self.request.id, self.request.task,
                self.request.kwargs, str(self.request.args)
            )
        )

        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days'
        )
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
        task_history.add_detail(
            "Quarantine date older than {}".format(quarantine_time_dt)
        )

        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt
        )
        task_history.add_detail(
            "Databases to purge: {}".format(len(databases))
        )

        for database in databases:
            task_history.add_detail('Deleting {}...'.format(database), level=2)
            database.destroy(user)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.'
        )
        return

    except Exception as e:
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()
Example 12
def destroy_database(self, database, user=None):
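	# Deletes the Database row itself and then destroys its infrastructure
	# with destroy_infra.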
	# register History
	AuditRequest.new_request("destroy_database", self.request.args[-1], "localhost")
	try:
		task_history = TaskHistory.register(request=self.request, user=user)
		LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
			self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))

		task_history.update_details(persist=True, details="Loading Process...")

		databaseinfra = database.databaseinfra
		database.delete()

		destroy_infra(databaseinfra=databaseinfra, task=task_history)

		task_history.update_status_for(TaskHistory.STATUS_SUCCESS, details='Database destroyed successfully')
		return
	finally:
		AuditRequest.cleanup_request()
Example 13
def resize_database(self,
                    database,
                    cloudstackpack,
                    task_history=None,
                    user=None):

    AuditRequest.new_request("resize_database", user, "localhost")

    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=worker_name)
        from util.providers import resize_database

        result = resize_database(database=database,
                                 cloudstackpack=cloudstackpack,
                                 task=task_history)

        if result['created'] == False:

            if 'exceptions' in result:
                error = "\n".join(
                    ": ".join(err)
                    for err in result['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    result['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "Something went wrong."

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)
        else:
            task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                           details='Resize successfully done.')

    except Exception as e:
        error = "Resize Database ERROR: {}".format(e)
        LOG.error(error)
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
Example 14
def purge_quarantine(self):
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")

    try:
        task_history = TaskHistory.register(request=self.request, user=user)
        task_history.relevance = TaskHistory.RELEVANCE_WARNING

        LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
            self.request.id, self.request.task, self.request.kwargs,
            str(self.request.args)))

        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)
        task_history.add_detail(
            "Quarantine date older than {}".format(quarantine_time_dt))

        databases = Database.objects.filter(
            is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt)
        task_history.add_detail("Databases to purge: {}".format(
            len(databases)))

        for database in databases:
            task_history.add_detail('Deleting {}...'.format(database), level=2)
            database.destroy(user)

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Listed databases were destroyed successfully.')
        return

    except Exception as e:
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details="Error\n{}".format(e))
        return
    finally:
        AuditRequest.cleanup_request()
Example 15
def clone_database(self, origin_database, clone_name, user=None):
	# register History
	AuditRequest.new_request("clone_database", self.request.kwargs["user"], "localhost")
	try:
		task_history = TaskHistory.register(request=self.request, user=user)

		LOG.info("origin_database: %s" % origin_database)

		dest_database = Database.objects.get(pk=origin_database.pk)
		dest_database.name = clone_name
		dest_database.pk = None

		task_history.update_details(persist=True, details="Loading Process...")
		result = make_infra(plan=origin_database.plan, environment=origin_database.environment, name=clone_name,
		                    task=task_history)

		if result['created'] == False:

			if 'exceptions' in result:
				error = "\n\n".join(": ".join(err) for err in result['exceptions']['error_codes'])
				traceback = "\n\nException Traceback\n".join(result['exceptions']['traceback'])
				error = "{}\n{}".format(error, traceback)
			else:
				error = "There is not any infra-structure to allocate this database."

			task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
			return
					
	except Exception as e:
	    traceback = full_stack()
	    LOG.error("Ops... something went wrong: %s" % e)
	    LOG.error(traceback)
	    
	    if 'result' in locals() and result['created']:
	        destroy_infra(databaseinfra = result['databaseinfra'])
	    
	    task_history.update_status_for(TaskHistory.STATUS_ERROR, details=traceback)
	    return
Example 16
def purge_quarantine(self):
    user = AccountUser.objects.get(username='******')
    AuditRequest.new_request("purge_quarantine", user, "localhost")
    try:

        task_history = TaskHistory.register(request=self.request, user=user)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))
        quarantine_time = Configuration.get_by_name_as_int(
            'quarantine_retention_days')
        quarantine_time_dt = date.today() - timedelta(days=quarantine_time)

        databases = Database.objects.filter(is_in_quarantine=True,
                                            quarantine_dt__lte=quarantine_time_dt)

        for database in databases:
            if database.plan.provider == database.plan.CLOUDSTACK:
                databaseinfra = database.databaseinfra

                destroy_infra(databaseinfra=databaseinfra, task=task_history)
            else:
                database.delete()

            LOG.info("The database %s was deleted, because it was set to quarentine %d days ago" % (
                database.name, quarantine_time))

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Databases destroyed successfully')
        return

    except Exception:
        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details="Error")
        return
    finally:
        AuditRequest.cleanup_request()
Example 17
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None,
                                      user=None):
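    # Runs one step of a region migration: loads the migration detail row,
    # builds the workflow dict (instances, hosts, plans, offerings and
    # secondary IPs) and executes it with start_workflow, persisting the
    # step status and advancing current_step on success.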
    AuditRequest.new_request("execute_database_region_migration", user,
                             "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and not instance.future_instance:
                continue
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan

        source_offering = databaseinfra.cs_dbinfra_offering.get().offering
        target_offering = source_offering.equivalent_offering

        source_secondary_ips = []

        for secondary_ip in DatabaseInfraAttr.objects.filter(
                databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and\
                    not secondary_ip.equivalent_dbinfraattr:
                continue
            source_secondary_ips.append(secondary_ip)

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            database=database,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            source_plan=source_plan,
            target_plan=target_plan,
            source_offering=source_offering,
            target_offering=target_offering,
            source_secondary_ips=source_secondary_ips,
        )

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if workflow_dict['created'] == False:

            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err)
                    for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)

            return

        else:
            database_region_migration_detail.status = database_region_migration_detail.SUCCESS
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            current_step = database_region_migration.current_step
            database_region_migration.current_step = current_step + 1
            database_region_migration.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS,
                details='Database region migration was successful')
            return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return
Example 18
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None,
                                           user=None):
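    # Rollback counterpart of the migration step: rebuilds the workflow dict
    # for the source and target sides, undoes the step with stop_workflow
    # and decrements current_step.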
    AuditRequest.new_request("execute_database_region_migration", user,
                             "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.is_migration_up = False
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=False):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(
                future_instance__isnull=True):
            target_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        if not source_hosts:
            raise Exception('There is no source host')
        if not source_instances:
            raise Exception('There is no source instance')
        if not target_hosts:
            raise Exception('There is no target host')
        if not target_instances:
            raise Exception('There is no target instance')

        source_secondary_ips = DatabaseInfraAttr.objects.filter(
            databaseinfra=databaseinfra, equivalent_dbinfraattr__isnull=False)

        source_secondary_ips = list(source_secondary_ips)

        workflow_dict = build_dict(
            database_region_migration_detail=database_region_migration_detail,
            database_region_migration=database_region_migration,
            database=database,
            databaseinfra=databaseinfra,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            engine=engine,
            source_instances=source_instances,
            source_plan=source_plan,
            target_plan=target_plan,
            source_hosts=source_hosts,
            target_instances=target_instances,
            target_hosts=target_hosts,
            source_secondary_ips=source_secondary_ips,
        )

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        current_step = database_region_migration.current_step
        database_region_migration.current_step = current_step - 1
        database_region_migration.save()

        database_region_migration_detail.status = database_region_migration_detail.SUCCESS
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='Database region migration was successful')

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)

        database_region_migration_detail.status = database_region_migration_detail.ERROR
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        return
Example 19
    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)
        return

    finally:
        AuditRequest.cleanup_request()
        pass


@app.task(bind=True)
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None,
                                           user=None):
    AuditRequest.new_request("execute_database_region_migration", user,
                             "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
Example 20
def database_disk_resize(self, database, disk_offering, task_history, user):
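    # Resizes the NFS disk of every instance that has a HostAttr entry and
    # updates the DatabaseInfra metadata; on error the old disk offering is
    # restored and the already-resized instances are reverted.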
    from dbaas_nfsaas.models import HostAttr
    from workflow.steps.util.nfsaas_utils import resize_disk

    AuditRequest.new_request("database_disk_resize", user, "localhost")

    databaseinfra = database.databaseinfra
    old_disk_offering = database.databaseinfra.disk_offering
    resized = []

    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=worker_name)

        task_history.update_details(persist=True,
                                    details='\nLoading Disk offering')

        for instance in databaseinfra.get_driver().get_database_instances():
            if not HostAttr.objects.filter(
                    host_id=instance.hostname_id).exists():
                continue

            task_history.update_details(persist=True,
                                        details='\nChanging instance {} to '
                                        'NFS {}'.format(
                                            instance, disk_offering))
            if resize_disk(environment=database.environment,
                           host=instance.hostname,
                           disk_offering=disk_offering):
                resized.append(instance)

        task_history.update_details(
            persist=True,
            details='\nUpdate DBaaS metadata from {} to '
            '{}'.format(databaseinfra.disk_offering, disk_offering))
        databaseinfra.disk_offering = disk_offering
        databaseinfra.save()

        task_history.update_status_for(
            status=TaskHistory.STATUS_SUCCESS,
            details='\nDisk resize successfully done.')
        return True

    except Exception as e:
        error = "Disk resize ERROR: {}".format(e)
        LOG.error(error)

        if databaseinfra.disk_offering != old_disk_offering:
            task_history.update_details(persist=True,
                                        details='\nUndo update DBaaS metadata')
            databaseinfra.disk_offering = old_disk_offering
            databaseinfra.save()

        for instance in resized:
            task_history.update_details(
                persist=True,
                details='\nUndo NFS change for instance {}'.format(instance))
            resize_disk(environment=database.environment,
                        host=instance.hostname,
                        disk_offering=old_disk_offering)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
    finally:
        AuditRequest.cleanup_request()
Example 21
def resize_database(self,
                    database,
                    cloudstackpack,
                    task_history=None,
                    user=None):
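    # CloudStack resize that skips instances already on the target offering,
    # resizes the remaining ones with resize_database_instances, checks
    # replication (and switches the master) on HA plans and finally updates
    # the DatabaseInfraOffering metadata.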
    AuditRequest.new_request("resize_database", user, "localhost")

    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=worker_name)
        from util.providers import resize_database_instances
        from util import get_credentials_for
        from dbaas_cloudstack.provider import CloudStackProvider
        from dbaas_credentials.models import CredentialType

        cs_credentials = get_credentials_for(
            environment=database.environment,
            credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        databaseinfra = database.databaseinfra
        driver = databaseinfra.get_driver()
        instances = driver.get_slave_instances()
        instances.append(driver.get_master_instance())
        instances_to_resize = []
        resized_instances = []

        disable_zabbix_alarms(database)

        for instance in instances:
            host = instance.hostname
            host_attr = host.cs_host_attributes.get()
            offering_id = cs_provider.get_vm_offering_id(
                vm_id=host_attr.vm_id, project_id=cs_credentials.project)

            if offering_id == cloudstackpack.offering.serviceofferingid:
                LOG.info("Instance offering: {}".format(offering_id))
                resized_instances.append(instance)
            else:
                instances_to_resize.append(instance)

        result = resize_database_instances(database=database,
                                           cloudstackpack=cloudstackpack,
                                           instances=instances_to_resize,
                                           task=task_history)

        if result['created']:
            resized_instances += result['completed_instances']
        else:
            if 'exceptions' not in result:
                error = "Something went wrong."
            else:
                error = "\n".join(
                    ": ".join(err)
                    for err in result['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    result['exceptions']['traceback'])

                error = "{}\n{}\n{}".format(error, traceback, error)

        if databaseinfra.plan.is_ha:
            LOG.info("Waiting 60s to check continue...")
            sleep(60)
            instance = driver.get_slave_instances()[0]
            driver.check_replication_and_switch(instance)

        if len(instances) == len(resized_instances):
            from dbaas_cloudstack.models import DatabaseInfraOffering
            LOG.info('Updating offering DatabaseInfra.')

            databaseinfraoffering = DatabaseInfraOffering.objects.get(
                databaseinfra=databaseinfra)
            databaseinfraoffering.offering = cloudstackpack.offering
            databaseinfraoffering.save()

            if databaseinfra.engine.engine_type.name == 'redis':
                new_max_memory = databaseinfraoffering.offering.memory_size_mb
                resize_factor = 0.5
                if new_max_memory > 1024:
                    resize_factor = 0.75

                new_max_memory *= resize_factor
                databaseinfra.per_database_size_mbytes = int(new_max_memory)
                databaseinfra.save()

            task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                           details='Resize successfully done.')
            return

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
        return

    except Exception as e:
        error = "Resize Database ERROR: {}".format(e)
        LOG.error(error)
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)

    finally:
        enable_zabbix_alarms(database)
        AuditRequest.cleanup_request()
Example 22
def clone_database(self,
                   origin_database,
                   clone_name,
                   plan,
                   environment,
                   task_history=None,
                   user=None):
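    # clone_infra-based clone flow: on failure or SoftTimeLimitExceeded the
    # partially created infrastructure is destroyed and the error recorded
    # on the task history.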
    AuditRequest.new_request("clone_database", user, "localhost")
    try:
        worker_name = get_worker_name()
        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" %
                 (self.request.id, self.request.task, self.request.kwargs,
                  str(self.request.args)))

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=worker_name)

        LOG.info("origin_database: %s" % origin_database)

        task_history.update_details(persist=True, details="Loading Process...")
        result = clone_infra(plan=plan,
                             environment=environment,
                             name=clone_name,
                             team=origin_database.team,
                             project=origin_database.project,
                             description=origin_database.description,
                             task=task_history,
                             clone=origin_database,
                             contacts=origin_database.contacts,
                             subscribe_to_email_events=origin_database.
                             subscribe_to_email_events)

        if result['created'] is False:
            if 'exceptions' in result:
                error = "\n\n".join(
                    ": ".join(err)
                    for err in result['exceptions']['error_codes'])
                traceback = "\n\nException Traceback\n".join(
                    result['exceptions']['traceback'])
                error = "{}\n{}".format(error, traceback)
            else:
                error = "There is not any infra-structure to allocate this database."

            task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                           details=error)
            return

        task_history.update_dbid(db=result['database'])
        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS,
            details='\nDatabase cloned successfully')

    except SoftTimeLimitExceeded:
        LOG.error("task id %s - timeout exceeded" % self.request.id)
        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details="timeout exceeded")
        if 'result' in locals() and result['created']:
            destroy_infra(databaseinfra=result['databaseinfra'],
                          task=task_history)
            return
    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        if 'result' in locals() and result['created']:
            destroy_infra(databaseinfra=result['databaseinfra'],
                          task=task_history)

        task_history.update_status_for(TaskHistory.STATUS_ERROR,
                                       details=traceback)

        return

    finally:
        AuditRequest.cleanup_request()
Example 23
def resize_database(self, database, cloudstackpack, task_history=None, user=None):
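    # Per-instance resize variant: each instance is resized with
    # resize_database_instance (switching the master first on HA plans) and
    # already-resized instances are rolled back with
    # undo_resize_database_instance if any step fails.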
    AuditRequest.new_request("resize_database", user, "localhost")

    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                            user=user, worker_name=worker_name)
        from util.providers import resize_database_instance
        from util.providers import undo_resize_database_instance
        from util import get_credentials_for
        from dbaas_cloudstack.provider import CloudStackProvider
        from dbaas_credentials.models import CredentialType

        cs_credentials = get_credentials_for(environment=database.environment,
                                             credential_type=CredentialType.CLOUDSTACK)
        cs_provider = CloudStackProvider(credentials=cs_credentials)

        databaseinfra = database.databaseinfra
        driver = databaseinfra.get_driver()
        instances = driver.get_database_instances()
        resized_instances = []

        for instance in instances:
            host = instance.hostname
            host_attr = host.cs_host_attributes.get()
            offering = cs_provider.get_vm_offering_id(vm_id=host_attr.vm_id,
                                                      project_id=cs_credentials.project)

            if offering == cloudstackpack.offering:
                LOG.info("Instance offering: {}".format(offering))
                continue

            if databaseinfra.plan.is_ha:
                LOG.info("Waiting 60s to check continue...")
                sleep(60)
                driver.check_replication_and_switch(instance)
                LOG.info("Waiting 60s to check continue...")
                sleep(60)

            result = resize_database_instance(database=database,
                                              cloudstackpack=cloudstackpack,
                                              instance=instance,
                                              task=task_history)
            result = {"created": True}

            if result['created'] == False:
                if 'exceptions' in result:
                    error = "\n".join(": ".join(err)
                                      for err in result['exceptions']['error_codes'])
                    traceback = "\nException Traceback\n".join(
                        result['exceptions']['traceback'])
                    error = "{}\n{}\n{}".format(error, traceback, error)
                else:
                    error = "Something went wrong."

                break

            else:
                resized_instances.append(instance)

        if len(instances) == len(resized_instances):
            from dbaas_cloudstack.models import DatabaseInfraOffering
            LOG.info('Updating offering DatabaseInfra.')

            databaseinfraoffering = DatabaseInfraOffering.objects.get(
                databaseinfra=databaseinfra)
            databaseinfraoffering.offering = cloudstackpack.offering
            databaseinfraoffering.save()

            task_history.update_status_for(TaskHistory.STATUS_SUCCESS,
                                           details='Resize successfully done.')
            return

        for instance in resized_instances:
            if databaseinfra.plan.is_ha:
                if driver.check_instance_is_master(instance):
                    LOG.info("Waiting 60s to check continue...")
                    sleep(60)
                    driver.check_replication_and_switch(instance, attempts=60)
                    LOG.info("Waiting 60s to check continue...")
                    sleep(60)

            undo_resize_database_instance(database=database,
                                          cloudstackpack=cloudstackpack,
                                          instance=instance,
                                          task=task_history)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
        return

    except Exception as e:
        error = "Resize Database ERROR: {}".format(e)
        LOG.error(error)
        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=error)
Example 24
def execute_database_region_migration_undo(self,
                                           database_region_migration_detail_id,
                                           task_history=None, user=None):
    AuditRequest.new_request(
        "execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.is_migration_up = False
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
        for instance in databaseinfra.instances.filter(future_instance__isnull=False):
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)

        target_instances = []
        target_hosts = []
        for instance in databaseinfra.instances.filter(future_instance__isnull=True):
            target_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                target_hosts.append(instance.hostname)

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan_id

        if not source_hosts:
            raise Exception('There is no source host')
        if not source_instances:
            raise Exception('There is no source instance')
        if not target_hosts:
            raise Exception('There is no target host')
        if not target_instances:
            raise Exception('There is no target instance')

        source_secondary_ips = DatabaseInfraAttr.objects.filter(databaseinfra=databaseinfra,
                                                                equivalent_dbinfraattr__isnull=False)

        source_secondary_ips = list(source_secondary_ips)

        workflow_dict = build_dict(database_region_migration_detail=database_region_migration_detail,
                                   database_region_migration=database_region_migration,
                                   database=database,
                                   databaseinfra=databaseinfra,
                                   source_environment=source_environment,
                                   target_environment=target_environment,
                                   steps=workflow_steps,
                                   engine=engine,
                                   source_instances=source_instances,
                                   source_plan=source_plan,
                                   target_plan=target_plan,
                                   source_hosts=source_hosts,
                                   target_instances=target_instances,
                                   target_hosts=target_hosts,
                                   source_secondary_ips=source_secondary_ips,
                                   )

        stop_workflow(workflow_dict=workflow_dict, task=task_history)

        current_step = database_region_migration.current_step
        database_region_migration.current_step = current_step - 1
        database_region_migration.save()

        database_region_migration_detail.status = database_region_migration_detail.SUCCESS
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_SUCCESS, details='Database region migration was successful')

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=traceback)

        database_region_migration_detail.status = database_region_migration_detail.ERROR
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        return

    finally:
        AuditRequest.cleanup_request()
Esempio n. 25
0
def execute_database_region_migration(self,
                                      database_region_migration_detail_id,
                                      task_history=None, user=None):
    AuditRequest.new_request(
        "execute_database_region_migration", user, "localhost")
    try:

        if task_history:
            arguments = task_history.arguments
        else:
            arguments = None

        task_history = TaskHistory.register(request=self.request,
                                            task_history=task_history,
                                            user=user,
                                            worker_name=get_worker_name())

        if arguments:
            task_history.arguments = arguments
            task_history.save()

        database_region_migration_detail = DatabaseRegionMigrationDetail.objects.get(
            id=database_region_migration_detail_id)

        database_region_migration_detail.started_at = datetime.now()
        database_region_migration_detail.status = database_region_migration_detail.RUNNING
        database_region_migration_detail.save()

        database_region_migration = database_region_migration_detail.database_region_migration
        database = database_region_migration.database
        databaseinfra = database.databaseinfra
        source_environment = databaseinfra.environment
        target_environment = source_environment.equivalent_environment
        engine = database.engine_type
        steps = get_engine_steps(engine)
        workflow_steps = steps[
            database_region_migration_detail.step].step_classes
        source_instances = []
        source_hosts = []
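        # On the first step every instance takes part; on later steps only instances
        # that were already paired with a future_instance on a previous step count
        # as the source side.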
        for instance in Instance.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and not instance.future_instance:
                continue
            source_instances.append(instance)
            if instance.instance_type != instance.REDIS:
                source_hosts.append(instance.hostname)
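        # The current plan and (CloudStack) offering, together with the equivalents
        # the target environment is supposed to use.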

        source_plan = databaseinfra.plan
        target_plan = source_plan.equivalent_plan

        source_offering = databaseinfra.cs_dbinfra_offering.get().offering
        target_offering = source_offering.equivalent_offering

        source_secondary_ips = []
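        # Same rule as for instances: after the first step only secondary IPs that
        # already have an equivalent DatabaseInfraAttr on the target side are kept.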

        for secondary_ip in DatabaseInfraAttr.objects.filter(databaseinfra=databaseinfra):
            if database_region_migration.current_step > 0 and\
                    not secondary_ip.equivalent_dbinfraattr:
                continue
            source_secondary_ips.append(secondary_ip)

        workflow_dict = build_dict(
            databaseinfra=databaseinfra,
            database=database,
            source_environment=source_environment,
            target_environment=target_environment,
            steps=workflow_steps,
            source_instances=source_instances,
            source_hosts=source_hosts,
            source_plan=source_plan,
            target_plan=target_plan,
            source_offering=source_offering,
            target_offering=target_offering,
            source_secondary_ips=source_secondary_ips,
        )
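        # start_workflow() runs the selected step classes in order and is expected
        # to report the outcome in workflow_dict['created'], filling
        # workflow_dict['exceptions'] on failure.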

        start_workflow(workflow_dict=workflow_dict, task=task_history)

        if not workflow_dict['created']:

            if 'exceptions' in workflow_dict:
                error = "\n".join(
                    ": ".join(err) for err in workflow_dict['exceptions']['error_codes'])
                traceback = "\nException Traceback\n".join(
                    workflow_dict['exceptions']['traceback'])
                error = "{}\n{}\n{}".format(error, traceback, error)
            else:
                error = "There is not any infra-structure to allocate this database."

            database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            task_history.update_status_for(
                TaskHistory.STATUS_ERROR, details=error)

            return

        else:
            database_region_migration_detail.status = database_region_migration_detail.SUCCESS
            database_region_migration_detail.finished_at = datetime.now()
            database_region_migration_detail.save()

            current_step = database_region_migration.current_step
            database_region_migration.current_step = current_step + 1
            database_region_migration.save()

            task_history.update_status_for(
                TaskHistory.STATUS_SUCCESS, details='Database region migration was successful')
            return

    except Exception as e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        database_region_migration_detail.status = database_region_migration_detail.ROLLBACK
        database_region_migration_detail.finished_at = datetime.now()
        database_region_migration_detail.save()

        task_history.update_status_for(
            TaskHistory.STATUS_ERROR, details=traceback)
        return

    finally:
        AuditRequest.cleanup_request()
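
A minimal usage sketch, not part of the original example: execute_database_region_migration is written as a bound Celery task (it reads self.request), so a caller would presumably dispatch it asynchronously rather than call it directly. The detail object and user name below are placeholders.

# Hypothetical call site, assuming a saved DatabaseRegionMigrationDetail (detail)
# and the requesting user's name are available to the caller.
execute_database_region_migration.delay(
    database_region_migration_detail_id=detail.id,
    user="john.doe",
)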
Esempio n. 26
0
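# Note: this example starts mid-function. The elided body above is assumed to have
# registered task_history and made a provider call whose outcome was stored in
# result, which the error handler below inspects before destroying the infra.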
        return

    except Exception, e:
        traceback = full_stack()
        LOG.error("Ops... something went wrong: %s" % e)
        LOG.error(traceback)

        if 'result' in locals() and result['created']:
            destroy_infra(databaseinfra=result['databaseinfra'], task=task_history)

        task_history.update_status_for(TaskHistory.STATUS_ERROR, details=traceback)
        return

    finally:
        AuditRequest.cleanup_request()



@app.task(bind=True)
def destroy_database(self, database, task_history=None, user=None):
    # register History
    AuditRequest.new_request("destroy_database", user, "localhost")
    try:
        worker_name = get_worker_name()
        task_history = TaskHistory.register(request=self.request, task_history=task_history,
                                            user=user, worker_name=worker_name)

        LOG.info("id: %s | task: %s | kwargs: %s | args: %s" % (
            self.request.id, self.request.task, self.request.kwargs, str(self.request.args)))