def create(self, request):
    """Validate the payload and enqueue an asynchronous create_database task.

    Returns HTTP 201 with a URL where the task status can be polled, or
    HTTP 400 with the serializer errors when validation fails.
    """
    serializer = self.get_serializer(data=request.DATA, files=request.FILES)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    self.pre_save(serializer.object)
    data = serializer.restore_fields(request.DATA, request.FILES)

    # Record the task before dispatching so the user can track it right away.
    task_history = TaskHistory()
    task_history.task_name = "create_database"
    task_history.task_status = task_history.STATUS_PENDING
    task_history.arguments = "Database name: {}".format(data['name'])
    task_history.save()

    result = create_database.delay(
        name=data['name'],
        plan=data['plan'],
        environment=data['environment'],
        team=data['team'],
        project=data['project'],
        description=data['description'],
        subscribe_to_email_events=data['subscribe_to_email_events'],
        contacts=data['contacts'],
        task_history=task_history,
        user=request.user)

    headers = self.get_success_headers(data)
    task_url = Site.objects.get_current().domain + \
        '/api/task?task_id=%s' % str(result.id)
    return Response({"task": task_url},
                    status=status.HTTP_201_CREATED,
                    headers=headers)
def destroy(self, user):
    """Remove this database.

    Deletes synchronously, except for CloudStack databases already in
    quarantine, which are torn down through an async destroy_database task.
    """
    # Not yet in quarantine: plain synchronous delete.
    if not self.is_in_quarantine:
        self.delete()
        return

    # Quarantined but not provisioned on CloudStack: plain delete as well.
    if self.plan.provider != self.plan.CLOUDSTACK:
        self.delete()
        return

    LOG.debug(
        "call destroy_database - name={}, team={}, "
        "project={}, user={}".format(self.name, self.team, self.project, user))

    # Imported here (not at module level) to avoid a circular import.
    from notification.models import TaskHistory
    from notification.tasks import destroy_database

    history = TaskHistory()
    history.task_name = "destroy_database"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Database name: {}".format(self.name)
    history.user = user
    history.save()

    destroy_database.delay(database=self, task_history=history, user=user)
    return
def upgrade_retry(self, request, database_id):
    """Retry the last failed plan upgrade, resuming from the step it died on.

    Redirects to the database change page with an error message when a retry
    is not possible, otherwise enqueues the task and shows the task list.
    """
    database = Database.objects.get(id=database_id)
    can_do_upgrade, error = database.can_do_upgrade_retry()

    if can_do_upgrade:
        source_plan = database.databaseinfra.plan
        last_upgrade = database.upgrades.filter(
            source_plan=source_plan).last()
        if not last_upgrade:
            error = "Database does not have upgrades from {} {}!".format(
                source_plan.engine.engine_type, source_plan.engine.version)
        elif not last_upgrade.is_status_error:
            error = "Cannot do retry, last upgrade status is '{}'!".format(
                last_upgrade.get_status_display())
        else:
            # Resume from where the previous attempt stopped.
            since_step = last_upgrade.current_step

    if error:
        self.message_user(request, error, level=messages.ERROR)
        return HttpResponseRedirect(
            reverse('admin:logical_database_change', args=[database.id]))

    history = TaskHistory()
    history.task_name = "upgrade_database_retry"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Retrying upgrade database {}".format(database)
    history.user = request.user
    history.save()

    upgrade_database.delay(database, request.user, history, since_step)
    return HttpResponseRedirect(
        reverse('admin:notification_taskhistory_changelist'))
def add_database_instances(self, request, database_id):
    """Enqueue an async task that adds new instances to a database.

    Refuses to enqueue when another task mentioning this database is already
    queued or running; either way, redirects to the task-history changelist.
    """
    database = Database.objects.get(id=database_id)
    url = reverse('admin:notification_taskhistory_changelist')

    # Guard: never stack tasks for the same database.
    tasks = TaskHistory.objects.filter(
        arguments__contains=database.name,
        task_status__in=['RUNNING', 'PENDING', 'WAITING'])
    if tasks:
        LOG.info('there is a task')
        return HttpResponseRedirect(url)

    LOG.info('it is ok')
    task_history = TaskHistory()
    task_history.task_name = "add_database_instances"
    task_history.task_status = task_history.STATUS_WAITING
    task_history.arguments = "Adding instances on database {}".format(
        database)
    task_history.user = request.user
    task_history.save()

    add_instances_to_database.delay(database, request.user, task_history)
    return HttpResponseRedirect(url)
def create(self, request):
    """Validate the payload and enqueue an asynchronous create_database task.

    On success answers HTTP 201 with the task polling URL; on validation
    failure answers HTTP 400 with the serializer errors.
    """
    serializer = self.get_serializer(data=request.DATA, files=request.FILES)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    self.pre_save(serializer.object)
    data = serializer.restore_fields(request.DATA, request.FILES)

    # Persist the task record first so it is trackable immediately.
    history = TaskHistory()
    history.task_name = "create_database"
    history.task_status = history.STATUS_PENDING
    history.arguments = "Database name: {}".format(data['name'])
    history.save()

    result = create_database.delay(
        name=data['name'],
        plan=data['plan'],
        environment=data['environment'],
        team=data['team'],
        project=data['project'],
        description=data['description'],
        task_history=history,
        user=request.user)

    headers = self.get_success_headers(data)
    task_url = Site.objects.get_current().domain + \
        '/api/task?task_id=%s' % str(result.id)
    return Response({"task": task_url},
                    status=status.HTTP_201_CREATED,
                    headers=headers)
def delete_model(modeladmin, request, obj):
    """Delete a database from the admin.

    CloudStack databases already in quarantine are destroyed through an
    async destroy_database task; every other case is a plain delete.
    """
    LOG.debug("Deleting {}".format(obj))
    database = obj

    # Only quarantined CloudStack databases need the async teardown; the
    # original duplicated the plain-delete branch and computed an unused
    # redirect URL, both removed here.
    if (database.is_in_quarantine and
            database.plan.provider == database.plan.CLOUDSTACK):
        LOG.debug(
            "call destroy_database - name=%s, team=%s, project=%s, user=%s" % (
                database.name, database.team, database.project, request.user))

        task_history = TaskHistory()
        task_history.task_name = "destroy_database"
        task_history.task_status = task_history.STATUS_WAITING
        task_history.arguments = "Database name: {}".format(database.name)
        task_history.user = request.user
        task_history.save()

        destroy_database.delay(
            database=database, task_history=task_history, user=request.user)
    else:
        database.delete()
def delete_model(modeladmin, request, obj):
    """Delete a database from the admin, refusing while it is dead, in use
    by another task, or mid-migration.

    Quarantined CloudStack databases are destroyed asynchronously; all
    other deletable cases are plain deletes.
    """
    LOG.debug("Deleting {}".format(obj))
    database = obj

    def deny(message):
        # Report why the delete was refused and go back to the changelist.
        modeladmin.message_user(request, message, level=messages.ERROR)
        return HttpResponseRedirect(
            reverse('admin:logical_database_changelist'))

    if database.status != Database.ALIVE or not database.database_status.is_alive:
        return deny(
            "Database {} is not alive and cannot be deleted".format(
                database.name))
    if database.is_beeing_used_elsewhere():
        return deny(
            "Database {} cannot be deleted because it is in use by another task."
            .format(database.name))
    if database.has_migration_started():
        return deny(
            "Database {} cannot be deleted because it is beeing migrated.".
            format(database.name))

    if database.is_in_quarantine:
        if database.plan.provider == database.plan.CLOUDSTACK:
            LOG.debug(
                "call destroy_database - name=%s, team=%s, project=%s, user=%s"
                % (database.name, database.team, database.project,
                   request.user))

            task_history = TaskHistory()
            task_history.task_name = "destroy_database"
            task_history.task_status = task_history.STATUS_WAITING
            task_history.arguments = "Database name: {}".format(database.name)
            task_history.user = request.user
            task_history.save()

            destroy_database.delay(database=database,
                                   task_history=task_history,
                                   user=request.user)
            url = reverse('admin:notification_taskhistory_changelist')
        else:
            database.delete()
    else:
        database.delete()
def register_task(self):
    """Create and persist a RUNNING TaskHistory row for the zone-switch job.

    Returns the saved TaskHistory instance.
    """
    history = TaskHistory()
    # Timestamp-derived id; this job is not dispatched through celery,
    # so there is no celery task id to reuse.
    history.task_id = datetime.now().strftime("%Y%m%d%H%M%S")
    history.task_name = "switch_masters_in_zone"
    history.relevance = TaskHistory.RELEVANCE_WARNING
    history.task_status = TaskHistory.STATUS_RUNNING
    history.context = {'hostname': gethostname()}
    history.user = '******'
    history.save()
    return history
def mongodb_engine_version_upgrade(self, request, database_id):
    """Enqueue the MongoDB 2.4 -> 3.0 engine upgrade for a database.

    Runs a series of guard checks first; any failure redirects back to the
    database change page with an explanatory error message.
    """
    from notification.tasks import upgrade_mongodb_24_to_30

    url = reverse('admin:logical_database_change', args=[database_id])
    database = Database.objects.get(id=database_id)

    def deny(message):
        # Refuse the upgrade and bounce back to the database page.
        self.message_user(request, message, level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.is_in_quarantine:
        return deny("Database in quarantine and cannot be upgraded!")
    if database.status != Database.ALIVE or not database.database_status.is_alive:
        return deny("Database is dead and cannot be upgraded!")
    if database.has_flipperfox_migration_started():
        return deny(
            "Database {} is being migrated and cannot be upgraded!".format(
                database.name))
    if not database.is_mongodb_24:
        return deny(
            "Database {} cannot be upgraded. Please contact you DBA".
            format(database.name))
    if not request.user.has_perm(constants.PERM_UPGRADE_MONGO24_TO_30):
        return deny(
            "You have no permissions to upgrade {}. Please, contact your DBA"
            .format(database.name))

    history = TaskHistory()
    history.task_name = "upgrade_mongodb_24_to_30"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Upgrading MongoDB 2.4 to 3.0"
    history.user = request.user
    history.save()

    upgrade_mongodb_24_to_30.delay(
        database=database, user=request.user, task_history=history)

    return HttpResponseRedirect(
        reverse('admin:notification_taskhistory_changelist'))
def databaseregionmigration_view(self, request, databaseregionmigration_id):
    """Schedule the next region-migration step (or a rollback of the last
    one) to run at the user-chosen time.

    GET renders the scheduling form; a valid POST records a migration
    detail row plus a TaskHistory entry, enqueues the celery task with the
    chosen ``eta``, and redirects to the user's task list.

    Local variable names are part of the template context (``locals()``)
    and must not be renamed.
    """
    form = DatabaseRegionMigrationDetailForm
    database_region_migration = DatabaseRegionMigration.objects.get(
        id=databaseregionmigration_id)

    if request.method == 'POST':
        form = DatabaseRegionMigrationDetailForm(request.POST)
        if form.is_valid():
            scheduled_for = form.cleaned_data['scheduled_for']

            database_region_migration_detail = DatabaseRegionMigrationDetail(
                database_region_migration=database_region_migration,
                step=database_region_migration.current_step,
                scheduled_for=scheduled_for,
                created_by=request.user.username)
            database_region_migration_detail.save()

            task_history = TaskHistory()
            task_history.task_name = "execute_database_region_migration"
            task_history.task_status = task_history.STATUS_WAITING
            description = database_region_migration.description()
            # NOTE: the original string had an accidental backslash
            # continuation that embedded a run of spaces; normalized here.
            task_history.arguments = "Database name: {}, Macro step: {}".format(
                database_region_migration.database.name, description)
            task_history.user = request.user
            task_history.save()

            is_rollback = request.GET.get('rollback')

            # BUG FIX: replace()/astimezone() return new datetimes; the
            # original discarded the result, so celery received the naive
            # local datetime as eta. Convert local -> UTC and keep it.
            scheduled_for = scheduled_for.replace(
                tzinfo=tz.tzlocal()).astimezone(tz.tzutc())

            if is_rollback:
                LOG.info("Rollback!")
                # Rolling back re-runs the previous step in reverse.
                database_region_migration_detail.step -= 1
                database_region_migration_detail.save()
                task = execute_database_region_migration_undo.apply_async(
                    args=[database_region_migration_detail.id,
                          task_history, request.user],
                    eta=scheduled_for)
            else:
                task = execute_database_region_migration.apply_async(
                    args=[database_region_migration_detail.id,
                          task_history, request.user],
                    eta=scheduled_for)

            database_region_migration_detail.celery_task_id = task.task_id
            database_region_migration_detail.save()

            url = reverse('admin:notification_taskhistory_changelist')
            return HttpResponseRedirect(
                url + "?user=%s" % request.user.username)

    return render_to_response(
        "region_migration/databaseregionmigrationdetail/schedule_next_step.html",
        locals(), context_instance=RequestContext(request))
def restore_snapshot(self, request, database_id):
    """Restore a database from one of its snapshots.

    Guard checks refuse the restore when the database is quarantined, dead,
    busy with another task, or being migrated. A valid POST enqueues the
    recovery and redirects to the user's task list; otherwise the restore
    form is rendered.

    Local names feed the template context via ``locals()`` — keep them.
    """
    database = Database.objects.get(id=database_id)
    url = reverse('admin:logical_database_change', args=[database.id])

    if database.is_in_quarantine:
        self.message_user(
            request, "Database in quarantine and cannot be restored",
            level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.status != Database.ALIVE or not database.database_status.is_alive:
        self.message_user(
            request, "Database is dead and cannot be restored",
            level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.is_beeing_used_elsewhere():
        self.message_user(
            request,
            "Database is beeing used by another task, please check your tasks",
            level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.has_migration_started():
        self.message_user(
            request,
            "Database {} cannot be restored because it is beeing migrated.".format(
                database.name),
            level=messages.ERROR)
        url = reverse('admin:logical_database_changelist')
        return HttpResponseRedirect(url)

    form = None
    if request.method == 'POST':
        form = RestoreDatabaseForm(
            request.POST, initial={"database_id": database_id},)
        if form.is_valid():
            target_snapshot = request.POST.get('target_snapshot')

            task_history = TaskHistory()
            task_history.task_name = "restore_snapshot"
            task_history.task_status = task_history.STATUS_WAITING
            task_history.arguments = "Restoring {} to an older version.".format(
                database.name)
            task_history.user = request.user
            task_history.save()

            Database.recover_snapshot(database=database,
                                      snapshot=target_snapshot,
                                      user=request.user,
                                      task_history=task_history.id)
            url = reverse('admin:notification_taskhistory_changelist')
            return HttpResponseRedirect(
                url + "?user=%s" % request.user.username)
    else:
        form = RestoreDatabaseForm(initial={"database_id": database_id, })

    return render_to_response("logical/database/restore.html",
                              locals(),
                              context_instance=RequestContext(request))
def post(self, request, *args, **kwargs):
    """Kick off the asynchronous analyze_databases job and show the task list."""
    # Imported here to avoid importing the task module at load time.
    from dbaas_services.analyzing.tasks import analyze_databases

    history = TaskHistory()
    history.task_name = "analyze_databases"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Waiting to start"
    history.save()

    analyze_databases.delay(task_history=history)
    return HttpResponseRedirect(
        reverse('admin:notification_taskhistory_changelist'))
def resize(cls, database, cloudstackpack, user):
    """Enqueue an asynchronous resize of ``database`` to ``cloudstackpack``."""
    from notification.tasks import resize_database
    from notification.models import TaskHistory

    history = TaskHistory()
    history.task_name = "resize_database"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Database name: {}".format(database.name)
    history.user = user
    history.save()

    resize_database.delay(database=database,
                          cloudstackpack=cloudstackpack,
                          user=user,
                          task_history=history)
def delete_model(modeladmin, request, obj):
    """Delete a database from the admin with safety checks.

    Refuses while the database is dead, used by another task, or being
    migrated. Quarantined CloudStack databases are torn down through the
    async destroy_database task; other cases are plain deletes.
    """
    LOG.debug("Deleting {}".format(obj))
    database = obj

    changelist = reverse('admin:logical_database_changelist')

    # Guard clauses: each failed precondition reports and redirects.
    if database.status != Database.ALIVE or not database.database_status.is_alive:
        modeladmin.message_user(
            request,
            "Database {} is not alive and cannot be deleted".format(
                database.name),
            level=messages.ERROR)
        return HttpResponseRedirect(changelist)

    if database.is_beeing_used_elsewhere():
        modeladmin.message_user(
            request,
            "Database {} cannot be deleted because it is in use by another task.".format(
                database.name),
            level=messages.ERROR)
        return HttpResponseRedirect(changelist)

    if database.has_migration_started():
        modeladmin.message_user(
            request,
            "Database {} cannot be deleted because it is beeing migrated.".format(
                database.name),
            level=messages.ERROR)
        return HttpResponseRedirect(changelist)

    if database.is_in_quarantine:
        if database.plan.provider == database.plan.CLOUDSTACK:
            LOG.debug(
                "call destroy_database - name=%s, team=%s, project=%s, user=%s"
                % (database.name, database.team, database.project,
                   request.user))

            history = TaskHistory()
            history.task_name = "destroy_database"
            history.task_status = history.STATUS_WAITING
            history.arguments = "Database name: {}".format(database.name)
            history.user = request.user
            history.save()

            destroy_database.delay(database=database,
                                   task_history=history,
                                   user=request.user)
            url = reverse('admin:notification_taskhistory_changelist')
        else:
            database.delete()
    else:
        database.delete()
def register_task(self, database):
    """Create and persist a RUNNING TaskHistory row tied to ``database``.

    Returns the saved TaskHistory instance.
    """
    history = TaskHistory()
    # Timestamp-derived id; no celery id exists for this job.
    history.task_id = datetime.now().strftime("%Y%m%d%H%M%S")
    history.task_name = self.TASK_NAME
    history.relevance = TaskHistory.RELEVANCE_WARNING
    history.task_status = TaskHistory.STATUS_RUNNING
    history.context = {'hostname': gethostname()}
    history.user = '******'
    # Link the task record back to the database it concerns.
    history.db_id = database.id
    history.object_class = "logical_database"
    history.object_id = database.id
    history.database_name = database.name
    history.arguments = 'Database_name: {}'.format(database.name)
    history.save()
    return history
def resize(cls, database, cloudstackpack, user):
    """Enqueue an asynchronous resize of ``database`` to the given pack."""
    from notification.tasks import resize_database
    from notification.models import TaskHistory

    task_history = TaskHistory()
    task_history.task_name = "resize_database"
    task_history.task_status = task_history.STATUS_WAITING
    task_history.arguments = "Database name: {}".format(database.name)
    task_history.user = user
    task_history.save()

    resize_database.delay(
        database=database,
        cloudstackpack=cloudstackpack,
        user=user,
        task_history=task_history,
    )
def restore(cls, database, snapshot, user):
    """Record a restore_snapshot task and start recovering ``database``."""
    from notification.models import TaskHistory

    history = TaskHistory()
    history.task_name = "restore_snapshot"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Restoring {} to an older version.".format(
        database.name)
    history.user = user
    history.save()

    # recover_snapshot receives the history id, not the instance.
    Database.recover_snapshot(database=database,
                              snapshot=snapshot,
                              user=user,
                              task_history=history.id)
def clone(cls, database, clone_name, plan, environment, user):
    """Enqueue an asynchronous clone of ``database`` as ``clone_name``."""
    from notification.tasks import clone_database
    from notification.models import TaskHistory

    history = TaskHistory()
    history.task_name = "clone_database"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Database name: {}".format(database.name)
    history.user = user
    history.save()

    clone_database.delay(origin_database=database,
                         clone_name=clone_name,
                         plan=plan,
                         environment=environment,
                         user=user,
                         task_history=history)
def clone(cls, database, clone_name, plan, environment, user):
    """Enqueue an asynchronous clone of ``database`` into a new database."""
    from notification.tasks import clone_database
    from notification.models import TaskHistory

    task_history = TaskHistory()
    task_history.task_name = "clone_database"
    task_history.task_status = task_history.STATUS_WAITING
    task_history.arguments = "Database name: {}".format(database.name)
    task_history.user = user
    task_history.save()

    clone_database.delay(
        origin_database=database,
        clone_name=clone_name,
        plan=plan,
        environment=environment,
        user=user,
        task_history=task_history,
    )
def disk_resize(cls, database, new_disk_offering, user):
    """Enqueue an asynchronous disk resize of ``database``.

    ``new_disk_offering`` is the primary key of the target DiskOffering.
    """
    from notification.tasks import database_disk_resize
    from notification.models import TaskHistory
    from physical.models import DiskOffering

    history = TaskHistory()
    history.task_name = "database_disk_resize"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Database name: {}".format(database.name)
    history.user = user
    history.save()

    # Resolve the offering id into a model instance for the task.
    disk_offering = DiskOffering.objects.get(id=new_disk_offering)
    database_disk_resize.delay(database=database,
                               disk_offering=disk_offering,
                               user=user,
                               task_history=history)
def upgrade(self, request, database_id):
    """Enqueue a plan upgrade for a database.

    Redirects back to the database page with an error when the upgrade is
    not allowed, otherwise to the task-history changelist.
    """
    database = Database.objects.get(id=database_id)
    can_do_upgrade, error = database.can_do_upgrade()
    if not can_do_upgrade:
        self.message_user(request, error, level=messages.ERROR)
        return HttpResponseRedirect(
            reverse('admin:logical_database_change', args=[database.id]))

    history = TaskHistory()
    history.task_name = "upgrade_database"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Upgrading database {}".format(database)
    history.user = request.user
    history.save()

    upgrade_database.delay(database, request.user, history)
    return HttpResponseRedirect(
        reverse('admin:notification_taskhistory_changelist'))
def databaseregionmigration_view(self, request, databaseregionmigration_id):
    """Execute the next region-migration step right away.

    Records a migration detail row and a TaskHistory entry, enqueues the
    celery task with a one-second countdown, and redirects to the user's
    task list.

    The original version carried a large commented-out block and an
    unreachable ``render_to_response`` after the unconditional return;
    both removed, along with the unused ``task`` binding.
    """
    database_region_migration = DatabaseRegionMigration.objects.get(
        id=databaseregionmigration_id)

    database_region_migration_detail = DatabaseRegionMigrationDetail(
        database_region_migration=database_region_migration,
        step=database_region_migration.next_step,
        scheduled_for=datetime.now(),
        created_by=request.user.username)
    database_region_migration_detail.save()

    task_history = TaskHistory()
    task_history.task_name = "execute_database_region_migration"
    task_history.task_status = task_history.STATUS_WAITING
    task_history.arguments = "Database name: {}, Step: {}".format(
        database_region_migration.database.name,
        database_region_migration_detail
        .database_region_migration.next_step_description())
    task_history.user = request.user
    task_history.save()

    # countdown=1 lets the surrounding DB transaction commit before the
    # worker picks the task up.
    execute_database_region_migration.apply_async(
        args=[database_region_migration_detail.id,
              task_history, request.user],
        countdown=1)

    url = reverse('admin:notification_taskhistory_changelist')
    return HttpResponseRedirect(url + "?user=%s" % request.user.username)
def mongodb_engine_version_upgrade(self, request, database_id):
    """Enqueue the MongoDB 2.4 -> 3.0 engine upgrade for a database.

    Guard checks refuse the upgrade for quarantined, dead, migrating, or
    non-2.4 databases, each redirecting back to the database change page.
    """
    from notification.tasks import upgrade_mongodb_24_to_30

    database = Database.objects.get(id=database_id)
    url = reverse('admin:logical_database_change', args=[database_id])

    def deny(message):
        # Refuse the upgrade and return to the database page.
        self.message_user(request, message, level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.is_in_quarantine:
        return deny("Database in quarantine and cannot be upgraded!")
    if database.status != Database.ALIVE or not database.database_status.is_alive:
        return deny("Database is dead and cannot be upgraded!")
    if database.has_migration_started():
        return deny(
            "Database {} is beeing migrated and cannot be upgraded!".format(
                database.name))
    if not database.is_mongodb_24:
        return deny(
            "Database {} cannot be upgraded, please contact you DBA.".format(
                database.name))

    history = TaskHistory()
    history.task_name = "upgrade_mongodb_24_to_30"
    history.task_status = history.STATUS_WAITING
    history.arguments = "Upgrading MongoDB 2.4 to 3.0"
    history.user = request.user
    history.save()

    upgrade_mongodb_24_to_30.delay(
        database=database, user=request.user, task_history=history)

    return HttpResponseRedirect(
        reverse('admin:notification_taskhistory_changelist'))
def add_view(self, request, form_url='', extra_context=None):
    # Admin "add database" view: validates the form, records a TaskHistory
    # row and enqueues the async create_database task. Falls back to the
    # stock admin add view for GET requests and invalid forms.
    self.form = DatabaseForm
    try:
        if request.method == 'POST':
            teams = Team.objects.filter(users=request.user)
            LOG.info("user %s teams: %s" % (request.user, teams))
            # A user with no team cannot own a database.
            if not teams:
                self.message_user(request, self.database_add_perm_message,
                                  level=messages.ERROR)
                return HttpResponseRedirect(
                    reverse('admin:logical_database_changelist'))

            # if no team is specified and the user has only one team, then
            # set it to the database (request.POST is replaced by a mutable
            # copy carrying the injected team pk)
            if teams.count() == 1 and request.method == 'POST' and not request.user.has_perm(
                    self.perm_add_database_infra):
                post_data = request.POST.copy()
                if 'team' in post_data:
                    post_data['team'] = u"%s" % teams[0].pk
                request.POST = post_data

            form = DatabaseForm(request.POST)
            if not form.is_valid():
                # Let the stock admin view re-render with form errors.
                return super(DatabaseAdmin, self).add_view(
                    request, form_url, extra_context=extra_context)

            LOG.debug(
                "call create_database - name=%s, plan=%s, environment=%s, team=%s, project=%s, description=%s, user=%s" % (
                    form.cleaned_data['name'], form.cleaned_data['plan'],
                    form.cleaned_data['environment'], form.cleaned_data['team'],
                    form.cleaned_data['project'],
                    form.cleaned_data['description'], request.user))

            # Record the task so the user can track it while celery works.
            task_history = TaskHistory()
            task_history.task_name = "create_database"
            task_history.task_status = task_history.STATUS_WAITING
            task_history.arguments = "Database name: {}".format(
                form.cleaned_data['name'])
            task_history.user = request.user
            task_history.save()

            create_database.delay(name=form.cleaned_data['name'],
                                  plan=form.cleaned_data['plan'],
                                  environment=form.cleaned_data['environment'],
                                  team=form.cleaned_data['team'],
                                  project=form.cleaned_data['project'],
                                  description=form.cleaned_data['description'],
                                  task_history=task_history,
                                  user=request.user)

            url = reverse('admin:notification_taskhistory_changelist')
            # Redirect after POST
            return HttpResponseRedirect(url + "?user=%s" % request.user.username)
        else:
            return super(DatabaseAdmin, self).add_view(
                request, form_url, extra_context=extra_context)
    except DatabaseAlreadyExists:
        # The infra already holds a database with this name although DBaaS
        # does not know it; report and re-render the add form as a GET.
        self.message_user(request, _(
            'An inconsistency was found: The database "%s" already exists in infra-structure but not in DBaaS.') %
            request.POST['name'], level=messages.ERROR)
        request.method = 'GET'
        return super(DatabaseAdmin, self).add_view(
            request, form_url, extra_context=extra_context)
def restore_snapshot(self, request, database_id):
    """Restore a database from a chosen snapshot.

    Refuses when the database is quarantined, dead, busy, or migrating.
    A valid POST enqueues the recovery and redirects to the user's task
    list; otherwise renders the restore form.

    Local names are exported to the template via ``locals()`` — keep them.
    """
    database = Database.objects.get(id=database_id)
    url = reverse('admin:logical_database_change', args=[database.id])

    if database.is_in_quarantine:
        self.message_user(request,
                          "Database in quarantine and cannot be restored",
                          level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.status != Database.ALIVE or not database.database_status.is_alive:
        self.message_user(request,
                          "Database is dead and cannot be restored",
                          level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.is_beeing_used_elsewhere():
        self.message_user(
            request,
            "Database is beeing used by another task, please check your tasks",
            level=messages.ERROR)
        return HttpResponseRedirect(url)

    if database.has_migration_started():
        self.message_user(
            request,
            "Database {} cannot be restored because it is beeing migrated."
            .format(database.name),
            level=messages.ERROR)
        url = reverse('admin:logical_database_changelist')
        return HttpResponseRedirect(url)

    form = None
    if request.method == 'POST':
        form = RestoreDatabaseForm(
            request.POST,
            initial={"database_id": database_id},
        )
        if form.is_valid():
            target_snapshot = request.POST.get('target_snapshot')

            task_history = TaskHistory()
            task_history.task_name = "restore_snapshot"
            task_history.task_status = task_history.STATUS_WAITING
            task_history.arguments = "Restoring {} to an older version.".format(
                database.name)
            task_history.user = request.user
            task_history.save()

            Database.recover_snapshot(database=database,
                                      snapshot=target_snapshot,
                                      user=request.user,
                                      task_history=task_history.id)
            url = reverse('admin:notification_taskhistory_changelist')
            return HttpResponseRedirect(
                url + "?user=%s" % request.user.username)
    else:
        form = RestoreDatabaseForm(initial={
            "database_id": database_id, })

    return render_to_response("logical/database/restore.html",
                              locals(),
                              context_instance=RequestContext(request))
if any(plan): dbaas_plan = Plan.objects.get(pk=plan[0]['pk']) else: msg = "Plan was not found" return log_and_response(msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR) try: dbaas_environment = Environment.objects.get(name= env) except(ObjectDoesNotExist,IndexError), e: msg = "Environment does not exist." return log_and_response(msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR) task_history = TaskHistory() task_history.task_name="create_database" task_history.arguments="Database name: {}".format(name) task_history.save() create_database.delay(name=name, plan=dbaas_plan, environment=dbaas_environment, team=dbaas_team,project=None, description='Database from Tsuru', task_history=task_history, user=dbaas_user) return Response(status=status.HTTP_201_CREATED,) class ServiceRemove(APIView): renderer_classes = (JSONRenderer, JSONPRenderer) model = Database def delete(self, request, database_name, format=None): env = get_url_env(request) try:
def region_migration_start(self, infra, instances, since_step=None):
    # Celery task body: migrates an infra (and its instances) to another
    # region by running the ordered workflow step groups below, tracking
    # progress in a TaskHistory row. `since_step` lets a retry resume
    # from a given step index.
    #
    # Each dict maps a human-readable group label to the tuple of workflow
    # step classes executed for that group, in order.
    steps = [{
        'Disable monitoring and alarms': (
            'workflow.steps.util.zabbix.DestroyAlarms',
            'workflow.steps.util.db_monitor.DisableMonitoring',
        )}, {
        'Stopping infra': (
            'workflow.steps.util.database.Stop',
            'workflow.steps.util.database.CheckIsDown',
        )}, {
        'Creating new virtual machine': (
            'workflow.steps.util.vm.MigrationCreateNewVM',
        )}, {
        'Creating new infra': (
            'workflow.steps.util.vm.MigrationWaitingBeReady',
            'workflow.steps.util.infra.MigrationCreateInstance',
            'workflow.steps.util.disk.MigrationCreateExport',
        )}, {
        'Configuring new infra': (
            'workflow.steps.util.volume_provider.MountDataVolume',
            'workflow.steps.util.plan.InitializationMigration',
            'workflow.steps.util.plan.ConfigureMigration',
            'workflow.steps.util.metric_collector.ConfigureTelegraf',
        )}, {
        'Preparing new environment': (
            'workflow.steps.util.disk.AddDiskPermissionsOldest',
            'workflow.steps.util.disk.MountOldestExportMigration',
            'workflow.steps.util.disk.CopyDataBetweenExportsMigration',
            'workflow.steps.util.disk.FilePermissionsMigration',
            'workflow.steps.util.disk.UnmountNewerExportMigration',
            'workflow.steps.util.vm.ChangeInstanceHost',
            'workflow.steps.util.vm.UpdateOSDescription',
            'workflow.steps.util.infra.OfferingMigration',
            'workflow.steps.util.infra.UpdateMigrateEnvironment',
            'workflow.steps.util.infra.UpdateMigratePlan',
        )}, {
        'Starting new infra': (
            'workflow.steps.util.database.Start',
            'workflow.steps.util.database.CheckIsUp',
            'workflow.steps.util.metric_collector.RestartTelegraf',
        )}, {
        'Enabling access': (
            'workflow.steps.util.dns.ChangeEndpoint',
            'workflow.steps.util.acl.ReplicateAclsMigration',
        )}, {
        'Destroying old infra': (
            'workflow.steps.util.disk.DisableOldestExportMigration',
            'workflow.steps.util.disk.DiskUpdateHost',
            'workflow.steps.util.vm.RemoveHostMigration',
        )}, {
        'Enabling monitoring and alarms': (
            'workflow.steps.util.db_monitor.EnableMonitoring',
            'workflow.steps.util.zabbix.CreateAlarms',
        )}, {
        'Restart replication': (
            'workflow.steps.util.database.SetSlavesMigration',
        )
    }]

    # Track this run under the celery request id.
    task = TaskHistory()
    task.task_id = self.request.id
    task.task_name = "migrating_zone"
    task.task_status = TaskHistory.STATUS_RUNNING
    task.context = {'infra': infra, 'instances': instances}
    task.arguments = {'infra': infra, 'instances': instances}
    task.user = '******'
    task.save()

    # steps_for_instances drives every step group for every instance and
    # reports overall success/failure.
    if steps_for_instances(steps, instances, task, since_step=since_step):
        task.set_status_success('Region migrated with success')
    else:
        task.set_status_error('Could not migrate region')

    # Re-point the logical database at the infra's (new) environment.
    database = infra.databases.first()
    database.environment = infra.environment
    database.save()
class Command(BaseCommand):
    """Management command: verify every RUNNING TaskHistory is known to celery.

    Tasks that claim to be running but are absent from the celery workers'
    active-task lists are flagged, moved to ERROR, and their related database
    upgrade (when present) is marked as failed. Each run records its own
    audit trail as a TaskHistory entry.
    """

    help = "Check if all Tasks with status running are in celery"
    option_list = BaseCommand.option_list + (make_option(
        "-n", "--celery_hosts",
        dest="celery_hosts",
        help="Number of celery hosts",
        type="int",
    ),)

    def __init__(self):
        super(Command, self).__init__()
        # Audit record for this sync run; the timestamp doubles as task_id.
        self.task = TaskHistory()
        self.task.task_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.task.task_name = "sync_celery_tasks"
        self.task.task_status = TaskHistory.STATUS_RUNNING
        self.task.context = {'hostname': socket.gethostname()}
        self.task.save()
        self.task.add_detail('Syncing metadata tasks with celery tasks')

    def handle(self, *args, **kwargs):
        """Entry point: run the check and record the outcome on the audit task.

        Requires ``--celery_hosts`` so we can detect missing workers (an
        incomplete view of celery would produce false positives).
        """
        self.task.arguments = {'args': args, 'kwargs': kwargs}
        if not kwargs['celery_hosts']:
            raise CommandError("Please specified the --celery_hosts count")

        try:
            tasks_with_problem = self.check_tasks(kwargs['celery_hosts'])
        except CeleryActivesNodeError as celery_error:
            # Not all expected workers answered: the snapshot is unreliable,
            # so only warn instead of flagging tasks.
            self.task.update_status_for(
                TaskHistory.STATUS_WARNING,
                'Could not check celery tasks.\n{}{}'.format(
                    full_stack(), celery_error))
            return
        except Exception as e:
            self.task.update_status_for(
                TaskHistory.STATUS_ERROR,
                'Could not execute task.\n{}{}'.format(full_stack(), e))
            return

        problems = len(tasks_with_problem)
        status = TaskHistory.STATUS_SUCCESS
        if problems > 0:
            status = TaskHistory.STATUS_WARNING
        self.task.update_status_for(status, 'Problems: {}'.format(problems))

    def check_tasks(self, celery_hosts):
        """Compare RUNNING TaskHistory rows against celery's active tasks.

        :param celery_hosts: expected number of celery workers
        :return: list of TaskHistory rows that were flagged as problems
        :raises CeleryActivesNodeError: when fewer/more workers than expected
            responded to the inspection.
        """
        tasks_running = TaskHistory.objects.filter(
            task_status=TaskHistory.STATUS_RUNNING).exclude(id=self.task.id)
        self.task.add_detail("\nTasks with status running: {}\n".format(
            len(tasks_running)))
        celery_tasks = self.get_celery_active_tasks(celery_hosts)
        self.task.add_detail("Celery running: {}\n".format(len(celery_tasks)))

        tasks_with_problem = []
        self.task.add_detail("Checking tasks status")
        for task in tasks_running:
            self.task.add_detail(
                "{} - {}".format(task.task_id, task.task_name), level=1)

            # Re-fetch: the task may have changed state since the queryset
            # above was evaluated.
            task = TaskHistory.objects.get(id=task.id)

            # BUGFIX: a task that finished between the initial query and this
            # refresh is (correctly) absent from celery's active list, but it
            # is not a problem. Previously it fell through and was wrongly
            # forced to ERROR, clobbering its final status.
            if not task.is_running:
                self.task.add_detail(
                    "OK: Tasks was finished with {}".format(task.task_status),
                    level=2)
                continue
            if task.task_id in celery_tasks:
                self.task.add_detail("OK: Running in celery", level=2)
                continue

            # Still marked RUNNING but unknown to celery: a stale task.
            tasks_with_problem.append(task)
            self.task.add_detail("ERROR: Not running in celery", level=2)
            self.task.add_detail("Setting task to ERROR status", level=3)
            task.update_status_for(
                status=TaskHistory.STATUS_ERROR,
                details="Celery is not running this task")
            database_upgrade = task.database_upgrades.first()
            if database_upgrade:
                self.task.add_detail(
                    "Setting database upgrade {} status to ERROR".format(
                        database_upgrade.id), level=3)
                database_upgrade.set_error()
        return tasks_with_problem

    def get_celery_active_tasks(self, expected_hosts):
        """Collect the ids of all tasks currently active on celery workers.

        :param expected_hosts: number of workers that must respond
        :return: list of active celery task ids
        :raises CeleryActivesNodeError: when the responding hosts do not
            match the expected count.
        """
        self.task.add_detail('Collecting celery tasks...')
        actives = app.control.inspect().active()
        activated_hosts = []
        if actives:
            activated_hosts = actives.keys()
        if len(activated_hosts) != expected_hosts:
            raise CeleryActivesNodeError(expected_hosts, activated_hosts)

        active_tasks = []
        for host, tasks in actives.items():
            self.task.add_detail('Host {} tasks:'.format(host), level=1)
            for task in tasks:
                task_id = task['id']
                self.task.add_detail('{}'.format(task_id), level=2)
                active_tasks.append(task_id)
        return active_tasks
class Command(BaseCommand):
    """Management command that reconciles RUNNING TaskHistory rows with celery.

    A task still marked RUNNING but unknown to every celery worker is stale:
    it is flagged, forced to ERROR, and any related maintenance record
    (upgrade/resize/create/restore) is marked as failed. Leftover Redis lock
    keys for unique tasks are also cleaned up.
    """

    help = "Check if all Tasks with status running are in celery"
    option_list = BaseCommand.option_list + (
        make_option(
            "-n", "--celery_hosts",
            dest="celery_hosts",
            help="Number of celery hosts",
            type="int",
        ),
    )

    def __init__(self):
        super(Command, self).__init__()
        # Each run writes its own audit TaskHistory; timestamp serves as id.
        self.task = TaskHistory()
        self.task.task_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.task.task_name = "sync_celery_tasks"
        self.task.relevance = TaskHistory.RELEVANCE_WARNING
        self.task.task_status = TaskHistory.STATUS_RUNNING
        self.task.context = {'hostname': socket.gethostname()}
        self.task.save()
        self.task.add_detail('Syncing metadata tasks with celery tasks')
        # Tasks that hold a Redis key to enforce single-instance execution.
        self.unique_tasks = [{
            'name': 'backup.tasks.make_databases_backup',
            'unique_key': 'makedatabasebackupkey'
        }]
        self._redis_conn = None

    @property
    def redis_conn(self):
        # Lazily created, memoized Redis connection.
        if not self._redis_conn:
            self._redis_conn = Redis(
                host=settings.REDIS_HOST,
                port=settings.REDIS_PORT,
                password=settings.REDIS_PASSWORD
            )
        return self._redis_conn

    def handle(self, *args, **kwargs):
        """Entry point: run the reconciliation and record the outcome."""
        self.task.arguments = {'args': args, 'kwargs': kwargs}
        if not kwargs['celery_hosts']:
            raise CommandError("Please specified the --celery_hosts count")

        try:
            problem_tasks = self.check_tasks(kwargs['celery_hosts'])
        except CeleryActivesNodeError as celery_error:
            # Incomplete worker view — results would be unreliable, only warn.
            self.task.update_status_for(
                TaskHistory.STATUS_WARNING,
                'Could not check celery tasks.\n{}{}'.format(
                    full_stack(), celery_error
                )
            )
            return
        except Exception as e:
            self.task.update_status_for(
                TaskHistory.STATUS_ERROR,
                'Could not execute task.\n{}{}'.format(full_stack(), e)
            )
            return

        problems = len(problem_tasks)
        final_status = (
            TaskHistory.STATUS_WARNING if problems > 0
            else TaskHistory.STATUS_SUCCESS
        )
        self.task.update_status_for(
            final_status, 'Problems: {}'.format(problems))
        self.check_unique_keys()

    def check_unique_keys(self):
        """Drop Redis lock keys whose unique task is no longer running."""
        for unique_task in self.unique_tasks:
            still_running = TaskHistory.objects.filter(
                task_status='RUNNING',
                task_name=unique_task['name']
            )
            if still_running:
                continue
            lock_key = unique_task['unique_key']
            if lock_key in self.redis_conn.keys():
                self.redis_conn.delete(lock_key)

    def check_tasks(self, celery_hosts):
        """Return the TaskHistory rows marked RUNNING but absent from celery.

        :param celery_hosts: expected number of responding celery workers
        :raises CeleryActivesNodeError: when the worker count does not match
        """
        running = TaskHistory.objects.filter(
            task_status=TaskHistory.STATUS_RUNNING
        ).exclude(
            id=self.task.id
        )
        self.task.add_detail(
            "\nTasks with status running: {}\n".format(len(running))
        )
        celery_tasks = self.get_celery_active_tasks(celery_hosts)
        self.task.add_detail("Celery running: {}\n".format(len(celery_tasks)))

        problem_tasks = []
        self.task.add_detail("Checking tasks status")
        for task in running:
            self.task.add_detail(
                "{} - {}".format(task.task_id, task.task_name), level=1
            )
            # Re-fetch for a fresh status — it may have finished meanwhile.
            task = TaskHistory.objects.get(id=task.id)
            if not task.is_running:
                self.task.add_detail(
                    "OK: Tasks was finished with {}".format(task.task_status),
                    level=2
                )
                continue
            if task.task_id in celery_tasks:
                self.task.add_detail("OK: Running in celery", level=2)
                continue

            problem_tasks.append(task)
            self.task.add_detail("ERROR: Not running in celery", level=2)
            self.task.add_detail("Setting task to ERROR status", level=3)
            task.update_status_for(
                status=TaskHistory.STATUS_ERROR,
                details="Celery is not running this task"
            )
            # Propagate the failure to whichever maintenance record (if any)
            # this task was driving.
            related_maintenances = (
                ('upgrade', task.database_upgrades.first()),
                ('resize', task.database_resizes.first()),
                ('create', task.create_database.first()),
                ('restore', task.database_restore.first()),
            )
            for label, maintenance in related_maintenances:
                if maintenance:
                    self.task.add_detail(
                        "Setting database {} {} status to ERROR".format(
                            label, maintenance.id
                        ), level=3
                    )
                    maintenance.set_error()
        return problem_tasks

    def get_celery_active_tasks(self, expected_hosts):
        """Collect active task ids from every celery worker.

        :param expected_hosts: number of workers that must respond
        :raises CeleryActivesNodeError: when responding hosts differ from
            the expected count.
        """
        self.task.add_detail('Collecting celery tasks...')
        actives = app.control.inspect().active()
        activated_hosts = actives.keys() if actives else []
        if len(activated_hosts) != expected_hosts:
            raise CeleryActivesNodeError(expected_hosts, activated_hosts)

        active_tasks = []
        for host, host_tasks in actives.items():
            self.task.add_detail('Host {} tasks:'.format(host), level=1)
            for celery_task in host_tasks:
                self.task.add_detail(
                    '{}'.format(celery_task['id']), level=2)
                active_tasks.append(celery_task['id'])
        return active_tasks
def region_migration_start(self, infra, instances, since_step=None):
    """Celery task body: migrate an infra's instances to a new region.

    Builds the ordered list of workflow stages, records a TaskHistory row
    for auditing, then delegates execution to ``steps_for_instances``.

    :param infra: databaseinfra whose instances are being migrated
    :param instances: instances to migrate
    :param since_step: optional step index to resume an interrupted run from
    """
    # Ordered workflow: each dict maps a human-readable stage name to the
    # dotted paths of the step classes executed for that stage. Order is
    # significant — steps_for_instances runs them sequentially.
    steps = [{
        'Disable monitoring and alarms': (
            'workflow.steps.util.zabbix.DestroyAlarms',
            'workflow.steps.util.db_monitor.DisableMonitoring',
        )
    }, {
        'Stopping infra': (
            'workflow.steps.util.database.Stop',
            'workflow.steps.util.database.CheckIsDown',
        )
    }, {
        'Creating new virtual machine': (
            'workflow.steps.util.vm.MigrationCreateNewVM',
        )
    }, {
        'Creating new infra': (
            'workflow.steps.util.vm.MigrationWaitingBeReady',
            'workflow.steps.util.infra.MigrationCreateInstance',
            'workflow.steps.util.disk.MigrationCreateExport',
        )
    }, {
        'Configuring new infra': (
            'workflow.steps.util.volume_provider.MountDataVolume',
            'workflow.steps.util.plan.InitializationMigration',
            'workflow.steps.util.plan.ConfigureMigration',
            'workflow.steps.util.plan.ConfigureLog',
            'workflow.steps.util.metric_collector.ConfigureTelegraf',
        )
    }, {
        'Preparing new environment': (
            'workflow.steps.util.disk.AddDiskPermissionsOldest',
            'workflow.steps.util.disk.MountOldestExportMigration',
            'workflow.steps.util.disk.CopyDataBetweenExportsMigration',
            'workflow.steps.util.disk.FilePermissionsMigration',
            'workflow.steps.util.disk.UnmountNewerExportMigration',
            'workflow.steps.util.vm.ChangeInstanceHost',
            'workflow.steps.util.vm.UpdateOSDescription',
            'workflow.steps.util.infra.OfferingMigration',
            'workflow.steps.util.infra.UpdateMigrateEnvironment',
            'workflow.steps.util.infra.UpdateMigratePlan',
        )
    }, {
        'Starting new infra': (
            'workflow.steps.util.database.Start',
            'workflow.steps.util.database.CheckIsUp',
            'workflow.steps.util.metric_collector.RestartTelegraf',
        )
    }, {
        'Enabling access': (
            'workflow.steps.util.dns.ChangeEndpoint',
            'workflow.steps.util.acl.ReplicateAclsMigration',
        )
    }, {
        'Destroying old infra': (
            'workflow.steps.util.disk.DisableOldestExportMigration',
            'workflow.steps.util.disk.DiskUpdateHost',
            'workflow.steps.util.vm.RemoveHostMigration',
        )
    }, {
        'Enabling monitoring and alarms': (
            'workflow.steps.util.db_monitor.EnableMonitoring',
            'workflow.steps.util.zabbix.CreateAlarms',
        )
    }, {
        'Restart replication': (
            'workflow.steps.util.database.SetSlavesMigration',
        )
    }]
    # Audit record for this run; task_id comes from the bound celery request.
    task = TaskHistory()
    task.task_id = self.request.id
    # NOTE(review): task_name says "migrating_zone" although this function
    # performs a region migration — looks copy-pasted from a zone-migration
    # task; confirm before relying on it for TaskHistory lookups.
    task.task_name = "migrating_zone"
    task.task_status = TaskHistory.STATUS_RUNNING
    task.context = {'infra': infra, 'instances': instances}
    task.arguments = {'infra': infra, 'instances': instances}
    task.user = '******'  # masked user; presumably system-triggered — confirm
    task.save()
    if steps_for_instances(steps, instances, task, since_step=since_step):
        task.set_status_success('Region migrated with success')
    else:
        task.set_status_error('Could not migrate region')
    # NOTE(review): the database's environment is synced to the infra's even
    # when the migration failed above — confirm this is intentional.
    database = infra.databases.first()
    database.environment = infra.environment
    database.save()
plans = get_plans_dict(hard_plans) plan = [splan for splan in plans if splan['name'] == plan] LOG.info("Plan: {}".format(plan)) if any(plan): dbaas_plan = Plan.objects.get(pk=plan[0]['pk']) else: msg = "Plan was not found" return log_and_response( msg=msg, http_status=status.HTTP_500_INTERNAL_SERVER_ERROR) task_history = TaskHistory() task_history.task_name = "create_database" task_history.arguments = "Database name: {}".format(name) task_history.save() create_database.delay(name=name, plan=dbaas_plan, environment=dbaas_environment, team=dbaas_team, project=None, description='Database from Tsuru', task_history=task_history, user=dbaas_user) return Response(status=status.HTTP_201_CREATED, ) class ServiceRemove(APIView): renderer_classes = (JSONRenderer, JSONPRenderer)
def post(self, request, format=None):
    """Create a database for a Tsuru service instance (asynchronously).

    Validates the request payload (description, name format, uniqueness,
    user/team/environment existence and membership, team quota, plan and its
    availability in the target environment), records a TaskHistory entry and
    enqueues ``create_database``.

    :param request: DRF request; ``request.DATA`` must carry ``name``,
        ``user``, ``team``, ``description`` and ``plan``.
    :param format: DRF content-negotiation suffix (unused).
    :return: 201 Response when the creation task is enqueued; otherwise an
        error response built by ``log_and_response``.
    """
    data = request.DATA
    name = data['name']
    user = data['user']
    team = data['team']
    env = get_url_env(request)

    # A non-empty description is mandatory. (Previously enforced by raising
    # an Exception only to catch it immediately — plain check is clearer.)
    description = data.get('description')
    if not description:
        return log_and_response(
            msg="A description must be provided.",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    name_regexp = re.compile('^[a-z][a-z0-9_]+$')
    if name_regexp.match(name) is None:
        return log_and_response(
            msg="Your database name must match /^[a-z][a-z0-9_]+$/ .",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # Reject duplicates in the same environment.
    try:
        Database.objects.get(name=name, environment__name=env)
    except ObjectDoesNotExist:
        pass
    else:
        return log_and_response(
            msg="There is already a database called {} in {}.".format(
                name, env),
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    if database_name_evironment_constraint(name, env):
        return log_and_response(
            msg="{} already exists in production!".format(name),
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        dbaas_user = AccountUser.objects.get(email=user)
    except ObjectDoesNotExist as e:
        return log_and_response(
            msg="User does not exist.", e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        dbaas_team = Team.objects.get(name=team)
    except ObjectDoesNotExist as e:
        return log_and_response(
            msg="Team does not exist.", e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # The requesting user must belong to the requested team.
    try:
        dbaas_user.team_set.get(name=dbaas_team.name)
    except ObjectDoesNotExist as e:
        return log_and_response(
            msg="The user is not on {} team.".format(dbaas_team.name), e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        dbaas_environment = Environment.objects.get(name=env)
    except ObjectDoesNotExist as e:
        # Consistency fix: forward the exception like the sibling handlers.
        return log_and_response(
            msg="Environment does not exist.", e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # Enforce the team's database allocation quota for this environment.
    databases_used_by_team = dbaas_team.count_databases_in_use(
        environment=dbaas_environment)
    database_alocation_limit = dbaas_team.database_alocation_limit
    if databases_used_by_team >= database_alocation_limit:
        return log_and_response(
            msg="The database alocation limit of {} has been exceeded for the selected team: {}".format(
                database_alocation_limit, dbaas_team),
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    if 'plan' not in data:
        return log_and_response(
            msg="Plan was not found",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    plan = data['plan']

    # Resolve the requested plan name against active cloudstack plans.
    hard_plans = Plan.objects.values(
        'name', 'description', 'pk', 'environments__name'
    ).extra(
        where=['is_active=True', 'provider={}'.format(Plan.CLOUDSTACK)])
    matching_plans = [
        splan for splan in get_plans_dict(hard_plans)
        if splan['name'] == plan
    ]
    LOG.info("Plan: {}".format(matching_plans))
    if not any(matching_plans):
        return log_and_response(
            msg="Plan was not found",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    dbaas_plan = Plan.objects.get(pk=matching_plans[0]['pk'])

    if dbaas_environment not in dbaas_plan.environments.all():
        return log_and_response(
            msg='Plan "{}" is not available to "{}" environment'.format(
                dbaas_plan, dbaas_environment),
            http_status=status.HTTP_400_BAD_REQUEST)

    # Record the pending work and hand it off to the async worker.
    task_history = TaskHistory()
    task_history.task_name = "create_database"
    task_history.arguments = "Database name: {}".format(name)
    task_history.save()
    create_database.delay(
        name=name, plan=dbaas_plan, environment=dbaas_environment,
        team=dbaas_team, project=None, description=description,
        task_history=task_history, user=dbaas_user, is_protected=True)
    return Response(status=status.HTTP_201_CREATED)
def post(self, request, format=None):
    """Provision a database for a Tsuru service instance.

    Runs the validation pipeline (name format, uniqueness, user/team/
    environment lookups, membership, quota, plan resolution), records a
    TaskHistory entry and enqueues ``create_database``.

    :param request: DRF request carrying ``name``, ``user``, ``team`` and
        ``plan`` in ``request.DATA``.
    :param format: DRF content-negotiation suffix (unused).
    :return: 201 Response on success, otherwise an error response built by
        ``log_and_response``.
    """
    payload = request.DATA
    db_name = payload['name']
    requester_email = payload['user']
    team_name = payload['team']
    env_name = get_url_env(request)

    # Database names are restricted to lowercase identifiers.
    if re.match('^[a-z][a-z0-9_]+$', db_name) is None:
        return log_and_response(
            msg="Your database name must match /^[a-z][a-z0-9_]+$/ .",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # Reject duplicates within the same environment.
    try:
        Database.objects.get(name=db_name, environment__name=env_name)
    except ObjectDoesNotExist:
        pass
    else:
        return log_and_response(
            msg="There is already a database called {} in {}.".format(
                db_name, env_name),
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        requester = AccountUser.objects.get(email=requester_email)
    except ObjectDoesNotExist as exc:
        return log_and_response(
            msg="User does not exist.", e=exc,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        team_obj = Team.objects.get(name=team_name)
    except ObjectDoesNotExist as exc:
        return log_and_response(
            msg="Team does not exist.", e=exc,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # The requesting user must be a member of the requested team.
    try:
        requester.team_set.get(name=team_obj.name)
    except ObjectDoesNotExist as exc:
        return log_and_response(
            msg="The user is not on {} team.".format(team_obj.name), e=exc,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    try:
        environment_obj = Environment.objects.get(name=env_name)
    except ObjectDoesNotExist:
        return log_and_response(
            msg="Environment does not exist.",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    # Enforce the team's allocation quota in this environment.
    in_use = team_obj.count_databases_in_use(environment=environment_obj)
    quota = team_obj.database_alocation_limit
    if in_use >= quota:
        return log_and_response(
            msg="The database alocation limit of {} has been exceeded for the selected team: {}".format(
                quota, team_obj),
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)

    if 'plan' not in payload:
        return log_and_response(
            msg="Plan was not found",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    requested_plan = payload['plan']

    # Resolve the requested plan name against active cloudstack plans.
    hard_plans = Plan.objects.values(
        'name', 'description', 'pk', 'environments__name'
    ).extra(
        where=['is_active=True', 'provider={}'.format(Plan.CLOUDSTACK)]
    )
    matching_plans = [
        splan for splan in get_plans_dict(hard_plans)
        if splan['name'] == requested_plan
    ]
    LOG.info("Plan: {}".format(matching_plans))
    if not any(matching_plans):
        return log_and_response(
            msg="Plan was not found",
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    plan_obj = Plan.objects.get(pk=matching_plans[0]['pk'])

    # Record the pending work and hand it off to the async worker.
    task_history = TaskHistory()
    task_history.task_name = "create_database"
    task_history.arguments = "Database name: {}".format(db_name)
    task_history.save()
    create_database.delay(name=db_name,
                          plan=plan_obj,
                          environment=environment_obj,
                          team=team_obj,
                          project=None,
                          description='Database from Tsuru',
                          task_history=task_history,
                          user=requester)
    return Response(status=status.HTTP_201_CREATED,)