def set_quotas(self, request, **kwargs):
    """Schedule a quota update for a service project link.

    Staff-only. The link must be in a stable (IN_SYNC) state. When an
    ``instances`` quota is supplied, derived ``volumes`` and
    ``snapshots`` quotas are computed from the configured ratios.
    """
    if not request.user.is_staff:
        raise exceptions.PermissionDenied()

    spl = self.get_object()
    if spl.state != SynchronizationStates.IN_SYNC:
        # BUG FIX: the exception was previously *returned* instead of
        # raised, so DRF never produced the intended error response.
        raise IncorrectStateException(
            "Service project link must be in stable state.")

    serializer = serializers.ServiceProjectLinkQuotaSerializer(
        data=request.data)
    serializer.is_valid(raise_exception=True)

    data = dict(serializer.validated_data)
    if data.get('instances') is not None:
        quotas = settings.NODECONDUCTOR.get(
            'OPENSTACK_QUOTAS_INSTANCE_RATIOS', {})
        volume_ratio = quotas.get('volumes', 4)
        snapshots_ratio = quotas.get('snapshots', 20)

        # Derive dependent quotas from the requested instance count.
        data['volumes'] = volume_ratio * data['instances']
        data['snapshots'] = snapshots_ratio * data['instances']

    send_task('structure', 'sync_service_project_links')(
        spl.to_string(), quotas=data)

    return response.Response(
        {'detail': 'Quota update was scheduled'},
        status=status.HTTP_202_ACCEPTED)
def perform_update(self, serializer):
    """Persist the security group, flag it for syncing and fire the
    background update task."""
    super(SecurityGroupViewSet, self).perform_update(serializer)
    group = self.get_object()
    group.schedule_syncing()
    group.save()
    send_task('openstack', 'update_security_group')(group.uuid.hex)
def assign_floating_ip(self, request, uuid):
    """Schedule assignment of a floating IP to an instance.

    Responds with 409 when the service project link has no external
    network, and raises an incorrect-state error when either the link
    or the instance is not in a stable state.
    """
    instance = self.get_object()
    serializer = serializers.AssignFloatingIpSerializer(
        instance, data=request.data)
    serializer.is_valid(raise_exception=True)

    spl = instance.service_project_link
    if not spl.external_network_id:
        return response.Response(
            {'detail': 'External network ID of the service project link is missing.'},
            status=status.HTTP_409_CONFLICT)
    if spl.state not in SynchronizationStates.STABLE_STATES:
        raise IncorrectStateException(
            "Service project link of instance should be in stable state.")
    if instance.state not in instance.States.STABLE_STATES:
        raise IncorrectStateException(
            "Cannot add floating IP to instance in unstable state.")

    send_task('openstack', 'assign_floating_ip')(
        instance.uuid.hex, serializer.validated_data['floating_ip_uuid'])

    return response.Response(
        {'detail': 'Assigning floating IP to the instance has been scheduled.'},
        status=status.HTTP_202_ACCEPTED)
def resize(self, request, uuid=None):
    """Schedule a flavor change or a disk extension for an instance.

    The serializer guarantees that exactly one of ``flavor`` or
    ``disk_size`` is present in the validated data.
    """
    instance = self.get_object()
    serializer = serializers.InstanceResizeSerializer(
        instance, data=request.data)
    serializer.is_valid(raise_exception=True)

    flavor = serializer.validated_data.get('flavor')
    new_size = serializer.validated_data.get('disk_size')

    if flavor is None:
        # Disk-extension branch.
        send_task('openstack', 'extend_disk')(
            instance.uuid.hex, disk_size=new_size)
        event_logger.openstack_volume.info(
            'Virtual machine {resource_name} has been scheduled to extend disk.',
            event_type='resource_volume_extension_scheduled',
            event_context={'resource': instance, 'volume_size': new_size})
    else:
        # Flavor-change branch.
        send_task('openstack', 'change_flavor')(
            instance.uuid.hex, flavor_uuid=flavor.uuid.hex)
        event_logger.openstack_flavor.info(
            'Virtual machine {resource_name} has been scheduled to change flavor.',
            event_type='resource_flavor_change_scheduled',
            event_context={'resource': instance, 'flavor': flavor})

    return response.Response(
        {'detail': 'Resizing has been scheduled.'},
        status=status.HTTP_202_ACCEPTED)
def provision(self, resource, zone=None, template=None, username=None, password=None):
    """Kick off backend provisioning for an Oracle resource.

    Builds the provisioning payload and schedules the background task.
    Only full databases are wired up; schema templates are recognised
    but not implemented yet.
    """
    params = {
        'zone': self.manager.URI.DBZONE % zone.backend_id,
        'name': resource.name,
        'description': resource.description,
        'params': {
            'username': username,
            'password': password,
        }
    }
    if isinstance(resource, models.Database):
        params[
            'based_on'] = self.manager.URI.TEMPLATE_DB % template.backend_id
        params['params'].update({
            'database_sid': resource.backend_database_sid,
            'service_name': resource.backend_service_name,
        })
        send_task('oracle', 'provision_database')(resource.uuid.hex, params)
    elif template.type == template.Types.SCHEMA:
        params[
            'based_on'] = self.manager.URI.TEMPLATE_SCHEMA % template.backend_id
        # Schema payload is prepared, but no task is dispatched yet.
        # NOTE(review): the original formatting makes the placement of
        # this raise ambiguous — confirm it belongs inside this branch.
        raise NotImplementedError
def update_projected_estimate(self, request, queryset):
    """Admin action: schedule projected-estimate recalculation for the
    selected customers, reporting any that lack a billing backend id."""
    missing_backend_id = []
    scheduled = []
    for customer in queryset:
        if customer.billing_backend_id:
            send_task('cost_tracking', 'update_projected_estimate')(
                customer_uuid=customer.uuid.hex)
            scheduled.append(customer)
        else:
            missing_backend_id.append(customer)

    if scheduled:
        template = ungettext(
            'Projected estimate generation successfully scheduled for customer %(customers_names)s',
            'Projected estimate generation successfully scheduled for customers: %(customers_names)s',
            len(scheduled))
        names = ', '.join([c.name for c in scheduled])
        self.message_user(request, template % {'customers_names': names})

    if missing_backend_id:
        template = ungettext(
            'Cannot generate estimate for customer without backend id: %(customers_names)s',
            'Cannot generate estimate for customers without backend id: %(customers_names)s',
            len(missing_backend_id))
        names = ', '.join([c.name for c in missing_backend_id])
        self.message_user(request, template % {'customers_names': names})
def restart(self, resource):
    """Schedule a restart for a database resource; any other resource
    type is unsupported."""
    if not isinstance(resource, models.Database):
        raise NotImplementedError
    resource.schedule_restarting()
    resource.save()
    send_task('oracle', 'restart')(resource.uuid.hex)
def external_network(self, request, pk=None):
    """Create (POST/PUT) or delete (DELETE) the external network of a
    service project link via background tasks."""
    spl = self.get_object()

    if request.method == 'DELETE':
        if not spl.external_network_id:
            # Nothing to delete.
            return response.Response(
                {'detail': 'External network does not exist.'},
                status=status.HTTP_204_NO_CONTENT)
        send_task('openstack', 'sync_external_network')(
            spl.to_string(), 'delete')
        return response.Response(
            {'detail': 'External network deletion has been scheduled.'},
            status=status.HTTP_202_ACCEPTED)

    # Creation path (POST/PUT).
    serializer = serializers.ExternalNetworkSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    send_task('openstack', 'sync_external_network')(
        spl.to_string(), 'create', serializer.data)
    return response.Response(
        {'detail': 'External network creation has been scheduled.'},
        status=status.HTTP_202_ACCEPTED)
def destroy(self, request, *args, **kwargs):
    """Schedule asynchronous deletion of a security group instead of
    removing it synchronously."""
    group = self.get_object()
    group.schedule_syncing()
    group.save()
    send_task('openstack', 'delete_security_group')(group.uuid.hex)
    return response.Response(
        {'detail': 'Deletion was scheduled'},
        status=status.HTTP_202_ACCEPTED)
def sync(self, request):
    """Admin action: schedule price-list sync via KillBill, or warn
    when no supported billing backend is installed."""
    if not NodeConductorExtension.is_installed('nodeconductor_killbill'):
        self.message_user(
            request, "Unknown billing backend. Can't sync",
            level=messages.ERROR)
    else:
        send_task('killbill', 'sync_pricelist')()
        self.message_user(request, "Price lists scheduled for sync")
    return redirect(
        reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
def sync(self, request):
    """Admin action: schedule syncing of KillBill invoices from the
    billing backend."""
    send_task('killbill', 'sync_invoices')()
    message = "KillBill invoices scheduled for sync from backend."
    self.message_user(request, message)
    return redirect(
        reverse('admin:nodeconductor_killbill_invoice_changelist'))
def destroy(self, crm, force=False):
    """Schedule a stop-then-destroy task for a CRM.

    A CRM cannot be stopped by the user, so it is moved into the
    stopping state here before the combined task is dispatched.
    """
    crm.state = crm.States.STOPPING_SCHEDULED
    crm.save()
    send_task('sugarcrm', 'stop_and_destroy_crm')(crm.uuid.hex, force=force)
def check_project_name_update(sender, instance=None, created=False, **kwargs):
    """Signal handler: when an existing project is renamed, schedule a
    tenant-name update for each of its OpenStack links."""
    if created:
        return

    previous_name = instance.tracker.previous('name')
    if previous_name == instance.name:
        return

    links = OpenStackServiceProjectLink.objects.filter(
        project__uuid=instance.uuid)
    for link in links:
        send_task('openstack', 'update_tenant_name')(link.to_string())
def destroy(self, host, force=False):
    """Delete a Zabbix host: immediately when forced, otherwise via a
    scheduled background task."""
    if force:
        # Hosts may be removed straight from the ONLINE state, so no
        # stop step is required here.
        host.delete()
        return

    host.schedule_deletion()
    host.save()
    send_task('zabbix', 'destroy')(host.uuid.hex)
def sync_service_project_link_with_backend(sender, instance, created=False, **kwargs):
    """Signal handler: schedule an initial backend sync for a freshly
    created service project link, unless it is still in the NEW state."""
    if created and instance.state != SynchronizationStates.NEW:
        send_task('structure', 'sync_service_project_links')(
            instance.to_string(), initial=True)
def detect_vm_coordinates(sender, instance, name, source, target, **kwargs):
    """Signal handler: schedule coordinate detection for a VM entering
    the OK state that has no coordinates yet."""
    # Geolocation may be disabled project-wide.
    if not settings.NODECONDUCTOR.get('ENABLE_GEOIP', True):
        return
    # Skip VMs whose coordinates are already known.
    if instance.latitude is not None and instance.longitude is not None:
        return
    if target == StateMixin.States.OK:
        send_task('structure', 'detect_vm_coordinates')(
            utils.serialize_instance(instance))
def detect_coordinates(self, request, queryset):
    """Admin action: schedule coordinate detection for the selected
    virtual machines and report how many were queued."""
    vm_keys = [vm.to_string() for vm in queryset]
    send_task('structure', 'detect_vm_coordinates_batch')(vm_keys)

    count = queryset.count()
    template = ungettext(
        'Coordinates detection has been scheduled for one virtual machine',
        'Coordinates detection has been scheduled for %(tasks_scheduled)d virtual machines',
        count)
    self.message_user(request, template % {'tasks_scheduled': count})
def sync_with_backend(self, request, queryset):
    """Admin action: schedule a backend sync for the selected links
    that are currently IN_SYNC."""
    stable = queryset.filter(state=SynchronizationStates.IN_SYNC)
    send_task('structure', 'sync_service_project_links')(
        [link.to_string() for link in stable])

    count = stable.count()
    template = ungettext(
        'One service project link scheduled for update',
        '%(tasks_scheduled)d service project links scheduled for update',
        count)
    self.message_user(request, template % {'tasks_scheduled': count})
def recover_erred_service_project_links(self, request, queryset):
    """Admin action: schedule recovery for the selected links that are
    in the ERRED state."""
    erred = queryset.filter(state=SynchronizationStates.ERRED)
    send_task('structure', 'recover_erred_services')(
        [link.to_string() for link in erred])

    count = erred.count()
    template = ungettext(
        'One service project link scheduled for recovery',
        '%(tasks_scheduled)d service project links scheduled for recovery',
        count)
    self.message_user(request, template % {'tasks_scheduled': count})
def users(self, request, pk=None, **kwargs):
    """Bulk-manage Exchange tenant users.

    POST/PUT: accepts a ``csv`` parameter (file upload or JSON string),
    validates every row, and schedules background creation of each user.
    GET: exports the tenant's users as a downloadable CSV attachment.
    """
    tenant = self.get_object()
    if request.method in ('POST', 'PUT'):
        if 'csv' not in request.data:
            return Response(
                "Expecting 'csv' parameter as a file or JSON string",
                status=HTTP_400_BAD_REQUEST)

        csvfile = request.data['csv']
        # A raw string payload is wrapped so the CSV reader receives a
        # file-like object (Python 2 basestring / StringIO).
        if isinstance(csvfile, basestring):
            csvfile = StringIO(csvfile.encode('utf-8'))
        reader = UnicodeDictReader(csvfile)

        # Every row is bound to this tenant before validation.
        tenant_url = self.get_serializer(instance=tenant).data['url']
        data = [dict(tenant=tenant_url, **row) for row in reader]

        serializer = serializers.UserSerializer(
            data=data, many=True, context={'request': request})
        serializer.is_valid(raise_exception=True)

        for user in serializer.validated_data:
            # The task receives the tenant by uuid; the URL field and
            # the ``notify`` flag are stripped from the user payload.
            del user['tenant']
            send_task('exchange', 'create_user')(
                tenant_uuid=tenant.uuid.hex,
                notify=user.pop('notify'),
                **user)

        return Response("%s users scheduled for creation" % len(
            serializer.validated_data))

    elif request.method == 'GET':
        users = models.User.objects.filter(tenant=tenant)
        serializer = serializers.UserSerializer(
            instance=users, many=True, context={'request': request})

        response = HttpResponse(content_type='text/csv')
        response[
            'Content-Disposition'] = 'attachment; filename="%s_users.csv"' % tenant.backend_id

        # Internal/relational fields are excluded from the export.
        exclude = ('url', 'tenant', 'tenant_uuid', 'tenant_domain',
                   'manager', 'notify')
        headers = [
            f for f in serializers.UserSerializer.Meta.fields
            if f not in exclude
        ]

        writer = UnicodeDictWriter(response, fieldnames=headers)
        writer.writeheader()
        writer.writerows(serializer.data)
        return response
def sync(self, request, queryset):
    """Admin action: schedule a sync for service settings records that
    are currently IN_SYNC."""
    in_sync = queryset.filter(state=SynchronizationStates.IN_SYNC)
    uuids = list(in_sync.values_list('uuid', flat=True))
    count = in_sync.count()

    send_task('structure', 'sync_service_settings')(uuids)

    template = ungettext(
        'One service settings record scheduled for sync',
        '%(tasks_scheduled)d service settings records scheduled for sync',
        count)
    self.message_user(request, template % {'tasks_scheduled': count})
def debit_account(self, amount):
    """Reduce the customer's balance by ``amount``.

    The DB update uses an F-expression when a balance already exists so
    concurrent debits don't clobber each other; when the in-memory
    balance is None, the freshly computed value is written directly.
    """
    # Reduce customer's balance at specified amount
    new_balance = (self.balance or 0) - amount
    self._meta.model.objects.filter(uuid=self.uuid).update(
        balance=new_balance if self.balance is None else F('balance') - amount)
    self.balance = new_balance
    # NOTE(review): the history row stores the *resulting balance* in a
    # field named ``amount`` — confirm this is intentional.
    BalanceHistory.objects.create(customer=self, amount=self.balance)
    customer_account_debited.send(sender=Customer, instance=self,
                                  amount=float(amount))
    # Fully prepaid mode
    # TODO: Introduce threshold value to allow over-usage
    if new_balance <= 0:
        send_task('structure', 'stop_customer_resources')(self.uuid.hex)
def sync_site_collections(self, request, queryset):
    """Admin action: schedule site-collection quota sync for the
    selected tenants."""
    tenant_uuids = [
        uuid.hex for uuid in queryset.values_list('uuid', flat=True)
    ]
    tasks_scheduled = queryset.count()
    send_task('sharepoint', 'sync_site_collection_quotas')(tenant_uuids)

    message = ungettext(
        'One tenant site collections scheduled for sync',
        # Typo fix: plural form previously read "enant".
        '%(tasks_scheduled)d tenant site collections scheduled for sync',
        tasks_scheduled)
    message = message % {'tasks_scheduled': tasks_scheduled}
    self.message_user(request, message)
def push_ssh_public_keys(self, request, queryset):
    """Admin action: schedule pushing of SSH public keys for the
    selected cloud project memberships."""
    count = queryset.count()
    if count:
        send_task('structure', 'push_ssh_public_keys')(
            [spl.to_string() for spl in queryset])

    template = ungettext(
        'One cloud project membership scheduled for pushing SSH public keys',
        '%(tasks_scheduled)d cloud project memberships scheduled for pushing SSH public keys',
        count)
    self.message_user(request, template % {'tasks_scheduled': count})
def sync_quotas(self, request, queryset):
    """Admin action: schedule quota sync for the selected tenants."""
    tenant_uuids = [
        uuid.hex for uuid in queryset.values_list('uuid', flat=True)
    ]
    tasks_scheduled = queryset.count()
    send_task('exchange', 'sync_tenant_quotas')(tenant_uuids)

    message = ungettext(
        # Typo fixes: singular read "cheduled"; plural read "tenant".
        'One tenant scheduled for quotas sync',
        '%(tasks_scheduled)d tenants scheduled for quotas sync',
        tasks_scheduled)
    message = message % {'tasks_scheduled': tasks_scheduled}
    self.message_user(request, message)
def emit(self, record):
    """Forward log records that carry event metadata to the background
    event-processing task."""
    # Only records enriched with event data are of interest.
    if not (hasattr(record, 'event_type') and
            hasattr(record, 'event_context')):
        return

    # Flatten the record into a plain dictionary for the task payload.
    event = {
        'timestamp': record.created,
        'levelname': record.levelname,
        'message': record.getMessage(),
        'type': record.event_type,
        'context': record.event_context,
    }
    send_task('logging', 'process_event')(event)
def recover_erred_cloud_memberships(self, request, queryset):
    """Admin action: schedule recovery for the selected memberships
    that are in the ERRED state."""
    erred = queryset.filter(state=SynchronizationStates.ERRED)
    count = erred.count()
    if count:
        send_task('structure', 'recover_erred_services')(
            [spl.to_string() for spl in erred])

    template = ungettext(
        'One cloud project membership scheduled for recovery',
        '%(tasks_scheduled)d cloud project memberships scheduled for recovery',
        count)
    self.message_user(request, template % {'tasks_scheduled': count})
def emit(self, record):
    """Hand log records that carry event metadata off to the background
    event processor."""
    if not (hasattr(record, 'event_type') and
            hasattr(record, 'event_context')):
        return

    # Serialize the log record into a task-friendly dictionary.
    event = {
        'timestamp': record.created,
        'levelname': record.levelname,
        'message': record.getMessage(),
        'type': record.event_type,
        'context': record.event_context,
    }

    # XXX: Imported locally to avoid a circular dependency between the
    # core and logging applications.
    from nodeconductor.core.tasks import send_task
    send_task('logging', 'process_event')(event)
def sync_users(self, request, queryset):
    """Admin action: schedule user sync for IN_SYNC tenants, warning
    when part of the selection was skipped."""
    selected = queryset.count()
    in_sync = queryset.filter(state=SynchronizationStates.IN_SYNC)

    for tenant in in_sync.iterator():
        send_task('sharepoint', 'sync_tenant_users')(tenant.uuid.hex)

    scheduled = in_sync.count()
    if selected != scheduled:
        self.message_user(
            request,
            'Only in sync tenants can be scheduled for users sync',
            level=messages.WARNING)

    template = ungettext(
        'One tenant scheduled for users sync',
        '%(tasks_scheduled)d tenants scheduled for users sync',
        scheduled)
    self.message_user(request, template % {'tasks_scheduled': scheduled})
def recover(self, request, queryset):
    """Admin action: schedule recovery for ERRED service settings,
    warning when non-erred records were part of the selection."""
    selected = queryset.count()
    erred = queryset.filter(state=SynchronizationStates.ERRED)

    uuids = list(erred.values_list('uuid', flat=True))
    send_task('structure', 'recover_service_settings')(uuids)

    scheduled = erred.count()
    if selected != scheduled:
        self.message_user(
            request,
            'Only erred service settings can be recovered',
            level=messages.WARNING)

    template = ungettext(
        'One service settings record scheduled for recover',
        '%(tasks_scheduled)d service settings records scheduled for recover',
        scheduled)
    self.message_user(request, template % {'tasks_scheduled': scheduled})