class SubNetViewSet(structure_views.ResourceViewSet):
    """Subnets: update/delete/pull plus connect/disconnect (creation disabled)."""

    queryset = models.SubNet.objects.all().order_by('network')
    serializer_class = serializers.SubNetSerializer
    filterset_class = filters.SubNetFilter
    disabled_actions = ['create']
    update_executor = executors.SubNetUpdateExecutor
    delete_executor = executors.SubNetDeleteExecutor
    pull_executor = executors.SubNetPullExecutor

    @decorators.action(detail=True, methods=['post'])
    def connect(self, request, uuid=None):
        """Schedule connecting the subnet on the backend."""
        subnet = self.get_object()
        executors.SubnetConnectExecutor.execute(subnet)
        return response.Response(status=status.HTTP_202_ACCEPTED)

    connect_validators = [core_validators.StateValidator(models.SubNet.States.OK)]
    connect_serializer_class = rf_serializers.Serializer

    @decorators.action(detail=True, methods=['post'])
    def disconnect(self, request, uuid=None):
        """Schedule disconnecting the subnet on the backend."""
        subnet = self.get_object()
        executors.SubnetDisconnectExecutor.execute(subnet)
        return response.Response(status=status.HTTP_202_ACCEPTED)

    disconnect_validators = [core_validators.StateValidator(models.SubNet.States.OK)]
    disconnect_serializer_class = rf_serializers.Serializer
class VolumeViewSet(structure_views.ResourceViewSet):
    """Volumes: create/delete plus attach and detach actions."""

    queryset = models.Volume.objects.all()
    serializer_class = serializers.VolumeSerializer
    create_executor = executors.VolumeCreateExecutor
    delete_executor = executors.VolumeDeleteExecutor

    def _has_instance(volume):
        # Plain-function validator: detaching only makes sense while the
        # volume is attached to an instance.
        if not volume.instance:
            raise core_exceptions.IncorrectStateException(_('Volume is already detached.'))

    @decorators.action(detail=True, methods=['post'])
    def detach(self, request, uuid=None):
        """Schedule detaching the volume from its instance."""
        executors.VolumeDetachExecutor.execute(self.get_object())
        # BUGFIX: a view must return an HttpResponse; this action previously
        # returned None, which makes Django raise a server error.
        return response.Response(status=status.HTTP_202_ACCEPTED)

    detach_validators = [core_validators.StateValidator(models.Volume.States.OK), _has_instance]
    detach_serializer_class = rf_serializers.Serializer

    @decorators.action(detail=True, methods=['post'])
    def attach(self, request, volume, uuid=None):
        """Validate attachment parameters and schedule attaching the volume."""
        serializer = self.get_serializer(volume, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        executors.VolumeAttachExecutor.execute(volume)
        # BUGFIX: same as detach — return an explicit 202 instead of None.
        return response.Response(status=status.HTTP_202_ACCEPTED)

    attach_validators = [core_validators.StateValidator(models.Volume.States.OK)]
    attach_serializer_class = serializers.VolumeAttachSerializer
class OrderViewSet(BaseMarketplaceView):
    """Marketplace orders with approve/reject workflow and PDF download."""

    queryset = models.Order.objects.all()
    serializer_class = serializers.OrderSerializer
    filter_backends = (structure_filters.GenericRoleFilter, DjangoFilterBackend)
    filter_class = filters.OrderFilter

    destroy_validators = partial_update_validators = [structure_utils.check_customer_blocked]

    @detail_route(methods=['post'])
    def approve(self, request, uuid=None):
        """Approve the order asynchronously on behalf of the current user."""
        tasks.approve_order(self.get_object(), request.user)
        return Response({'detail': _('Order has been approved.')}, status=status.HTTP_200_OK)

    approve_validators = [core_validators.StateValidator(models.Order.States.REQUESTED_FOR_APPROVAL),
                          structure_utils.check_customer_blocked]
    approve_permissions = [permissions.check_permissions_for_state_change]

    @detail_route(methods=['post'])
    def reject(self, request, uuid=None):
        """Reject the order synchronously and persist the state change."""
        order = self.get_object()
        order.reject()
        order.save(update_fields=['state'])
        return Response({'detail': _('Order has been rejected.')}, status=status.HTTP_200_OK)

    def check_permissions_for_reject(request, view, order=None):
        # Plain-function permission check: only staff or the order's author
        # may reject it.
        if not order:
            return
        user = request.user
        if user.is_staff:
            return
        if user == order.created_by:
            return
        raise rf_exceptions.PermissionDenied()

    reject_validators = [core_validators.StateValidator(models.Order.States.REQUESTED_FOR_APPROVAL),
                         structure_utils.check_customer_blocked]
    reject_permissions = [check_permissions_for_reject]

    @detail_route()
    def pdf(self, request, uuid=None):
        """Download the rendered order document as a PDF attachment."""
        order = self.get_object()
        if not order.has_file():
            raise Http404()
        file_response = HttpResponse(order.file, content_type='application/pdf')
        filename = order.get_filename()
        # BUGFIX: the template previously had no placeholder, so
        # .format(filename=...) was a no-op and every download got the same
        # literal, wrong filename.
        file_response['Content-Disposition'] = 'attachment; filename="{filename}"'.format(filename=filename)
        return file_response

    def perform_create(self, serializer):
        # Orders may not be created for blocked customers.
        project = serializer.validated_data['project']
        structure_utils.check_customer_blocked(project)
        super(OrderViewSet, self).perform_create(serializer)
class ClusterViewSet(structure_views.ImportableResourceViewSet):
    """Clusters: heavy async create/delete, pull, import, kubeconfig download."""

    queryset = models.Cluster.objects.all()
    serializer_class = serializers.ClusterSerializer
    filterset_class = filters.ClusterFilter
    update_executor = executors.ClusterUpdateExecutor

    def perform_create(self, serializer):
        """Persist the cluster and its nodes, then schedule backend creation."""
        cluster = serializer.save()
        current_user = self.request.user
        for node_attrs in serializer.validated_data.get('node_set'):
            node_attrs['cluster'] = cluster
            models.Node.objects.create(**node_attrs)
        # Run the heavy executor only once the surrounding transaction commits.
        transaction.on_commit(
            lambda: executors.ClusterCreateExecutor.execute(
                cluster,
                user=current_user,
                is_heavy_task=True,
            )
        )

    def destroy(self, request, *args, **kwargs):
        """Schedule asynchronous deletion instead of deleting inline."""
        current_user = self.request.user
        cluster = self.get_object()
        executors.ClusterDeleteExecutor.execute(
            cluster,
            user=current_user,
            is_heavy_task=True,
        )
        return response.Response(
            {'detail': _('Deletion was scheduled.')},
            status=status.HTTP_202_ACCEPTED
        )

    update_validators = partial_update_validators = [
        core_validators.StateValidator(models.Cluster.States.OK),
    ]
    destroy_validators = (
        structure_views.ImportableResourceViewSet.destroy_validators
        + [validators.all_cluster_related_vms_can_be_deleted]
    )

    importable_resources_backend_method = 'get_clusters_for_import'
    importable_resources_serializer_class = serializers.ClusterImportableSerializer
    import_resource_serializer_class = serializers.ClusterImportSerializer
    pull_executor = executors.ClusterPullExecutor

    @decorators.action(detail=True, methods=['get'])
    def kubeconfig_file(self, request, uuid=None):
        """Return the kubeconfig of an OK cluster; translate backend failures."""
        cluster = self.get_object()
        try:
            config = cluster.get_backend().get_kubeconfig_file(cluster)
        except exceptions.RancherException:
            raise ValidationError('Unable to get kubeconfig file.')
        return response.Response({'config': config}, status=status.HTTP_200_OK)

    kubeconfig_file_validators = [
        core_validators.StateValidator(models.Cluster.States.OK)
    ]
class NetworkViewSet(structure_views.ResourceViewSet):
    """Networks with nested subnet/port creation and MTU management."""

    queryset = models.Network.objects.all().order_by('name')
    serializer_class = serializers.NetworkSerializer
    filterset_class = filters.NetworkFilter
    disabled_actions = ['create']
    update_executor = executors.NetworkUpdateExecutor
    delete_executor = executors.NetworkDeleteExecutor
    pull_executor = executors.NetworkPullExecutor

    @decorators.action(detail=True, methods=['post'])
    def create_subnet(self, request, uuid=None):
        """Create a subnet in this network and schedule its backend creation."""
        subnet_serializer = self.get_serializer(data=request.data)
        subnet_serializer.is_valid(raise_exception=True)
        new_subnet = subnet_serializer.save()
        executors.SubNetCreateExecutor.execute(new_subnet)
        return response.Response(subnet_serializer.data, status=status.HTTP_201_CREATED)

    create_subnet_validators = [core_validators.StateValidator(models.Network.States.OK)]
    create_subnet_serializer_class = serializers.SubNetSerializer

    @decorators.action(detail=True, methods=['post'])
    def set_mtu(self, request, uuid=None):
        """Update the network MTU and push the change to the backend."""
        mtu_serializer = self.get_serializer(instance=self.get_object(), data=request.data)
        mtu_serializer.is_valid(raise_exception=True)
        updated_network = mtu_serializer.save()
        executors.SetMtuExecutor.execute(updated_network)
        return response.Response(mtu_serializer.data, status=status.HTTP_202_ACCEPTED)

    set_mtu_validators = [core_validators.StateValidator(models.Network.States.OK)]
    set_mtu_serializer_class = serializers.SetMtuSerializer

    @decorators.action(detail=True, methods=['post'])
    def create_port(self, request, uuid=None):
        """Create a port in this network and schedule its backend creation."""
        network: models.Network = self.get_object()
        port_serializer = self.get_serializer(data=request.data)
        port_serializer.is_valid(raise_exception=True)
        new_port: models.Port = port_serializer.save()
        executors.PortCreateExecutor().execute(
            new_port, network=core_utils.serialize_instance(network))
        return response.Response(port_serializer.data, status=status.HTTP_201_CREATED)

    create_port_serializer_class = serializers.PortSerializer
    create_port_validators = [core_validators.StateValidator(models.Network.States.OK)]
class SQLServerViewSet(structure_views.BaseResourceViewSet):
    """SQL servers with a nested database-creation action."""

    queryset = models.SQLServer.objects.all()
    filterset_class = filters.SQLServerFilter
    serializer_class = serializers.SQLServerSerializer
    create_executor = executors.SQLServerCreateExecutor
    delete_executor = executors.SQLServerDeleteExecutor

    @decorators.action(detail=True, methods=['post'])
    def create_database(self, request, uuid=None):
        """Create a database on this server once the transaction commits."""
        db_serializer = self.get_serializer(data=request.data)
        db_serializer.is_valid(raise_exception=True)
        database = db_serializer.save()
        # Defer the backend call until the DB row is durably committed.
        transaction.on_commit(
            lambda: executors.SQLDatabaseCreateExecutor().execute(database))
        return response.Response(
            {
                'status': _('SQL database creation was scheduled'),
                'database_uuid': database.uuid.hex,
            },
            status=status.HTTP_202_ACCEPTED,
        )

    create_database_validators = [
        core_validators.StateValidator(models.SQLServer.States.OK)
    ]
    create_database_serializer_class = serializers.SQLDatabaseCreateSerializer
class BackupViewSet(structure_views.BaseResourceViewSet):
    """Backups: listing, deletion and restore; direct creation is disabled."""

    queryset = models.Backup.objects.all().order_by('name')
    serializer_class = serializers.BackupSerializer
    filter_class = filters.BackupFilter
    disabled_actions = ['create']
    delete_executor = executors.BackupDeleteExecutor

    # method has to be overridden in order to avoid triggering of UpdateExecutor
    # which is a default action for all ResourceViewSet(s)
    def perform_update(self, serializer):
        serializer.save()

    @decorators.detail_route(methods=['post'])
    def restore(self, request, uuid=None):
        """Create a new instance from this backup and schedule provisioning."""
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data)
        serializer.is_valid(raise_exception=True)
        backup_restoration = serializer.save()

        # It is assumed that SSH public key is already stored in OpenStack
        # system volume, therefore we don't need to specify it explicitly for
        # the cloud init service.
        executors.InstanceCreateExecutor.execute(
            backup_restoration.instance,
            flavor=backup_restoration.flavor,
            is_heavy_task=True,
        )
        instance_serializer = serializers.InstanceSerializer(
            backup_restoration.instance, context={'request': self.request})
        return response.Response(instance_serializer.data, status=status.HTTP_201_CREATED)

    restore_validators = [core_validators.StateValidator(models.Backup.States.OK)]
    restore_serializer_class = serializers.BackupRestorationSerializer
class SnapshotViewSet(structure_views.ResourceViewSet):
    """Volume snapshots: update/delete/pull plus restore-to-volume."""

    queryset = models.Snapshot.objects.all().order_by('name')
    serializer_class = serializers.SnapshotSerializer
    update_executor = executors.SnapshotUpdateExecutor
    delete_executor = executors.SnapshotDeleteExecutor
    pull_executor = executors.SnapshotPullExecutor
    filterset_class = filters.SnapshotFilter
    disabled_actions = ['create']

    @decorators.action(detail=True, methods=['post'])
    def restore(self, request, uuid=None):
        """Restore the snapshot into a new volume and schedule the backend job."""
        restore_serializer = self.get_serializer(data=request.data)
        restore_serializer.is_valid(raise_exception=True)
        restoration = restore_serializer.save()
        executors.SnapshotRestorationExecutor().execute(restoration)
        volume_serializer = serializers.VolumeSerializer(
            restoration.volume, context={'request': self.request}
        )
        # Announce the freshly restored volume to interested listeners.
        resource_imported.send(
            sender=models.Volume,
            instance=restoration.volume,
        )
        return response.Response(volume_serializer.data, status=status.HTTP_201_CREATED)

    restore_serializer_class = serializers.SnapshotRestorationSerializer
    restore_validators = [core_validators.StateValidator(models.Snapshot.States.OK)]

    @decorators.action(detail=True, methods=['get'])
    def restorations(self, request, uuid=None):
        """List all restorations performed from this snapshot."""
        history = self.get_object().restorations.all()
        history_serializer = self.get_serializer(history, many=True)
        return response.Response(history_serializer.data, status=status.HTTP_200_OK)

    restorations_serializer_class = serializers.SnapshotRestorationSerializer
class ReviewViewSet(ActionsViewSet):
    """Approve/reject workflow for review requests, looked up via flow UUID."""

    lookup_field = 'flow__uuid'
    disabled_actions = ['create', 'destroy', 'update', 'partial_update']

    @action(detail=True, methods=['post'])
    def approve(self, request, **kwargs):
        """Approve the pending request with an optional reviewer comment."""
        target = self.get_object()
        payload = self.get_serializer(data=request.data)
        payload.is_valid(raise_exception=True)
        target.approve(request.user, payload.validated_data.get('comment'))
        return Response(status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'])
    def reject(self, request, **kwargs):
        """Reject the pending request with an optional reviewer comment."""
        target = self.get_object()
        payload = self.get_serializer(data=request.data)
        payload.is_valid(raise_exception=True)
        target.reject(request.user, payload.validated_data.get('comment'))
        return Response(status=status.HTTP_200_OK)

    approve_serializer_class = (
        reject_serializer_class) = serializers.ReviewCommentSerializer
    approve_validators = reject_validators = [
        core_validators.StateValidator(models.ReviewMixin.States.PENDING)
    ]
class FlowViewSet(ActionsViewSet):
    """Flow trackers with draft-only submit/cancel transitions."""

    queryset = models.FlowTracker.objects.all()
    lookup_field = 'uuid'
    update_validators = partial_update_validators = submit_validators = cancel_validators = [
        core_validators.StateValidator(models.ReviewMixin.States.DRAFT)
    ]
    disabled_actions = ['destroy']
    serializer_class = serializers.FlowSerializer
    filterset_class = filters.FlowFilter

    @action(detail=True, methods=['post'])
    def submit(self, request, uuid=None):
        """Move a draft flow into the review pipeline."""
        self.get_object().submit()
        return Response(status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'])
    def cancel(self, request, uuid=None):
        """Cancel a draft flow."""
        self.get_object().cancel()
        return Response(status=status.HTTP_200_OK)

    def get_queryset(self):
        base = super(FlowViewSet, self).get_queryset()
        # Staff see everything; everyone else sees only their own requests.
        if not self.request.user.is_staff:
            base = base.filter(requested_by=self.request.user)
        return base
class NetworkViewSet(structure_views.BaseResourceViewSet):
    """Networks (creation disabled) with a nested subnet-creation action."""

    queryset = models.Network.objects.all()
    serializer_class = serializers.NetworkSerializer
    filterset_class = filters.NetworkFilter
    disabled_actions = ['create']
    update_executor = executors.NetworkUpdateExecutor
    delete_executor = executors.NetworkDeleteExecutor
    pull_executor = executors.NetworkPullExecutor

    @decorators.action(detail=True, methods=['post'])
    def create_subnet(self, request, uuid=None):
        """Create a subnet, honouring the caller's default-gateway choice."""
        subnet_serializer = self.get_serializer(data=request.data)
        subnet_serializer.is_valid(raise_exception=True)
        new_subnet = subnet_serializer.save()
        gateway_flag = subnet_serializer.validated_data['enable_default_gateway']
        executors.SubNetCreateExecutor.execute(
            new_subnet, enable_default_gateway=gateway_flag
        )
        return response.Response(subnet_serializer.data, status=status.HTTP_201_CREATED)

    create_subnet_validators = [core_validators.StateValidator(models.Network.States.OK)]
    create_subnet_serializer_class = serializers.SubNetSerializer
class OfferingViewSet(CheckExtensionMixin, core_views.ActionsViewSet):
    """Support offerings: creation files an issue; staff complete/terminate."""

    queryset = models.Offering.objects.all()
    serializer_class = serializers.OfferingSerializer
    lookup_field = 'uuid'
    metadata_class = structure_metadata.ActionsMetadata
    filter_backends = (
        structure_filters.GenericRoleFilter,
        DjangoFilterBackend,
    )
    filter_class = filters.OfferingFilter

    @decorators.list_route()
    def configured(self, request):
        """Expose the config of every registered offering template."""
        templates = models.OfferingTemplate.objects.all()
        return response.Response(
            {template.name: template.config for template in templates},
            status=status.HTTP_200_OK,
        )

    @transaction.atomic()
    def create(self, request, *args, **kwargs):
        """Create an offering and file the matching support issue atomically."""
        create_serializer = self.get_serializer(data=request.data)
        create_serializer.is_valid(raise_exception=True)
        offering = create_serializer.save()
        backend.get_active_backend().create_issue(offering.issue)
        return response.Response(create_serializer.data, status=status.HTTP_201_CREATED)

    create_serializer_class = serializers.OfferingCreateSerializer
    create_permissions = [
        structure_permissions.is_owner,
        structure_permissions.is_manager,
        structure_permissions.is_administrator,
    ]

    def offering_is_in_requested_state(offering):
        # Plain-function validator used by the `complete` action.
        if offering.state != models.Offering.States.REQUESTED:
            raise rf_exceptions.ValidationError(_('Offering must be in requested state.'))

    @decorators.detail_route(methods=['post'])
    def complete(self, request, uuid=None):
        """Mark a requested offering as completed (staff only)."""
        complete_serializer = self.get_serializer(
            instance=self.get_object(), data=request.data)
        complete_serializer.is_valid(raise_exception=True)
        complete_serializer.save()
        return response.Response({'status': _('Offering is marked as completed.')},
                                 status=status.HTTP_200_OK)

    complete_validators = [offering_is_in_requested_state]
    complete_permissions = [structure_permissions.is_staff]
    complete_serializer_class = serializers.OfferingCompleteSerializer

    @decorators.detail_route(methods=['post'])
    def terminate(self, request, uuid=None):
        """Mark the offering as terminated and timestamp it (staff only)."""
        offering = self.get_object()
        offering.state = models.Offering.States.TERMINATED
        offering.terminated_at = timezone.now()
        offering.save()
        return response.Response({'status': _('Offering is marked as terminated.')},
                                 status=status.HTTP_200_OK)

    terminate_permissions = [structure_permissions.is_staff]

    update_permissions = partial_update_permissions = [structure_permissions.is_staff]
    destroy_permissions = [structure_permissions.is_staff]
    destroy_validators = [core_validators.StateValidator(models.Offering.States.TERMINATED)]
class VirtualMachineViewSet(structure_views.ResourceViewSet):
    """Virtual machines with start/stop/restart power actions."""

    queryset = models.VirtualMachine.objects.all().order_by('name')
    filterset_class = filters.VirtualMachineFilter
    serializer_class = serializers.VirtualMachineSerializer
    create_executor = executors.VirtualMachineCreateExecutor
    delete_executor = executors.VirtualMachineDeleteExecutor
    pull_executor = executors.VirtualMachinePullExecutor

    @decorators.action(detail=True, methods=['post'])
    def start(self, request, uuid=None):
        """Schedule powering on a stopped machine."""
        vm = self.get_object()
        executors.VirtualMachineStartExecutor().execute(vm)
        return response.Response({'status': _('start was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    start_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator('stopped'),
    ]
    start_serializer_class = rf_serializers.Serializer

    @decorators.action(detail=True, methods=['post'])
    def stop(self, request, uuid=None):
        """Schedule powering off a running machine."""
        vm = self.get_object()
        executors.VirtualMachineStopExecutor().execute(vm)
        return response.Response({'status': _('stop was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    stop_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator('running'),
    ]
    stop_serializer_class = rf_serializers.Serializer

    @decorators.action(detail=True, methods=['post'])
    def restart(self, request, uuid=None):
        """Schedule rebooting a running machine."""
        vm = self.get_object()
        executors.VirtualMachineRestartExecutor().execute(vm)
        return response.Response({'status': _('restart was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    restart_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator('running'),
    ]
    restart_serializer_class = rf_serializers.Serializer
class ResourceViewSet(core_mixins.ExecutorMixin, core_views.ActionsViewSet):
    """
    Basic view set for all resource view sets.

    Supplies default state validators for update/destroy, an optional `pull`
    action driven by a pluggable executor, and a staff-only `unlink` escape
    hatch that removes the database record without touching the backend.
    """

    lookup_field = 'uuid'
    filter_backends = (filters.GenericRoleFilter, DjangoFilterBackend)
    # Unsafe HTTP methods (POST/PUT/PATCH/DELETE) require the admin role.
    unsafe_methods_permissions = [permissions.is_administrator]
    update_validators = partial_update_validators = [
        core_validators.StateValidator(models.BaseResource.States.OK)
    ]
    destroy_validators = [
        core_validators.StateValidator(models.BaseResource.States.OK,
                                       models.BaseResource.States.ERRED)
    ]

    @action(detail=True, methods=['post'])
    def pull(self, request, uuid=None):
        """Schedule synchronising the resource from the backend."""
        # Subclasses opt in by assigning a concrete pull_executor.
        if self.pull_executor == NotImplemented:
            return Response(
                {'detail': _('Pull operation is not implemented.')},
                status=status.HTTP_409_CONFLICT,
            )
        self.pull_executor.execute(self.get_object())
        return Response(
            {'detail': _('Pull operation was successfully scheduled.')},
            status=status.HTTP_202_ACCEPTED,
        )

    pull_executor = NotImplemented
    pull_validators = [
        core_validators.StateValidator(models.BaseResource.States.OK,
                                       models.BaseResource.States.ERRED),
        check_resource_backend_id,
    ]

    @action(detail=True, methods=['post'])
    def unlink(self, request, resource, uuid=None):
        """
        Delete resource from the database without scheduling operations on
        backend and without checking current state of the resource. It is
        intended to be used for removing a resource stuck in a transitioning
        state.
        """
        obj = self.get_object()
        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    unlink_permissions = [permissions.is_staff]
class SecurityGroupViewSet(structure_views.BaseResourceViewSet):
    """Security groups: update/delete (the OpenStack-managed 'default' group
    is protected) and bulk replacement of rules."""

    queryset = models.SecurityGroup.objects.all()
    serializer_class = serializers.SecurityGroupSerializer
    filterset_class = filters.SecurityGroupFilter
    disabled_actions = [
        'create',
        'pull',  # pull operation should be implemented in WAL-323
    ]

    def default_security_group_validator(security_group):
        # The 'default' group must not be renamed or removed from this side.
        if security_group.name == 'default':
            raise exceptions.ValidationError({
                'name': _('Default security group is managed by OpenStack itself.')
            })

    update_validators = partial_update_validators = (
        structure_views.ResourceViewSet.update_validators
        + [default_security_group_validator])
    update_executor = executors.SecurityGroupUpdateExecutor
    partial_update_serializer_class = (
        update_serializer_class) = serializers.SecurityGroupUpdateSerializer

    destroy_validators = structure_views.ResourceViewSet.destroy_validators + [
        default_security_group_validator
    ]
    delete_executor = executors.SecurityGroupDeleteExecutor

    @decorators.action(detail=True, methods=['POST'])
    def set_rules(self, request, uuid=None):
        """ WARNING! Auto-generated HTML form is wrong for this endpoint. List should be defined as input.

        Example:
        [
            {
                "protocol": "tcp",
                "from_port": 1,
                "to_port": 10,
                "cidr": "10.1.1.0/24"
            }
        ]
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()

        executors.PushSecurityGroupRulesExecutor().execute(self.get_object())
        return response.Response(
            {'status': _('Rules update was successfully scheduled.')},
            status=status.HTTP_202_ACCEPTED,
        )

    # NOTE(review): this references models.Tenant.States rather than
    # models.SecurityGroup.States — presumably both derive from the same
    # state mixin so the constants coincide; confirm before changing.
    set_rules_validators = [
        core_validators.StateValidator(models.Tenant.States.OK)
    ]
    set_rules_serializer_class = serializers.SecurityGroupRuleListUpdateSerializer
class OfferingActivateRequestViewSet(ReviewViewSet):
    """Offering state-change requests adding submit/cancel to the review flow."""

    lookup_field = 'uuid'
    queryset = models.OfferingStateRequest.objects.all()
    approve_permissions = reject_permissions = [structure_permissions.is_staff]
    filterset_class = filters.OfferingActivateRequestFilter
    serializer_class = serializers.OfferingActivateRequestSerializer
    disabled_actions = ['destroy', 'update', 'partial_update']

    def get_queryset(self):
        base = super(OfferingActivateRequestViewSet, self).get_queryset()
        # Non-staff users may only see requests they created themselves.
        if not self.request.user.is_staff:
            base = base.filter(requested_by=self.request.user)
        return base

    @action(detail=True, methods=['post'])
    def submit(self, request, **kwargs):
        """Promote a draft request to pending review."""
        self.get_object().submit()
        return Response(status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'])
    def cancel(self, request, **kwargs):
        """Cancel a draft or pending request."""
        self.get_object().cancel()
        return Response(status=status.HTTP_200_OK)

    approve_validators = reject_validators = [
        core_validators.StateValidator(models.ReviewMixin.States.PENDING)
    ]
    submit_validators = [
        core_validators.StateValidator(models.ReviewMixin.States.DRAFT)
    ]
    cancel_validators = [
        core_validators.StateValidator(models.ReviewMixin.States.DRAFT,
                                       models.ReviewMixin.States.PENDING)
    ]
class JobViewSet(core_mixins.CreateExecutorMixin, core_views.ActionsViewSet):
    """Ansible jobs: creation and deletion are delegated to executors."""

    # Lookup and filtering.
    lookup_field = 'uuid'
    queryset = models.Job.objects.all().order_by('pk')
    filter_backends = (structure_filters.GenericRoleFilter, DjangoFilterBackend)
    filterset_class = filters.AnsibleJobsFilter

    # Permissions and presentation.
    unsafe_methods_permissions = [structure_permissions.is_administrator]
    serializer_class = serializers.JobSerializer
    metadata_class = structure_metadata.ActionsMetadata

    # Lifecycle executors and destroy preconditions.
    create_executor = executors.RunJobExecutor
    delete_executor = executors.DeleteJobExecutor
    destroy_validators = [
        check_all_related_resource_are_stable,
        core_validators.StateValidator(models.Job.States.OK, models.Job.States.ERRED),
    ]
class RouterViewSet(core_views.ReadOnlyActionsViewSet):
    """Read-only routers with an action that replaces their static routes."""

    lookup_field = 'uuid'
    queryset = models.Router.objects.all().order_by('tenant__name')
    filter_backends = (DjangoFilterBackend, structure_filters.GenericRoleFilter)
    filterset_class = filters.RouterFilter
    serializer_class = serializers.RouterSerializer

    @decorators.action(detail=True, methods=['POST'])
    def set_routes(self, request, uuid=None):
        """Replace the router's static routes and push them to the backend."""
        router = self.get_object()
        routes_serializer = self.get_serializer(router, data=request.data)
        routes_serializer.is_valid(raise_exception=True)
        previous_routes = router.routes
        updated_routes = routes_serializer.validated_data['routes']
        router.routes = updated_routes
        router.save(update_fields=['routes'])
        executors.RouterSetRoutesExecutor().execute(router)
        # Record the change both in the event log and the application log.
        event_logger.openstack_router.info(
            'Static routes have been updated.',
            event_type='openstack_router_updated',
            event_context={
                'router': router,
                'old_routes': previous_routes,
                'new_routes': updated_routes,
                'tenant_backend_id': router.tenant.backend_id,
            },
        )
        logger.info(
            'Static routes have been updated for router %s from %s to %s.',
            router, previous_routes, updated_routes,
        )
        return response.Response(
            {'status': _('Routes update was successfully scheduled.')},
            status=status.HTTP_202_ACCEPTED,
        )

    set_routes_serializer_class = serializers.RouterSetRoutesSerializer
    set_routes_validators = [core_validators.StateValidator(models.Router.States.OK)]
class DiskViewSet(structure_views.BaseResourceViewSet):
    """Disks: create/update disabled; supports pull, delete and `extend`."""

    queryset = models.Disk.objects.all().order_by('-created')
    serializer_class = serializers.DiskSerializer
    filterset_class = filters.DiskFilter
    disabled_actions = ['create', 'update', 'partial_update']
    pull_executor = executors.DiskPullExecutor
    delete_executor = executors.DiskDeleteExecutor

    @action(detail=True, methods=['post'])
    def extend(self, request, uuid=None):
        """Increase disk capacity."""
        disk = self.get_object()
        serializer = self.get_serializer(disk, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        disk.refresh_from_db()
        # Defer the backend call until the new size is durably committed.
        transaction.on_commit(
            lambda: executors.DiskExtendExecutor().execute(disk))
        return Response({'status': _('extend was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    def validate_total_size(disk):
        # Plain-function validator: require at least 1024 of headroom against
        # both the per-disk and the per-VM storage limits.
        options = disk.vm.service_settings.options
        max_disk = serializers.get_int_or_none(options, 'max_disk')
        # BUGFIX: the previous check used abs(max_disk - disk.size), which
        # silently accepted disks already exceeding the limit by more than
        # 1024. Mirror the max_disk_total headroom check below instead.
        if max_disk and max_disk - disk.size < 1024:
            raise rf_serializers.ValidationError(
                'Storage limit has been reached.')

        max_disk_total = serializers.get_int_or_none(options, 'max_disk_total')
        if max_disk_total:
            remaining_quota = max_disk_total - disk.vm.total_disk
            if remaining_quota < 1024:
                raise rf_serializers.ValidationError(
                    'Storage quota has been reached.')

    extend_validators = [
        core_validators.StateValidator(models.Disk.States.OK),
        validate_total_size,
    ]
    extend_serializer_class = serializers.DiskExtendSerializer
class ResourceViewSet(core_views.ReadOnlyActionsViewSet):
    """Booking resources with accept/reject actions while creation is pending."""

    queryset = models.Resource.objects.filter(
        offering__type=PLUGIN_NAME).order_by('name')
    filter_backends = (
        DjangoFilterBackend,
        filters.ResourceOwnerOrCreatorFilterBackend,
    )
    filterset_class = filters.BookingResourceFilter
    lookup_field = 'uuid'
    serializer_class = serializers.BookingResourceSerializer

    @action(detail=True, methods=['post'])
    def reject(self, request, uuid=None):
        """Cancel the pending creation of this booking resource."""
        booking = self.get_object()
        with transaction.atomic():
            item = resource_creation_canceled(booking, validate=True)
        return Response({'order_item_uuid': item.uuid.hex}, status=status.HTTP_200_OK)

    @action(detail=True, methods=['post'])
    def accept(self, request, uuid=None):
        """Confirm the pending creation of this booking resource."""
        booking = self.get_object()
        with transaction.atomic():
            item = resource_creation_succeeded(booking, validate=True)
        return Response({'order_item_uuid': item.uuid.hex}, status=status.HTTP_200_OK)

    reject_validators = accept_validators = [
        core_validators.StateValidator(models.Resource.States.CREATING)
    ]
    accept_permissions = [marketplace_permissions.user_is_owner_or_service_manager]
class ClusterViewSet(structure_views.ImportableResourceViewSet):
    """Clusters whose creation is scheduled as a single heavy backend task."""

    queryset = models.Cluster.objects.all()
    serializer_class = serializers.ClusterSerializer
    filterset_class = filters.ClusterFilter
    delete_executor = executors.ClusterDeleteExecutor
    update_executor = executors.ClusterUpdateExecutor

    def perform_create(self, serializer):
        """Save the cluster, then hand nodes + requesting user to the executor."""
        cluster = serializer.save()
        executors.ClusterCreateExecutor.execute(
            cluster,
            nodes=serializer.validated_data.get('node_set'),
            user=self.request.user,
            is_heavy_task=True,
        )

    update_validators = partial_update_validators = [
        core_validators.StateValidator(models.Cluster.States.OK),
    ]

    importable_resources_backend_method = 'get_clusters_for_import'
    importable_resources_serializer_class = serializers.ClusterImportableSerializer
    import_resource_serializer_class = serializers.ClusterImportSerializer
class VolumeViewSet(structure_views.ImportableResourceViewSet):
    """
    OpenStack volumes: full lifecycle plus extend/snapshot/attach/detach/
    retype actions and import of pre-existing backend volumes.
    """

    queryset = models.Volume.objects.all()
    serializer_class = serializers.VolumeSerializer
    filterset_class = filters.VolumeFilter
    create_executor = executors.VolumeCreateExecutor
    update_executor = executors.VolumeUpdateExecutor
    pull_executor = executors.VolumePullExecutor

    def _can_destroy_volume(volume):
        # Plain-function validator for `destroy`.
        # ERRED volumes may always be deleted (cleanup path).
        if volume.state == models.Volume.States.ERRED:
            return
        if volume.state != models.Volume.States.OK:
            raise core_exceptions.IncorrectStateException(
                _('Volume should be in OK state.'))
        # Additionally require a deletable backend runtime state
        # ('' covers volumes that report no runtime state at all).
        core_validators.RuntimeStateValidator('available', 'error', 'error_restoring', 'error_extending', '')(volume)

    def _volume_snapshots_exist(volume):
        # Deletion is blocked while dependent snapshots exist.
        if volume.snapshots.exists():
            raise core_exceptions.IncorrectStateException(
                _('Volume has dependent snapshots.'))

    delete_executor = executors.VolumeDeleteExecutor
    destroy_validators = [
        _can_destroy_volume,
        _volume_snapshots_exist,
    ]

    def _is_volume_bootable(volume):
        # Bootable volumes may not be extended or detached.
        if volume.bootable:
            raise core_exceptions.IncorrectStateException(
                _('Volume cannot be bootable.'))

    def _is_volume_instance_shutoff(volume):
        # The attached instance (if any) must be powered off.
        if (volume.instance and
                volume.instance.runtime_state != models.Instance.RuntimeStates.SHUTOFF):
            raise core_exceptions.IncorrectStateException(
                _('Volume instance should be in shutoff state.'))

    def _is_volume_instance_ok(volume):
        # The attached instance (if any) must be in a stable OK state.
        if volume.instance and volume.instance.state != models.Instance.States.OK:
            raise core_exceptions.IncorrectStateException(
                _('Volume instance should be in OK state.'))

    @decorators.action(detail=True, methods=['post'])
    def extend(self, request, uuid=None):
        """ Increase volume size """
        volume = self.get_object()
        old_size = volume.size
        serializer = self.get_serializer(volume, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()

        # Re-read so the executor sees the size persisted by the serializer.
        volume.refresh_from_db()
        executors.VolumeExtendExecutor().execute(volume, old_size=old_size, new_size=volume.size)

        return response.Response({'status': _('extend was scheduled')}, status=status.HTTP_202_ACCEPTED)

    extend_validators = [
        _is_volume_bootable,
        _is_volume_instance_ok,
        _is_volume_instance_shutoff,
        core_validators.StateValidator(models.Volume.States.OK),
    ]
    extend_serializer_class = serializers.VolumeExtendSerializer

    @decorators.action(detail=True, methods=['post'])
    def snapshot(self, request, uuid=None):
        """ Create snapshot from volume """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        snapshot = serializer.save()

        executors.SnapshotCreateExecutor().execute(snapshot)
        return response.Response(serializer.data, status=status.HTTP_201_CREATED)

    snapshot_serializer_class = serializers.SnapshotSerializer

    @decorators.action(detail=True, methods=['post'])
    def create_snapshot_schedule(self, request, uuid=None):
        # Schedules are database-side objects; no backend executor is needed.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return response.Response(serializer.data, status=status.HTTP_201_CREATED)

    create_snapshot_schedule_validators = [
        core_validators.StateValidator(models.Volume.States.OK)
    ]
    create_snapshot_schedule_serializer_class = serializers.SnapshotScheduleSerializer

    @decorators.action(detail=True, methods=['post'])
    def attach(self, request, uuid=None):
        """ Attach volume to instance """
        volume = self.get_object()
        serializer = self.get_serializer(volume, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()

        executors.VolumeAttachExecutor().execute(volume)
        return response.Response({'status': _('attach was scheduled')}, status=status.HTTP_202_ACCEPTED)

    attach_validators = [
        core_validators.RuntimeStateValidator('available'),
        core_validators.StateValidator(models.Volume.States.OK),
    ]
    attach_serializer_class = serializers.VolumeAttachSerializer

    @decorators.action(detail=True, methods=['post'])
    def detach(self, request, uuid=None):
        """ Detach instance from volume """
        volume = self.get_object()
        executors.VolumeDetachExecutor().execute(volume)
        return response.Response({'status': _('detach was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    detach_validators = [
        _is_volume_bootable,
        core_validators.RuntimeStateValidator('in-use'),
        core_validators.StateValidator(models.Volume.States.OK),
    ]

    @decorators.action(detail=True, methods=['post'])
    def retype(self, request, uuid=None):
        """ Retype detached volume """
        volume = self.get_object()
        serializer = self.get_serializer(volume, data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()

        executors.VolumeRetypeExecutor().execute(volume)
        return response.Response({'status': _('retype was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    retype_validators = [
        core_validators.RuntimeStateValidator('available'),
        core_validators.StateValidator(models.Volume.States.OK),
    ]
    retype_serializer_class = serializers.VolumeRetypeSerializer

    importable_resources_backend_method = 'get_volumes_for_import'
    importable_resources_serializer_class = serializers.VolumeImportableSerializer
    import_resource_serializer_class = serializers.VolumeImportSerializer
class ResourceViewSet(core_views.ReadOnlyActionsViewSet): queryset = models.Resource.objects.exclude(state=models.Resource.States.TERMINATED) filter_backends = ( structure_filters.GenericRoleFilter, DjangoFilterBackend, filters.ResourceScopeFilterBackend ) filter_class = filters.ResourceFilter lookup_field = 'uuid' serializer_class = serializers.ResourceSerializer @detail_route(methods=['post']) def terminate(self, request, uuid=None): resource = self.get_object() with transaction.atomic(): order_item = models.OrderItem( resource=resource, offering=resource.offering, type=models.OrderItem.Types.TERMINATE, ) order = serializers.create_order( project=resource.project, user=self.request.user, items=[order_item], request=request, ) return Response({'order_uuid': order.uuid}, status=status.HTTP_200_OK) @detail_route(methods=['post']) def switch_plan(self, request, uuid=None): resource = self.get_object() serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) plan = serializer.validated_data['plan'] with transaction.atomic(): order_item = models.OrderItem( resource=resource, offering=resource.offering, plan=plan, type=models.OrderItem.Types.UPDATE, limits=resource.limits or {}, ) order = serializers.create_order( project=resource.project, user=self.request.user, items=[order_item], request=request, ) return Response({'order_uuid': order.uuid}, status=status.HTTP_200_OK) switch_plan_serializer_class = serializers.ResourceSwitchPlanSerializer def check_permissions_for_resource_actions(request, view, resource=None): if not resource: return structure_permissions.is_administrator(request, view, resource) terminate_permissions = \ switch_plan_permissions = [check_permissions_for_resource_actions] switch_plan_validators = \ terminate_validators = [core_validators.StateValidator(models.Resource.States.OK), structure_utils.check_customer_blocked]
class VirtualMachineViewSet(structure_views.BaseResourceViewSet): queryset = models.VirtualMachine.objects.all() serializer_class = serializers.VirtualMachineSerializer delete_executor = executors.VirtualMachineDeleteExecutor @decorators.detail_route() def rdp(self, request, uuid=None): vm = self.get_object() try: rdp_endpoint = vm.endpoints.get( name=models.InstanceEndpoint.Name.RDP) except models.InstanceEndpoint.DoesNotExist: raise exceptions.NotFound( "This virtual machine doesn't run remote desktop") response = HttpResponse(content_type='application/x-rdp') response[ 'Content-Disposition'] = 'attachment; filename="{}.rdp"'.format( vm.name) response.write("full address:s:%s.cloudapp.net:%s\n" "prompt for credentials:i:1\n\n" % (vm.service_project_link.cloud_service_name, rdp_endpoint.public_port)) return response rdp_validators = [ core_validators.StateValidator(models.VirtualMachine.States.OK), core_validators.RuntimeStateValidator('running') ] @decorators.detail_route(methods=['post']) def start(self, request, uuid=None): virtual_machine = self.get_object() executors.VirtualMachineStartExecutor().execute(virtual_machine) return response.Response({'status': _('start was scheduled')}, status=status.HTTP_202_ACCEPTED) start_validators = [ core_validators.StateValidator(models.VirtualMachine.States.OK), core_validators.RuntimeStateValidator('stopped') ] start_serializer_class = rf_serializers.Serializer @decorators.detail_route(methods=['post']) def stop(self, request, uuid=None): virtual_machine = self.get_object() executors.VirtualMachineStopExecutor().execute(virtual_machine) return response.Response({'status': _('stop was scheduled')}, status=status.HTTP_202_ACCEPTED) stop_validators = [ core_validators.StateValidator(models.VirtualMachine.States.OK), core_validators.RuntimeStateValidator('running') ] stop_serializer_class = rf_serializers.Serializer @decorators.detail_route(methods=['post']) def restart(self, request, uuid=None): virtual_machine = 
self.get_object() executors.VirtualMachineRestartExecutor().execute(virtual_machine) return response.Response({'status': _('restart was scheduled')}, status=status.HTTP_202_ACCEPTED) restart_validators = [ core_validators.StateValidator(models.VirtualMachine.States.OK), core_validators.RuntimeStateValidator('running') ] restart_serializer_class = rf_serializers.Serializer def perform_create(self, serializer): instance = serializer.save() executors.VirtualMachineCreateExecutor.execute( instance, backend_image_id=serializer.validated_data['image'].backend_id, backend_size_id=serializer.validated_data['size'].pk, )
class ResourceViewSet(core_views.ReadOnlyActionsViewSet): queryset = models.Resource.objects.all() filter_backends = (DjangoFilterBackend, filters.ResourceScopeFilterBackend) filterset_class = filters.ResourceFilter lookup_field = 'uuid' serializer_class = serializers.ResourceSerializer def get_queryset(self): """ Resources are available to both service provider and service consumer. """ if self.request.user.is_staff or self.request.user.is_support: return self.queryset return self.queryset.filter( Q( project__permissions__user=self.request.user, project__permissions__is_active=True, ) | Q( project__customer__permissions__user=self.request.user, project__customer__permissions__is_active=True, ) | Q( offering__customer__permissions__user=self.request.user, offering__customer__permissions__is_active=True, )).distinct() @action(detail=True, methods=['post']) def terminate(self, request, uuid=None): resource = self.get_object() serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) attributes = serializer.validated_data.get('attributes', {}) with transaction.atomic(): order_item = models.OrderItem( resource=resource, offering=resource.offering, type=models.OrderItem.Types.TERMINATE, attributes=attributes, ) order = serializers.create_order( project=resource.project, user=self.request.user, items=[order_item], request=request, ) return Response({'order_uuid': order.uuid.hex}, status=status.HTTP_200_OK) terminate_serializer_class = serializers.ResourceTerminateSerializer terminate_permissions = [permissions.user_can_terminate_resource] terminate_validators = [ core_validators.StateValidator(models.Resource.States.OK, models.Resource.States.ERRED), structure_utils.check_customer_blocked, ] @action(detail=True, methods=['post']) def switch_plan(self, request, uuid=None): resource = self.get_object() serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) plan = serializer.validated_data['plan'] 
with transaction.atomic(): order_item = models.OrderItem( resource=resource, offering=resource.offering, old_plan=resource.plan, plan=plan, type=models.OrderItem.Types.UPDATE, limits=resource.limits or {}, ) order = serializers.create_order( project=resource.project, user=self.request.user, items=[order_item], request=request, ) return Response({'order_uuid': order.uuid.hex}, status=status.HTTP_200_OK) switch_plan_serializer_class = serializers.ResourceSwitchPlanSerializer @action(detail=True, methods=['post']) def update_limits(self, request, uuid=None): resource = self.get_object() serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) limits = serializer.validated_data['limits'] with transaction.atomic(): order_item = models.OrderItem( resource=resource, offering=resource.offering, plan=resource.plan, type=models.OrderItem.Types.UPDATE, limits=limits, attributes={'old_limits': resource.limits}, ) order = serializers.create_order( project=resource.project, user=self.request.user, items=[order_item], request=request, ) return Response({'order_uuid': order.uuid.hex}, status=status.HTTP_200_OK) update_limits_serializer_class = serializers.ResourceUpdateLimitsSerializer switch_plan_permissions = update_limits_permissions = [ structure_permissions.is_administrator ] switch_plan_validators = update_limits_validators = [ core_validators.StateValidator(models.Resource.States.OK), structure_utils.check_customer_blocked, ] @action(detail=True, methods=['get']) def plan_periods(self, request, uuid=None): resource = self.get_object() qs = models.ResourcePlanPeriod.objects.filter(resource=resource) qs = qs.filter(Q(end=None) | Q(end__gte=month_start(timezone.now()))) serializer = serializers.ResourcePlanPeriodSerializer(qs, many=True) return Response(serializer.data, status=status.HTTP_200_OK)
class TenantViewSet(structure_views.ResourceViewSet): queryset = models.Tenant.objects.all().order_by('name') serializer_class = serializers.TenantSerializer filterset_class = structure_filters.BaseResourceFilter create_executor = executors.TenantCreateExecutor update_executor = executors.TenantUpdateExecutor pull_executor = executors.TenantPullExecutor def delete_permission_check(request, view, obj=None): if not obj: return if obj.service_settings.shared: if settings.WALDUR_OPENSTACK['MANAGER_CAN_MANAGE_TENANTS']: structure_permissions.is_manager(request, view, obj, soft_deleted_projects=True) elif settings.WALDUR_OPENSTACK['ADMIN_CAN_MANAGE_TENANTS']: structure_permissions.is_administrator( request, view, obj, soft_deleted_projects=True) else: structure_permissions.is_owner(request, view, obj, soft_deleted_projects=True) else: structure_permissions.is_administrator(request, view, obj, soft_deleted_projects=True) delete_executor = executors.TenantDeleteExecutor destroy_permissions = [ delete_permission_check, structure_permissions.check_access_to_services_management, ] create_permissions = update_permissions = partial_update_permissions = [ structure_permissions.check_access_to_services_management, ] @decorators.action(detail=True, methods=['post']) def set_quotas(self, request, uuid=None): """ A quota can be set for a particular tenant. Only staff users can do that. In order to set quota submit **POST** request to */api/openstack-tenants/<uuid>/set_quotas/*. The quota values are propagated to the backend. The following quotas are supported. All values are expected to be integers: - instances - maximal number of created instances. - ram - maximal size of ram for allocation. In MiB_. - storage - maximal size of storage for allocation. In MiB_. - vcpu - maximal number of virtual cores for allocation. - security_group_count - maximal number of created security groups. - security_group_rule_count - maximal number of created security groups rules. 
- volumes - maximal number of created volumes. - snapshots - maximal number of created snapshots. It is possible to update quotas by one or by submitting all the fields in one request. Waldur will attempt to update the provided quotas. Please note, that if provided quotas are conflicting with the backend (e.g. requested number of instances is below of the already existing ones), some quotas might not be applied. .. _MiB: http://en.wikipedia.org/wiki/Mebibyte Example of a valid request (token is user specific): .. code-block:: http POST /api/openstack-tenants/c84d653b9ec92c6cbac41c706593e66f567a7fa4/set_quotas/ HTTP/1.1 Content-Type: application/json Accept: application/json Host: example.com { "instances": 30, "ram": 100000, "storage": 1000000, "vcpu": 30, "security_group_count": 100, "security_group_rule_count": 100, "volumes": 10, "snapshots": 20 } Response code of a successful request is **202 ACCEPTED**. In case tenant is in a non-stable status, the response would be **409 CONFLICT**. In this case REST client is advised to repeat the request after some time. On successful completion the task will synchronize quotas with the backend. 
""" tenant = self.get_object() serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) quotas = dict(serializer.validated_data) for quota_name, limit in quotas.items(): tenant.set_quota_limit(quota_name, limit) executors.TenantPushQuotasExecutor.execute(tenant, quotas=quotas) return response.Response( {'detail': _('Quota update has been scheduled')}, status=status.HTTP_202_ACCEPTED, ) set_quotas_permissions = [structure_permissions.is_staff] set_quotas_validators = [ core_validators.StateValidator(models.Tenant.States.OK) ] set_quotas_serializer_class = serializers.TenantQuotaSerializer @decorators.action(detail=True, methods=['post']) def create_network(self, request, uuid=None): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) network = serializer.save() executors.NetworkCreateExecutor().execute(network) return response.Response(serializer.data, status=status.HTTP_201_CREATED) create_network_validators = [ core_validators.StateValidator(models.Tenant.States.OK) ] create_network_serializer_class = serializers.NetworkSerializer def external_network_is_defined(tenant): if not tenant.external_network_id: raise core_exceptions.IncorrectStateException( _('Cannot create floating IP if tenant external network is not defined.' 
)) @decorators.action(detail=True, methods=['post']) def create_floating_ip(self, request, uuid=None): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) floating_ip = serializer.save() executors.FloatingIPCreateExecutor.execute(floating_ip) return response.Response(serializer.data, status=status.HTTP_201_CREATED) create_floating_ip_validators = [ core_validators.StateValidator(models.Tenant.States.OK), external_network_is_defined, ] create_floating_ip_serializer_class = serializers.FloatingIPSerializer @decorators.action(detail=True, methods=['post']) def pull_floating_ips(self, request, uuid=None): tenant = self.get_object() executors.TenantPullFloatingIPsExecutor.execute(tenant) return response.Response(status=status.HTTP_202_ACCEPTED) pull_floating_ips_validators = [ core_validators.StateValidator(models.Tenant.States.OK) ] pull_floating_ips_serializer_class = rf_serializers.Serializer @decorators.action(detail=True, methods=['post']) def create_security_group(self, request, uuid=None): """ Example of a request: .. 
code-block:: http { "name": "Security group name", "description": "description", "rules": [ { "protocol": "tcp", "from_port": 1, "to_port": 10, "cidr": "10.1.1.0/24" }, { "protocol": "udp", "from_port": 10, "to_port": 8000, "cidr": "10.1.1.0/24" } ] } """ serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) security_group = serializer.save() executors.SecurityGroupCreateExecutor().execute(security_group) return response.Response(serializer.data, status=status.HTTP_201_CREATED) create_security_group_validators = [ core_validators.StateValidator(models.Tenant.States.OK) ] create_security_group_serializer_class = serializers.SecurityGroupSerializer @decorators.action(detail=True, methods=['post']) def pull_security_groups(self, request, uuid=None): executors.TenantPullSecurityGroupsExecutor.execute(self.get_object()) return response.Response( {'status': _('Security groups pull has been scheduled.')}, status=status.HTTP_202_ACCEPTED, ) pull_security_groups_validators = [ core_validators.StateValidator(models.Tenant.States.OK) ] @decorators.action(detail=True, methods=['post']) def change_password(self, request, uuid=None): serializer = self.get_serializer(instance=self.get_object(), data=request.data) serializer.is_valid(raise_exception=True) serializer.save() executors.TenantChangeUserPasswordExecutor.execute(self.get_object()) return response.Response( {'status': _('Password update has been scheduled.')}, status=status.HTTP_202_ACCEPTED, ) change_password_serializer_class = serializers.TenantChangePasswordSerializer change_password_validators = [ core_validators.StateValidator(models.Tenant.States.OK) ] @decorators.action(detail=True, methods=['post']) def pull_quotas(self, request, uuid=None): executors.TenantPullQuotasExecutor.execute(self.get_object()) return response.Response( {'status': _('Quotas pull has been scheduled.')}, status=status.HTTP_202_ACCEPTED, ) pull_quotas_validators = [ 
core_validators.StateValidator(models.Tenant.States.OK) ]
class InstanceViewSet(structure_views.ImportableResourceViewSet): """ OpenStack instance permissions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Staff members can list all available VM instances in any service. - Customer owners can list all VM instances in all the services that belong to any of the customers they own. - Project administrators can list all VM instances, create new instances and start/stop/restart instances in all the services that are connected to any of the projects they are administrators in. - Project managers can list all VM instances in all the services that are connected to any of the projects they are managers in. """ queryset = models.Instance.objects.all() serializer_class = serializers.InstanceSerializer filterset_class = filters.InstanceFilter filter_backends = structure_views.ResourceViewSet.filter_backends + ( structure_filters.StartTimeFilter, ) pull_executor = executors.InstancePullExecutor pull_serializer_class = rf_serializers.Serializer update_executor = executors.InstanceUpdateExecutor update_validators = partial_update_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] def perform_create(self, serializer): instance = serializer.save() executors.InstanceCreateExecutor.execute( instance, ssh_key=serializer.validated_data.get('ssh_public_key'), flavor=serializer.validated_data['flavor'], is_heavy_task=True, ) def _has_backups(instance): if instance.backups.exists(): raise core_exceptions.IncorrectStateException( _('Cannot delete instance that has backups.')) def _can_destroy_instance(instance): if instance.state == models.Instance.States.ERRED: return if (instance.state == models.Instance.States.OK and instance.runtime_state == models.Instance.RuntimeStates.SHUTOFF): return if (instance.state == models.Instance.States.OK and instance.runtime_state == models.Instance.RuntimeStates.ACTIVE): raise core_exceptions.IncorrectStateException( _('Please stop the instance before its removal.')) raise 
core_exceptions.IncorrectStateException( _('Instance should be shutoff and OK or erred. ' 'Please contact support.')) def destroy(self, request, uuid=None): """ Deletion of an instance is done through sending a **DELETE** request to the instance URI. Valid request example (token is user specific): .. code-block:: http DELETE /api/openstacktenant-instances/abceed63b8e844afacd63daeac855474/ HTTP/1.1 Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com Only stopped instances or instances in ERRED state can be deleted. By default when instance is destroyed, all data volumes attached to it are destroyed too. In order to preserve data volumes use query parameter ?delete_volumes=false In this case data volumes are detached from the instance and then instance is destroyed. Note that system volume is deleted anyway. For example: .. code-block:: http DELETE /api/openstacktenant-instances/abceed63b8e844afacd63daeac855474/?delete_volumes=false HTTP/1.1 Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com """ serializer = self.get_serializer(data=request.query_params, instance=self.get_object()) serializer.is_valid(raise_exception=True) delete_volumes = serializer.validated_data['delete_volumes'] release_floating_ips = serializer.validated_data[ 'release_floating_ips'] resource = self.get_object() force = resource.state == models.Instance.States.ERRED executors.InstanceDeleteExecutor.execute( resource, force=force, delete_volumes=delete_volumes, release_floating_ips=release_floating_ips, is_async=self.async_executor, ) return response.Response({'status': _('destroy was scheduled')}, status=status.HTTP_202_ACCEPTED) destroy_validators = [_can_destroy_instance, _has_backups] destroy_serializer_class = serializers.InstanceDeleteSerializer @decorators.action(detail=True, methods=['post']) def change_flavor(self, request, uuid=None): instance = self.get_object() old_flavor_name = instance.flavor_name serializer = 
self.get_serializer(instance, data=request.data) serializer.is_valid(raise_exception=True) serializer.save() flavor = serializer.validated_data.get('flavor') executors.InstanceFlavorChangeExecutor().execute( instance, flavor=flavor, old_flavor_name=old_flavor_name) return response.Response( {'status': _('change_flavor was scheduled')}, status=status.HTTP_202_ACCEPTED, ) def _can_change_flavor(instance): if (instance.state == models.Instance.States.OK and instance.runtime_state == models.Instance.RuntimeStates.ACTIVE): raise core_exceptions.IncorrectStateException( _('Please stop the instance before changing its flavor.')) change_flavor_serializer_class = serializers.InstanceFlavorChangeSerializer change_flavor_validators = [ _can_change_flavor, core_validators.StateValidator(models.Instance.States.OK), core_validators.RuntimeStateValidator( models.Instance.RuntimeStates.SHUTOFF), ] @decorators.action(detail=True, methods=['post']) def start(self, request, uuid=None): instance = self.get_object() executors.InstanceStartExecutor().execute(instance) return response.Response({'status': _('start was scheduled')}, status=status.HTTP_202_ACCEPTED) def _can_start_instance(instance): if (instance.state == models.Instance.States.OK and instance.runtime_state == models.Instance.RuntimeStates.ACTIVE): raise core_exceptions.IncorrectStateException( _('Instance is already active.')) start_validators = [ _can_start_instance, core_validators.StateValidator(models.Instance.States.OK), core_validators.RuntimeStateValidator( models.Instance.RuntimeStates.SHUTOFF), ] start_serializer_class = rf_serializers.Serializer @decorators.action(detail=True, methods=['post']) def stop(self, request, uuid=None): instance = self.get_object() executors.InstanceStopExecutor().execute(instance) return response.Response({'status': _('stop was scheduled')}, status=status.HTTP_202_ACCEPTED) def _can_stop_instance(instance): if (instance.state == models.Instance.States.OK and instance.runtime_state == 
models.Instance.RuntimeStates.SHUTOFF): raise core_exceptions.IncorrectStateException( _('Instance is already stopped.')) stop_validators = [ _can_stop_instance, core_validators.StateValidator(models.Instance.States.OK), core_validators.RuntimeStateValidator( models.Instance.RuntimeStates.ACTIVE), ] stop_serializer_class = rf_serializers.Serializer @decorators.action(detail=True, methods=['post']) def restart(self, request, uuid=None): instance = self.get_object() executors.InstanceRestartExecutor().execute(instance) return response.Response({'status': _('restart was scheduled')}, status=status.HTTP_202_ACCEPTED) def _can_restart_instance(instance): if (instance.state == models.Instance.States.OK and instance.runtime_state == models.Instance.RuntimeStates.SHUTOFF): raise core_exceptions.IncorrectStateException( _('Please start instance first.')) restart_validators = [ _can_restart_instance, core_validators.StateValidator(models.Instance.States.OK), core_validators.RuntimeStateValidator( models.Instance.RuntimeStates.ACTIVE), ] restart_serializer_class = rf_serializers.Serializer @decorators.action(detail=True, methods=['post']) def update_security_groups(self, request, uuid=None): instance = self.get_object() serializer = self.get_serializer(instance, data=request.data) serializer.is_valid(raise_exception=True) serializer.save() executors.InstanceUpdateSecurityGroupsExecutor().execute(instance) return response.Response( {'status': _('security groups update was scheduled')}, status=status.HTTP_202_ACCEPTED, ) update_security_groups_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] update_security_groups_serializer_class = ( serializers.InstanceSecurityGroupsUpdateSerializer) @decorators.action(detail=True, methods=['post']) def backup(self, request, uuid=None): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) backup = serializer.save() executors.BackupCreateExecutor().execute(backup) return 
response.Response(serializer.data, status=status.HTTP_201_CREATED) backup_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] backup_serializer_class = serializers.BackupSerializer @decorators.action(detail=True, methods=['post']) def create_backup_schedule(self, request, uuid=None): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return response.Response(serializer.data, status=status.HTTP_201_CREATED) create_backup_schedule_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] create_backup_schedule_serializer_class = serializers.BackupScheduleSerializer @decorators.action(detail=True, methods=['post']) def update_internal_ips_set(self, request, uuid=None): instance = self.get_object() serializer = self.get_serializer(instance, data=request.data) serializer.is_valid(raise_exception=True) serializer.save() executors.InstanceInternalIPsSetUpdateExecutor().execute(instance) return response.Response( {'status': _('internal ips update was scheduled')}, status=status.HTTP_202_ACCEPTED, ) update_internal_ips_set_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] update_internal_ips_set_serializer_class = ( serializers.InstanceInternalIPsSetUpdateSerializer) @decorators.action(detail=True, methods=['get']) def internal_ips_set(self, request, uuid=None): instance = self.get_object() serializer = self.get_serializer(instance.internal_ips_set.all(), many=True) return response.Response(serializer.data, status=status.HTTP_200_OK) internal_ips_set_serializer_class = serializers.NestedInternalIPSerializer @decorators.action(detail=True, methods=['post']) def update_floating_ips(self, request, uuid=None): instance = self.get_object() serializer = self.get_serializer(instance, data=request.data) serializer.is_valid(raise_exception=True) serializer.save() executors.InstanceFloatingIPsUpdateExecutor().execute(instance) return response.Response( 
{'status': _('floating ips update was scheduled')}, status=status.HTTP_202_ACCEPTED, ) update_floating_ips_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] update_floating_ips_serializer_class = ( serializers.InstanceFloatingIPsUpdateSerializer) @decorators.action(detail=True, methods=['get']) def floating_ips(self, request, uuid=None): instance = self.get_object() serializer = self.get_serializer( instance=instance.floating_ips.all(), queryset=models.FloatingIP.objects.all(), many=True, ) return response.Response(serializer.data, status=status.HTTP_200_OK) floating_ips_serializer_class = serializers.NestedFloatingIPSerializer importable_resources_backend_method = 'get_instances_for_import' importable_resources_serializer_class = serializers.InstanceImportableSerializer import_resource_serializer_class = serializers.InstanceImportSerializer import_resource_executor = executors.InstancePullExecutor @decorators.action(detail=True, methods=['get']) def console(self, request, uuid=None): instance = self.get_object() backend = instance.get_backend() try: url = backend.get_console_url(instance) except OpenStackBackendError as e: raise exceptions.ValidationError(str(e)) return response.Response({'url': url}, status=status.HTTP_200_OK) console_validators = [ core_validators.StateValidator(models.Instance.States.OK) ] def check_permissions_for_console(request, view, instance=None): if not instance: return if request.user.is_staff: return if settings.WALDUR_OPENSTACK_TENANT[ 'ALLOW_CUSTOMER_USERS_OPENSTACK_CONSOLE_ACCESS']: structure_permissions.is_administrator(request, view, instance) else: raise exceptions.PermissionDenied() console_permissions = [check_permissions_for_console] @decorators.action(detail=True, methods=['get']) def console_log(self, request, uuid=None): instance = self.get_object() backend = instance.get_backend() serializer = self.get_serializer(data=request.query_params) serializer.is_valid(raise_exception=True) length = 
serializer.validated_data.get('length') try: log = backend.get_console_output(instance, length) except OpenStackBackendError as e: raise exceptions.ValidationError(str(e)) return response.Response(log, status=status.HTTP_200_OK) console_log_serializer_class = serializers.ConsoleLogSerializer console_log_permissions = [structure_permissions.is_administrator] @decorators.action(detail=True, methods=['delete']) def force_destroy(self, request, uuid=None): """This action completely repeats 'destroy', with the exclusion of validators. Destroy's validators require stopped VM. This requirement has expired. But for compatibility with old documentation, it must be left. """ return self.destroy(request, uuid) force_destroy_validators = [ _has_backups, core_validators.StateValidator(models.Instance.States.OK, models.Instance.States.ERRED), ] force_destroy_serializer_class = destroy_serializer_class
class VirtualMachineViewSet(structure_views.BaseResourceViewSet):
    # ViewSet for VMware virtual machines: CRUD via executors plus power
    # management, guest OS control, port/disk creation and console access.
    queryset = models.VirtualMachine.objects.all()
    serializer_class = serializers.VirtualMachineSerializer
    filterset_class = filters.VirtualMachineFilter
    pull_executor = executors.VirtualMachinePullExecutor
    create_executor = executors.VirtualMachineCreateExecutor
    delete_executor = executors.VirtualMachineDeleteExecutor
    update_executor = executors.VirtualMachineUpdateExecutor

    # Reconfiguration requires a stable (OK) and powered-off VM.
    update_validators = partial_update_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_OFF),
    ]
    # Deletion additionally requires the VM to be powered off.
    destroy_validators = structure_views.BaseResourceViewSet.destroy_validators + [
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_OFF)
    ]

    @action(detail=True, methods=['post'])
    def start(self, request, uuid=None):
        """Schedule power-on of the virtual machine."""
        instance = self.get_object()
        executors.VirtualMachineStartExecutor().execute(instance)
        return Response({'status': _('start was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    start_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_OFF,
            models.VirtualMachine.RuntimeStates.SUSPENDED,
        ),
    ]
    start_serializer_class = rf_serializers.Serializer

    @action(detail=True, methods=['post'])
    def stop(self, request, uuid=None):
        """Schedule a hard power-off of the virtual machine."""
        instance = self.get_object()
        executors.VirtualMachineStopExecutor().execute(instance)
        return Response({'status': _('stop was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    stop_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_ON,
            models.VirtualMachine.RuntimeStates.SUSPENDED,
        ),
    ]
    stop_serializer_class = rf_serializers.Serializer

    @action(detail=True, methods=['post'])
    def reset(self, request, uuid=None):
        """Schedule a hard reset of the virtual machine."""
        instance = self.get_object()
        executors.VirtualMachineResetExecutor().execute(instance)
        return Response({'status': _('reset was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    reset_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_ON,
        ),
    ]
    reset_serializer_class = rf_serializers.Serializer

    @action(detail=True, methods=['post'])
    def suspend(self, request, uuid=None):
        """Schedule suspension of the virtual machine."""
        instance = self.get_object()
        executors.VirtualMachineSuspendExecutor().execute(instance)
        return Response({'status': _('suspend was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    suspend_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_ON,
        ),
    ]
    suspend_serializer_class = rf_serializers.Serializer

    # Plain function (not a method): used as a validator callable below.
    # Guest-OS actions only work when VMware Tools run inside the guest.
    def vm_tools_are_running(vm):
        if vm.tools_state != models.VirtualMachine.ToolsStates.RUNNING:
            raise rf_serializers.ValidationError('VMware Tools are not running.')

    @action(detail=True, methods=['post'])
    def shutdown_guest(self, request, uuid=None):
        """Schedule a graceful shutdown via the guest operating system."""
        instance = self.get_object()
        executors.VirtualMachineShutdownGuestExecutor().execute(instance)
        return Response({'status': _('shutdown was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    # Shared by shutdown_guest and reboot_guest: both need a powered-on VM
    # with VMware Tools running.
    shutdown_guest_validators = reboot_guest_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_ON,
        ),
        vm_tools_are_running,
    ]
    shutdown_guest_serializer_class = rf_serializers.Serializer

    @action(detail=True, methods=['post'])
    def reboot_guest(self, request, uuid=None):
        """Schedule a graceful reboot via the guest operating system."""
        instance = self.get_object()
        executors.VirtualMachineRebootGuestExecutor().execute(instance)
        return Response({'status': _('reboot was scheduled')},
                        status=status.HTTP_202_ACCEPTED)

    reboot_guest_serializer_class = rf_serializers.Serializer

    @action(detail=True, methods=['post'])
    def create_port(self, request, uuid=None):
        """Create a network port (adapter) for this VM and schedule its
        backend provisioning after the transaction commits."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        port = serializer.save()
        transaction.on_commit(
            lambda: executors.PortCreateExecutor().execute(port))
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    # Plain function (not a method): used as a validator callable below.
    def check_number_of_ports(vm):
        # Limit of the network adapter per VM is 10 in vSphere 6.7, 6.5 and 6.0
        if vm.port_set.count() >= 10:
            raise rf_serializers.ValidationError(
                'Virtual machine can have at most 10 network adapters.')

    create_port_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        check_number_of_ports,
    ]
    create_port_serializer_class = serializers.PortSerializer

    @action(detail=True, methods=['post'])
    def create_disk(self, request, uuid=None):
        """Create a virtual disk for this VM and schedule its backend
        provisioning after the transaction commits."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        disk = serializer.save()
        transaction.on_commit(
            lambda: executors.DiskCreateExecutor().execute(disk))
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    # Plain function (not a method): used as a validator callable below.
    def validate_total_size(vm):
        # max_disk_total is an optional per-service-settings quota (in MB,
        # presumably — TODO confirm units against serializers.get_int_or_none
        # callers). Reject when less than 1024 remains.
        max_disk_total = serializers.get_int_or_none(
            vm.service_settings.options, 'max_disk_total')
        if max_disk_total:
            remaining_quota = max_disk_total - vm.total_disk
            if remaining_quota < 1024:
                raise rf_serializers.ValidationError(
                    'Storage quota has been reached.')

    create_disk_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        validate_total_size,
    ]
    create_disk_serializer_class = serializers.DiskSerializer

    @action(detail=True, methods=['get'])
    def console(self, request, uuid=None):
        """
        This endpoint provides access to Virtual Machine Remote Console aka VMRC.
        """
        instance = self.get_object()
        backend = instance.get_backend()
        try:
            url = backend.get_console_url(instance)
        except Exception:
            # Backend raises implementation-specific errors; log the details
            # and return a generic 400 to the client.
            logger.exception('Unable to get console URL.')
            raise rf_serializers.ValidationError('Unable to get console URL.')
        return Response({'url': url}, status=status.HTTP_200_OK)

    console_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK)]

    @action(detail=True, methods=['get'])
    def web_console(self, request, uuid=None):
        """
        This endpoint provides access to HTML Console aka WMKS.
        """
        instance = self.get_object()
        backend = instance.get_backend()
        try:
            url = backend.get_web_console_url(instance)
        except Exception:
            logger.exception('Unable to get web console URL.')
            raise rf_serializers.ValidationError(
                'Unable to get web console URL.')
        return Response({'url': url}, status=status.HTTP_200_OK)

    # Unlike VMRC, the web console additionally requires a powered-on VM.
    web_console_validators = [
        core_validators.StateValidator(models.VirtualMachine.States.OK),
        core_validators.RuntimeStateValidator(
            models.VirtualMachine.RuntimeStates.POWERED_ON)
    ]
class FloatingIPViewSet(structure_views.ResourceViewSet):
    # Read-only view over OpenStack floating IPs, plus async attach/detach
    # actions. IPs are synchronized from the backend, so direct writes are
    # disabled.
    queryset = models.FloatingIP.objects.all().order_by('address')
    serializer_class = serializers.FloatingIPSerializer
    filterset_class = filters.FloatingIPFilter
    disabled_actions = ['update', 'partial_update', 'create']
    delete_executor = executors.FloatingIPDeleteExecutor
    pull_executor = executors.FloatingIPPullExecutor

    def list(self, request, *args, **kwargs):
        """
        To get a list of all available floating IPs, issue **GET** against */api/floating-ips/*.
        Floating IPs are read only. Each floating IP has fields: 'address', 'status'.
        Status *DOWN* means that floating IP is not linked to a VM, status *ACTIVE* means that it is in use.
        """
        return super(FloatingIPViewSet, self).list(request, *args, **kwargs)

    @decorators.action(detail=True, methods=['post'])
    def attach_to_port(self, request, uuid=None):
        """Schedule attachment of this floating IP to a port.

        The target port must be in OK state and belong to the same tenant
        as the floating IP; otherwise a 409/400 error is raised.
        """
        floating_ip: models.FloatingIP = self.get_object()
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        port: models.Port = serializer.validated_data['port']
        if port.state != models.Port.States.OK:
            raise core_exceptions.IncorrectStateException(
                _('The port [%(port)s] is expected to have [OK] state, but actual one is [%(state)s]'
                  ) % {
                    'port': port,
                    'state': port.get_state_display()
                })
        if port.tenant != floating_ip.tenant:
            raise exceptions.ValidationError({
                'detail': _('The port [%(port)s] is expected to belong to the same tenant [%(tenant)s] , but actual one is [%(actual_tenant)s]'
                            ) % {
                    'port': port,
                    'tenant': floating_ip.tenant,
                    'actual_tenant': port.tenant,
                }
            })
        # The port is serialized because the executor runs asynchronously.
        executors.FloatingIPAttachExecutor().execute(
            floating_ip, port=core_utils.serialize_instance(port))
        return response.Response({'status': _('attaching was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    attach_to_port_serializer_class = serializers.FloatingIPAttachSerializer
    attach_to_port_validators = [
        core_validators.StateValidator(models.FloatingIP.States.OK)
    ]

    @decorators.action(detail=True, methods=['post'])
    def detach_from_port(self, request, uuid=None):
        """Schedule detachment of this floating IP from its current port.

        Raises a validation error if the IP is not attached to any port.
        """
        # Fix: dropped the anomalous `request=None` default — DRF always
        # passes the request positionally, and every sibling action uses
        # the plain `(self, request, uuid=None)` signature.
        floating_ip: models.FloatingIP = self.get_object()
        if not floating_ip.port:
            raise exceptions.ValidationError({
                'port': _('Floating IP [%(fip)s] is not attached to any port.') % {
                    'fip': floating_ip
                }
            })
        executors.FloatingIPDetachExecutor().execute(floating_ip)
        return response.Response({'status': _('detaching was scheduled')},
                                 status=status.HTTP_202_ACCEPTED)

    detach_from_port_validators = [
        core_validators.StateValidator(models.FloatingIP.States.OK)
    ]
    # Body-less POST action: declare an empty serializer for consistency with
    # sibling viewsets (e.g. SubNetViewSet.connect/disconnect), so schema
    # generation does not fall back to the full FloatingIPSerializer.
    detach_from_port_serializer_class = rf_serializers.Serializer
class ClusterViewSet(
    OptionalReadonlyViewset, structure_views.ImportableResourceViewSet
):
    # ViewSet for Rancher clusters: creation/deletion are heavy async tasks,
    # plus kubeconfig retrieval and YAML manifest import.
    queryset = models.Cluster.objects.all()
    serializer_class = serializers.ClusterSerializer
    filterset_class = filters.ClusterFilter
    update_executor = executors.ClusterUpdateExecutor

    def perform_create(self, serializer):
        """Persist the cluster and its nodes, then schedule backend
        provisioning once the surrounding transaction commits."""
        cluster = serializer.save()
        user = self.request.user
        # Robustness: default to an empty list so a payload without nodes
        # does not crash the loop with a TypeError on None.
        nodes = serializer.validated_data.get('node_set', [])
        install_longhorn = serializer.validated_data['install_longhorn']
        for node_data in nodes:
            node_data['cluster'] = cluster
            models.Node.objects.create(**node_data)
        # Provisioning must not start before cluster and nodes are committed.
        transaction.on_commit(
            lambda: executors.ClusterCreateExecutor.execute(
                cluster,
                user=user,
                install_longhorn=install_longhorn,
                is_heavy_task=True,
            )
        )

    def destroy(self, request, *args, **kwargs):
        """Schedule asynchronous cluster deletion; reply 202 Accepted."""
        user = self.request.user
        instance = self.get_object()
        executors.ClusterDeleteExecutor.execute(
            instance,
            user=user,
            is_heavy_task=True,
        )
        return response.Response(
            {'detail': _('Deletion was scheduled.')}, status=status.HTTP_202_ACCEPTED
        )

    update_validators = partial_update_validators = [
        core_validators.StateValidator(models.Cluster.States.OK),
    ]
    destroy_validators = (
        structure_views.ImportableResourceViewSet.destroy_validators
        + [validators.all_cluster_related_vms_can_be_deleted, ]
    )
    importable_resources_backend_method = 'get_clusters_for_import'
    importable_resources_serializer_class = serializers.ClusterImportableSerializer
    import_resource_serializer_class = serializers.ClusterImportSerializer
    pull_executor = executors.ClusterPullExecutor

    @decorators.action(detail=True, methods=['get'])
    def kubeconfig_file(self, request, uuid=None):
        """Return the kubeconfig file contents for this cluster."""
        cluster = self.get_object()
        backend = cluster.get_backend()
        try:
            config = backend.get_kubeconfig_file(cluster)
        except exceptions.RancherException:
            raise ValidationError('Unable to get kubeconfig file.')
        return response.Response({'config': config}, status=status.HTTP_200_OK)

    kubeconfig_file_validators = [
        core_validators.StateValidator(models.Cluster.States.OK)
    ]

    @decorators.action(detail=True, methods=['post'])
    def import_yaml(self, request, uuid=None):
        """Apply a YAML manifest to the cluster, then schedule a state pull."""
        cluster = self.get_object()
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        yaml = serializer.validated_data['yaml']
        default_namespace = serializer.validated_data.get('default_namespace')
        namespace = serializer.validated_data.get('namespace')
        backend = cluster.get_backend()
        try:
            backend.import_yaml(
                cluster, yaml, default_namespace=default_namespace, namespace=namespace
            )
        except exceptions.RancherException as e:
            # NOTE(review): assumes e.args[0] is a dict-like error payload —
            # confirm against the backend; a plain-string arg would raise here.
            message = e.args[0].get('message', 'Server error')
            return response.Response(
                {'details': message}, status=status.HTTP_400_BAD_REQUEST
            )
        executors.ClusterPullExecutor.execute(cluster)
        # Bug fix: the status code was previously passed positionally,
        # i.e. Response(status.HTTP_200_OK), which made the integer 200 the
        # response *body* (data) rather than the HTTP status. Pass status=.
        return response.Response(status=status.HTTP_200_OK)

    import_yaml_serializer_class = serializers.ImportYamlSerializer